TargetLowering.h revision e07f85eb76a0254d3adbdf8b5d61ff5c07858cef
1//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file describes how to lower LLVM code to machine code. This has two 11// main components: 12// 13// 1. Which ValueTypes are natively supported by the target. 14// 2. Which operations are supported for supported ValueTypes. 15// 3. Cost thresholds for alternative implementations of certain operations. 16// 17// In addition it has a few other components, like information about FP 18// immediates. 19// 20//===----------------------------------------------------------------------===// 21 22#ifndef LLVM_TARGET_TARGETLOWERING_H 23#define LLVM_TARGET_TARGETLOWERING_H 24 25#include "llvm/ADT/DenseMap.h" 26#include "llvm/AddressingMode.h" 27#include "llvm/Attributes.h" 28#include "llvm/CallingConv.h" 29#include "llvm/CodeGen/RuntimeLibcalls.h" 30#include "llvm/CodeGen/SelectionDAGNodes.h" 31#include "llvm/InlineAsm.h" 32#include "llvm/Support/CallSite.h" 33#include "llvm/Support/DebugLoc.h" 34#include "llvm/Target/TargetCallingConv.h" 35#include "llvm/Target/TargetMachine.h" 36#include <climits> 37#include <map> 38#include <vector> 39 40namespace llvm { 41 class CallInst; 42 class CCState; 43 class FastISel; 44 class FunctionLoweringInfo; 45 class ImmutableCallSite; 46 class IntrinsicInst; 47 class MachineBasicBlock; 48 class MachineFunction; 49 class MachineInstr; 50 class MachineJumpTableInfo; 51 class MCContext; 52 class MCExpr; 53 template<typename T> class SmallVectorImpl; 54 class DataLayout; 55 class TargetRegisterClass; 56 class TargetLibraryInfo; 57 class TargetLoweringObjectFile; 58 class Value; 59 60 namespace Sched { 61 enum Preference { 62 None, // No preference 63 Source, // Follow source order. 
64 RegPressure, // Scheduling for lowest register pressure. 65 Hybrid, // Scheduling for both latency and register pressure. 66 ILP, // Scheduling for ILP in low register pressure mode. 67 VLIW // Scheduling for VLIW targets. 68 }; 69 } 70 71 72//===----------------------------------------------------------------------===// 73/// TargetLowering - This class defines information used to lower LLVM code to 74/// legal SelectionDAG operators that the target instruction selector can accept 75/// natively. 76/// 77/// This class also defines callbacks that targets must implement to lower 78/// target-specific constructs to SelectionDAG operators. 79/// 80class TargetLowering { 81 TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION; 82 void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION; 83public: 84 /// LegalizeAction - This enum indicates whether operations are valid for a 85 /// target, and if not, what action should be used to make them valid. 86 enum LegalizeAction { 87 Legal, // The target natively supports this operation. 88 Promote, // This operation should be executed in a larger type. 89 Expand, // Try to expand this to other ops, otherwise use a libcall. 90 Custom // Use the LowerOperation hook to implement custom lowering. 91 }; 92 93 /// LegalizeTypeAction - This enum indicates whether a types are legal for a 94 /// target, and if not, what action should be used to make them valid. 95 enum LegalizeTypeAction { 96 TypeLegal, // The target natively supports this type. 97 TypePromoteInteger, // Replace this integer with a larger one. 98 TypeExpandInteger, // Split this integer into two of half the size. 99 TypeSoftenFloat, // Convert this float to a same size integer type. 100 TypeExpandFloat, // Split this float into two of half the size. 101 TypeScalarizeVector, // Replace this one-element vector with its element. 102 TypeSplitVector, // Split this vector into two of half the size. 
103 TypeWidenVector // This vector should be widened into a larger vector. 104 }; 105 106 /// LegalizeKind holds the legalization kind that needs to happen to EVT 107 /// in order to type-legalize it. 108 typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind; 109 110 enum BooleanContent { // How the target represents true/false values. 111 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. 112 ZeroOrOneBooleanContent, // All bits zero except for bit 0. 113 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0. 114 }; 115 116 enum SelectSupportKind { 117 ScalarValSelect, // The target supports scalar selects (ex: cmov). 118 ScalarCondVectorVal, // The target supports selects with a scalar condition 119 // and vector values (ex: cmov). 120 VectorMaskSelect // The target supports vector selects with a vector 121 // mask (ex: x86 blends). 122 }; 123 124 static ISD::NodeType getExtendForContent(BooleanContent Content) { 125 switch (Content) { 126 case UndefinedBooleanContent: 127 // Extend by adding rubbish bits. 128 return ISD::ANY_EXTEND; 129 case ZeroOrOneBooleanContent: 130 // Extend by adding zero bits. 131 return ISD::ZERO_EXTEND; 132 case ZeroOrNegativeOneBooleanContent: 133 // Extend by copying the sign bit. 134 return ISD::SIGN_EXTEND; 135 } 136 llvm_unreachable("Invalid content kind"); 137 } 138 139 /// NOTE: The constructor takes ownership of TLOF. 140 explicit TargetLowering(const TargetMachine &TM, 141 const TargetLoweringObjectFile *TLOF); 142 virtual ~TargetLowering(); 143 144 const TargetMachine &getTargetMachine() const { return TM; } 145 const DataLayout *getDataLayout() const { return TD; } 146 const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; } 147 148 bool isBigEndian() const { return !IsLittleEndian; } 149 bool isLittleEndian() const { return IsLittleEndian; } 150 // Return the pointer type for the given address space, defaults to 151 // the pointer type from the data layout. 
152 // FIXME: The default needs to be removed once all the code is updated. 153 virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; } 154 virtual MVT getShiftAmountTy(EVT LHSTy) const; 155 156 /// isSelectExpensive - Return true if the select operation is expensive for 157 /// this target. 158 bool isSelectExpensive() const { return SelectIsExpensive; } 159 160 virtual bool isSelectSupported(SelectSupportKind kind) const { return true; } 161 162 /// shouldSplitVectorElementType - Return true if a vector of the given type 163 /// should be split (TypeSplitVector) instead of promoted 164 /// (TypePromoteInteger) during type legalization. 165 virtual bool shouldSplitVectorElementType(EVT VT) const { return false; } 166 167 /// isIntDivCheap() - Return true if integer divide is usually cheaper than 168 /// a sequence of several shifts, adds, and multiplies for this target. 169 bool isIntDivCheap() const { return IntDivIsCheap; } 170 171 /// isSlowDivBypassed - Returns true if target has indicated at least one 172 /// type should be bypassed. 173 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); } 174 175 /// getBypassSlowDivTypes - Returns map of slow types for division or 176 /// remainder with corresponding fast types 177 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const { 178 return BypassSlowDivWidths; 179 } 180 181 /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of 182 /// srl/add/sra. 183 bool isPow2DivCheap() const { return Pow2DivIsCheap; } 184 185 /// isJumpExpensive() - Return true if Flow Control is an expensive operation 186 /// that should be avoided. 187 bool isJumpExpensive() const { return JumpIsExpensive; } 188 189 /// isPredictableSelectExpensive - Return true if selects are only cheaper 190 /// than branches if the branch is unlikely to be predicted right. 
191 bool isPredictableSelectExpensive() const { 192 return predictableSelectIsExpensive; 193 } 194 195 /// getSetCCResultType - Return the ValueType of the result of SETCC 196 /// operations. Also used to obtain the target's preferred type for 197 /// the condition operand of SELECT and BRCOND nodes. In the case of 198 /// BRCOND the argument passed is MVT::Other since there are no other 199 /// operands to get a type hint from. 200 virtual EVT getSetCCResultType(EVT VT) const; 201 202 /// getCmpLibcallReturnType - Return the ValueType for comparison 203 /// libcalls. Comparions libcalls include floating point comparion calls, 204 /// and Ordered/Unordered check calls on floating point numbers. 205 virtual 206 MVT::SimpleValueType getCmpLibcallReturnType() const; 207 208 /// getBooleanContents - For targets without i1 registers, this gives the 209 /// nature of the high-bits of boolean values held in types wider than i1. 210 /// "Boolean values" are special true/false values produced by nodes like 211 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND. 212 /// Not to be confused with general values promoted from i1. 213 /// Some cpus distinguish between vectors of boolean and scalars; the isVec 214 /// parameter selects between the two kinds. For example on X86 a scalar 215 /// boolean should be zero extended from i1, while the elements of a vector 216 /// of booleans should be sign extended from i1. 217 BooleanContent getBooleanContents(bool isVec) const { 218 return isVec ? BooleanVectorContents : BooleanContents; 219 } 220 221 /// getSchedulingPreference - Return target scheduling preference. 222 Sched::Preference getSchedulingPreference() const { 223 return SchedPreferenceInfo; 224 } 225 226 /// getSchedulingPreference - Some scheduler, e.g. hybrid, can switch to 227 /// different scheduling heuristics for different nodes. This function returns 228 /// the preference (or none) for the given node. 
229 virtual Sched::Preference getSchedulingPreference(SDNode *) const { 230 return Sched::None; 231 } 232 233 /// getRegClassFor - Return the register class that should be used for the 234 /// specified value type. 235 virtual const TargetRegisterClass *getRegClassFor(EVT VT) const { 236 assert(VT.isSimple() && "getRegClassFor called on illegal type!"); 237 const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy]; 238 assert(RC && "This value type is not natively supported!"); 239 return RC; 240 } 241 242 /// getRepRegClassFor - Return the 'representative' register class for the 243 /// specified value type. The 'representative' register class is the largest 244 /// legal super-reg register class for the register class of the value type. 245 /// For example, on i386 the rep register class for i8, i16, and i32 are GR32; 246 /// while the rep register class is GR64 on x86_64. 247 virtual const TargetRegisterClass *getRepRegClassFor(EVT VT) const { 248 assert(VT.isSimple() && "getRepRegClassFor called on illegal type!"); 249 const TargetRegisterClass *RC = RepRegClassForVT[VT.getSimpleVT().SimpleTy]; 250 return RC; 251 } 252 253 /// getRepRegClassCostFor - Return the cost of the 'representative' register 254 /// class for the specified value type. 255 virtual uint8_t getRepRegClassCostFor(EVT VT) const { 256 assert(VT.isSimple() && "getRepRegClassCostFor called on illegal type!"); 257 return RepRegClassCostForVT[VT.getSimpleVT().SimpleTy]; 258 } 259 260 /// isTypeLegal - Return true if the target has native support for the 261 /// specified value type. This means that it has a register that directly 262 /// holds it without promotions or expansions. 
263 bool isTypeLegal(EVT VT) const { 264 assert(!VT.isSimple() || 265 (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT)); 266 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0; 267 } 268 269 class ValueTypeActionImpl { 270 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum 271 /// that indicates how instruction selection should deal with the type. 272 uint8_t ValueTypeActions[MVT::LAST_VALUETYPE]; 273 274 public: 275 ValueTypeActionImpl() { 276 std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0); 277 } 278 279 LegalizeTypeAction getTypeAction(MVT VT) const { 280 return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy]; 281 } 282 283 void setTypeAction(EVT VT, LegalizeTypeAction Action) { 284 unsigned I = VT.getSimpleVT().SimpleTy; 285 ValueTypeActions[I] = Action; 286 } 287 }; 288 289 const ValueTypeActionImpl &getValueTypeActions() const { 290 return ValueTypeActions; 291 } 292 293 /// getTypeAction - Return how we should legalize values of this type, either 294 /// it is already legal (return 'Legal') or we need to promote it to a larger 295 /// type (return 'Promote'), or we need to expand it into multiple registers 296 /// of smaller integer type (return 'Expand'). 'Custom' is not an option. 297 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const { 298 return getTypeConversion(Context, VT).first; 299 } 300 LegalizeTypeAction getTypeAction(MVT VT) const { 301 return ValueTypeActions.getTypeAction(VT); 302 } 303 304 /// getTypeToTransformTo - For types supported by the target, this is an 305 /// identity function. For types that must be promoted to larger types, this 306 /// returns the larger type to promote to. For integer types that are larger 307 /// than the largest integer register, this contains one step in the expansion 308 /// to get to the smaller register. For illegal floating point types, this 309 /// returns the integer type to transform to. 
310 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const { 311 return getTypeConversion(Context, VT).second; 312 } 313 314 /// getTypeToExpandTo - For types supported by the target, this is an 315 /// identity function. For types that must be expanded (i.e. integer types 316 /// that are larger than the largest integer register or illegal floating 317 /// point types), this returns the largest legal type it will be expanded to. 318 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const { 319 assert(!VT.isVector()); 320 while (true) { 321 switch (getTypeAction(Context, VT)) { 322 case TypeLegal: 323 return VT; 324 case TypeExpandInteger: 325 VT = getTypeToTransformTo(Context, VT); 326 break; 327 default: 328 llvm_unreachable("Type is not legal nor is it to be expanded!"); 329 } 330 } 331 } 332 333 /// getVectorTypeBreakdown - Vector types are broken down into some number of 334 /// legal first class types. For example, EVT::v8f32 maps to 2 EVT::v4f32 335 /// with Altivec or SSE1, or 8 promoted EVT::f64 values with the X86 FP stack. 336 /// Similarly, EVT::v2i64 turns into 4 EVT::i32 values with both PPC and X86. 337 /// 338 /// This method returns the number of registers needed, and the VT for each 339 /// register. It also returns the VT and quantity of the intermediate values 340 /// before they are promoted/expanded. 341 /// 342 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, 343 EVT &IntermediateVT, 344 unsigned &NumIntermediates, 345 EVT &RegisterVT) const; 346 347 /// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the 348 /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If 349 /// this is the case, it returns true and store the intrinsic 350 /// information into the IntrinsicInfo that was passed to the function. 
351 struct IntrinsicInfo { 352 unsigned opc; // target opcode 353 EVT memVT; // memory VT 354 const Value* ptrVal; // value representing memory location 355 int offset; // offset off of ptrVal 356 unsigned align; // alignment 357 bool vol; // is volatile? 358 bool readMem; // reads memory? 359 bool writeMem; // writes memory? 360 }; 361 362 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, 363 unsigned /*Intrinsic*/) const { 364 return false; 365 } 366 367 /// isFPImmLegal - Returns true if the target can instruction select the 368 /// specified FP immediate natively. If false, the legalizer will materialize 369 /// the FP immediate as a load from a constant pool. 370 virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const { 371 return false; 372 } 373 374 /// isShuffleMaskLegal - Targets can use this to indicate that they only 375 /// support *some* VECTOR_SHUFFLE operations, those with specific masks. 376 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 377 /// are assumed to be legal. 378 virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/, 379 EVT /*VT*/) const { 380 return true; 381 } 382 383 /// canOpTrap - Returns true if the operation can trap for the value type. 384 /// VT must be a legal type. By default, we optimistically assume most 385 /// operations don't trap except for divide and remainder. 386 virtual bool canOpTrap(unsigned Op, EVT VT) const; 387 388 /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is 389 /// used by Targets can use this to indicate if there is a suitable 390 /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant 391 /// pool entry. 
392 virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/, 393 EVT /*VT*/) const { 394 return false; 395 } 396 397 /// getOperationAction - Return how this operation should be treated: either 398 /// it is legal, needs to be promoted to a larger size, needs to be 399 /// expanded to some other code sequence, or the target has a custom expander 400 /// for it. 401 LegalizeAction getOperationAction(unsigned Op, EVT VT) const { 402 if (VT.isExtended()) return Expand; 403 // If a target-specific SDNode requires legalization, require the target 404 // to provide custom legalization for it. 405 if (Op > array_lengthof(OpActions[0])) return Custom; 406 unsigned I = (unsigned) VT.getSimpleVT().SimpleTy; 407 return (LegalizeAction)OpActions[I][Op]; 408 } 409 410 /// isOperationLegalOrCustom - Return true if the specified operation is 411 /// legal on this target or can be made legal with custom lowering. This 412 /// is used to help guide high-level lowering decisions. 413 bool isOperationLegalOrCustom(unsigned Op, EVT VT) const { 414 return (VT == MVT::Other || isTypeLegal(VT)) && 415 (getOperationAction(Op, VT) == Legal || 416 getOperationAction(Op, VT) == Custom); 417 } 418 419 /// isOperationExpand - Return true if the specified operation is illegal on 420 /// this target or unlikely to be made legal with custom lowering. This is 421 /// used to help guide high-level lowering decisions. 422 bool isOperationExpand(unsigned Op, EVT VT) const { 423 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand); 424 } 425 426 /// isOperationLegal - Return true if the specified operation is legal on this 427 /// target. 
428 bool isOperationLegal(unsigned Op, EVT VT) const { 429 return (VT == MVT::Other || isTypeLegal(VT)) && 430 getOperationAction(Op, VT) == Legal; 431 } 432 433 /// getLoadExtAction - Return how this load with extension should be treated: 434 /// either it is legal, needs to be promoted to a larger size, needs to be 435 /// expanded to some other code sequence, or the target has a custom expander 436 /// for it. 437 LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const { 438 assert(ExtType < ISD::LAST_LOADEXT_TYPE && 439 VT.getSimpleVT() < MVT::LAST_VALUETYPE && 440 "Table isn't big enough!"); 441 return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType]; 442 } 443 444 /// isLoadExtLegal - Return true if the specified load with extension is legal 445 /// on this target. 446 bool isLoadExtLegal(unsigned ExtType, EVT VT) const { 447 return VT.isSimple() && getLoadExtAction(ExtType, VT) == Legal; 448 } 449 450 /// getTruncStoreAction - Return how this store with truncation should be 451 /// treated: either it is legal, needs to be promoted to a larger size, needs 452 /// to be expanded to some other code sequence, or the target has a custom 453 /// expander for it. 454 LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const { 455 assert(ValVT.getSimpleVT() < MVT::LAST_VALUETYPE && 456 MemVT.getSimpleVT() < MVT::LAST_VALUETYPE && 457 "Table isn't big enough!"); 458 return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy] 459 [MemVT.getSimpleVT().SimpleTy]; 460 } 461 462 /// isTruncStoreLegal - Return true if the specified store with truncation is 463 /// legal on this target. 
464 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const { 465 return isTypeLegal(ValVT) && MemVT.isSimple() && 466 getTruncStoreAction(ValVT, MemVT) == Legal; 467 } 468 469 /// getIndexedLoadAction - Return how the indexed load should be treated: 470 /// either it is legal, needs to be promoted to a larger size, needs to be 471 /// expanded to some other code sequence, or the target has a custom expander 472 /// for it. 473 LegalizeAction 474 getIndexedLoadAction(unsigned IdxMode, EVT VT) const { 475 assert(IdxMode < ISD::LAST_INDEXED_MODE && 476 VT.getSimpleVT() < MVT::LAST_VALUETYPE && 477 "Table isn't big enough!"); 478 unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy; 479 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4); 480 } 481 482 /// isIndexedLoadLegal - Return true if the specified indexed load is legal 483 /// on this target. 484 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const { 485 return VT.isSimple() && 486 (getIndexedLoadAction(IdxMode, VT) == Legal || 487 getIndexedLoadAction(IdxMode, VT) == Custom); 488 } 489 490 /// getIndexedStoreAction - Return how the indexed store should be treated: 491 /// either it is legal, needs to be promoted to a larger size, needs to be 492 /// expanded to some other code sequence, or the target has a custom expander 493 /// for it. 494 LegalizeAction 495 getIndexedStoreAction(unsigned IdxMode, EVT VT) const { 496 assert(IdxMode < ISD::LAST_INDEXED_MODE && 497 VT.getSimpleVT() < MVT::LAST_VALUETYPE && 498 "Table isn't big enough!"); 499 unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy; 500 return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f); 501 } 502 503 /// isIndexedStoreLegal - Return true if the specified indexed load is legal 504 /// on this target. 
505 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const { 506 return VT.isSimple() && 507 (getIndexedStoreAction(IdxMode, VT) == Legal || 508 getIndexedStoreAction(IdxMode, VT) == Custom); 509 } 510 511 /// getCondCodeAction - Return how the condition code should be treated: 512 /// either it is legal, needs to be expanded to some other code sequence, 513 /// or the target has a custom expander for it. 514 LegalizeAction 515 getCondCodeAction(ISD::CondCode CC, EVT VT) const { 516 assert((unsigned)CC < array_lengthof(CondCodeActions) && 517 (unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 && 518 "Table isn't big enough!"); 519 /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 64bit 520 /// value and the upper 27 bits index into the second dimension of the 521 /// array to select what 64bit value to use. 522 LegalizeAction Action = (LegalizeAction) 523 ((CondCodeActions[CC][VT.getSimpleVT().SimpleTy >> 5] 524 >> (2*(VT.getSimpleVT().SimpleTy & 0x1F))) & 3); 525 assert(Action != Promote && "Can't promote condition code!"); 526 return Action; 527 } 528 529 /// isCondCodeLegal - Return true if the specified condition code is legal 530 /// on this target. 531 bool isCondCodeLegal(ISD::CondCode CC, EVT VT) const { 532 return getCondCodeAction(CC, VT) == Legal || 533 getCondCodeAction(CC, VT) == Custom; 534 } 535 536 537 /// getTypeToPromoteTo - If the action for this operation is to promote, this 538 /// method returns the ValueType to promote to. 539 EVT getTypeToPromoteTo(unsigned Op, EVT VT) const { 540 assert(getOperationAction(Op, VT) == Promote && 541 "This operation isn't promoted!"); 542 543 // See if this has an explicit type specified. 
544 std::map<std::pair<unsigned, MVT::SimpleValueType>, 545 MVT::SimpleValueType>::const_iterator PTTI = 546 PromoteToType.find(std::make_pair(Op, VT.getSimpleVT().SimpleTy)); 547 if (PTTI != PromoteToType.end()) return PTTI->second; 548 549 assert((VT.isInteger() || VT.isFloatingPoint()) && 550 "Cannot autopromote this type, add it with AddPromotedToType."); 551 552 EVT NVT = VT; 553 do { 554 NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1); 555 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && 556 "Didn't find type to promote to!"); 557 } while (!isTypeLegal(NVT) || 558 getOperationAction(Op, NVT) == Promote); 559 return NVT; 560 } 561 562 /// getValueType - Return the EVT corresponding to this LLVM type. 563 /// This is fixed by the LLVM operations except for the pointer size. If 564 /// AllowUnknown is true, this will return MVT::Other for types with no EVT 565 /// counterpart (e.g. structs), otherwise it will assert. 566 EVT getValueType(Type *Ty, bool AllowUnknown = false) const { 567 // Lower scalar pointers to native pointer types. 568 if (Ty->isPointerTy()) return PointerTy; 569 570 if (Ty->isVectorTy()) { 571 VectorType *VTy = cast<VectorType>(Ty); 572 Type *Elm = VTy->getElementType(); 573 // Lower vectors of pointers to native pointer types. 574 if (Elm->isPointerTy()) 575 Elm = EVT(PointerTy).getTypeForEVT(Ty->getContext()); 576 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false), 577 VTy->getNumElements()); 578 } 579 return EVT::getEVT(Ty, AllowUnknown); 580 } 581 582 583 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 584 /// function arguments in the caller parameter area. This is the actual 585 /// alignment, not its logarithm. 586 virtual unsigned getByValTypeAlignment(Type *Ty) const; 587 588 /// getRegisterType - Return the type of registers that this ValueType will 589 /// eventually require. 
590 EVT getRegisterType(MVT VT) const { 591 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT)); 592 return RegisterTypeForVT[VT.SimpleTy]; 593 } 594 595 /// getRegisterType - Return the type of registers that this ValueType will 596 /// eventually require. 597 EVT getRegisterType(LLVMContext &Context, EVT VT) const { 598 if (VT.isSimple()) { 599 assert((unsigned)VT.getSimpleVT().SimpleTy < 600 array_lengthof(RegisterTypeForVT)); 601 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy]; 602 } 603 if (VT.isVector()) { 604 EVT VT1, RegisterVT; 605 unsigned NumIntermediates; 606 (void)getVectorTypeBreakdown(Context, VT, VT1, 607 NumIntermediates, RegisterVT); 608 return RegisterVT; 609 } 610 if (VT.isInteger()) { 611 return getRegisterType(Context, getTypeToTransformTo(Context, VT)); 612 } 613 llvm_unreachable("Unsupported extended type!"); 614 } 615 616 /// getNumRegisters - Return the number of registers that this ValueType will 617 /// eventually require. This is one for any types promoted to live in larger 618 /// registers, but may be more than one for types (like i64) that are split 619 /// into pieces. For types like i140, which are first promoted then expanded, 620 /// it is the number of registers needed to hold all the bits of the original 621 /// type. For an i140 on a 32 bit machine this means 5 registers. 
622 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const { 623 if (VT.isSimple()) { 624 assert((unsigned)VT.getSimpleVT().SimpleTy < 625 array_lengthof(NumRegistersForVT)); 626 return NumRegistersForVT[VT.getSimpleVT().SimpleTy]; 627 } 628 if (VT.isVector()) { 629 EVT VT1, VT2; 630 unsigned NumIntermediates; 631 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2); 632 } 633 if (VT.isInteger()) { 634 unsigned BitWidth = VT.getSizeInBits(); 635 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits(); 636 return (BitWidth + RegWidth - 1) / RegWidth; 637 } 638 llvm_unreachable("Unsupported extended type!"); 639 } 640 641 /// ShouldShrinkFPConstant - If true, then instruction selection should 642 /// seek to shrink the FP constant of the specified type to a smaller type 643 /// in order to save space and / or reduce runtime. 644 virtual bool ShouldShrinkFPConstant(EVT) const { return true; } 645 646 /// hasTargetDAGCombine - If true, the target has custom DAG combine 647 /// transformations that it can perform for the specified node. 648 bool hasTargetDAGCombine(ISD::NodeType NT) const { 649 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); 650 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7)); 651 } 652 653 /// This function returns the maximum number of store operations permitted 654 /// to replace a call to llvm.memset. The value is set by the target at the 655 /// performance threshold for such a replacement. If OptSize is true, 656 /// return the limit for functions that have OptSize attribute. 657 /// @brief Get maximum # of store operations permitted for llvm.memset 658 unsigned getMaxStoresPerMemset(bool OptSize) const { 659 return OptSize ? maxStoresPerMemsetOptSize : maxStoresPerMemset; 660 } 661 662 /// This function returns the maximum number of store operations permitted 663 /// to replace a call to llvm.memcpy. 
The value is set by the target at the 664 /// performance threshold for such a replacement. If OptSize is true, 665 /// return the limit for functions that have OptSize attribute. 666 /// @brief Get maximum # of store operations permitted for llvm.memcpy 667 unsigned getMaxStoresPerMemcpy(bool OptSize) const { 668 return OptSize ? maxStoresPerMemcpyOptSize : maxStoresPerMemcpy; 669 } 670 671 /// This function returns the maximum number of store operations permitted 672 /// to replace a call to llvm.memmove. The value is set by the target at the 673 /// performance threshold for such a replacement. If OptSize is true, 674 /// return the limit for functions that have OptSize attribute. 675 /// @brief Get maximum # of store operations permitted for llvm.memmove 676 unsigned getMaxStoresPerMemmove(bool OptSize) const { 677 return OptSize ? maxStoresPerMemmoveOptSize : maxStoresPerMemmove; 678 } 679 680 /// This function returns true if the target allows unaligned memory accesses. 681 /// of the specified type. If true, it also returns whether the unaligned 682 /// memory access is "fast" in the second argument by reference. This is used, 683 /// for example, in situations where an array copy/move/set is converted to a 684 /// sequence of store operations. It's use helps to ensure that such 685 /// replacements don't generate code that causes an alignment error (trap) on 686 /// the target machine. 687 /// @brief Determine if the target supports unaligned memory accesses. 688 virtual bool allowsUnalignedMemoryAccesses(EVT, bool *Fast = 0) const { 689 return false; 690 } 691 692 /// This function returns true if the target would benefit from code placement 693 /// optimization. 694 /// @brief Determine if the target should perform code placement optimization. 
695 bool shouldOptimizeCodePlacement() const { 696 return benefitFromCodePlacementOpt; 697 } 698 699 /// getOptimalMemOpType - Returns the target specific optimal type for load 700 /// and store operations as a result of memset, memcpy, and memmove 701 /// lowering. If DstAlign is zero that means it's safe to destination 702 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 703 /// means there isn't a need to check it against alignment requirement, 704 /// probably because the source does not need to be loaded. If 705 /// 'IsZeroVal' is true, that means it's safe to return a 706 /// non-scalar-integer type, e.g. empty string source, constant, or loaded 707 /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is 708 /// constant so it does not need to be loaded. 709 /// It returns EVT::Other if the type should be determined using generic 710 /// target-independent logic. 711 virtual EVT getOptimalMemOpType(uint64_t /*Size*/, 712 unsigned /*DstAlign*/, unsigned /*SrcAlign*/, 713 bool /*IsZeroVal*/, 714 bool /*MemcpyStrSrc*/, 715 MachineFunction &/*MF*/) const { 716 return MVT::Other; 717 } 718 719 /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp 720 /// to implement llvm.setjmp. 721 bool usesUnderscoreSetJmp() const { 722 return UseUnderscoreSetJmp; 723 } 724 725 /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp 726 /// to implement llvm.longjmp. 727 bool usesUnderscoreLongJmp() const { 728 return UseUnderscoreLongJmp; 729 } 730 731 /// supportJumpTables - return whether the target can generate code for 732 /// jump tables. 733 bool supportJumpTables() const { 734 return SupportJumpTables; 735 } 736 737 /// getMinimumJumpTableEntries - return integer threshold on number of 738 /// blocks to use jump tables rather than if sequence. 
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }

  /// getStackPointerRegisterToSaveRestore - If a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
  /// and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// getExceptionPointerRegister - If a physical register, this returns
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// getExceptionSelectorRegister - If a physical register, this returns
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
  /// set, the default is 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
  /// (if never set, the default is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// getMinStackArgumentAlignment - return the minimum stack alignment of an
  /// argument (in log2(bytes)).
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// getMinFunctionAlignment - return the minimum function alignment (in
  /// log2(bytes)).
  ///
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// getPrefFunctionAlignment - return the preferred function alignment (in
  /// log2(bytes)).
  ///
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// getPrefLoopAlignment - return the preferred loop alignment.
  ///
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }

  /// getShouldFoldAtomicFences - return whether the combiner should fold
  /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
  ///
  bool getShouldFoldAtomicFences() const {
    return ShouldFoldAtomicFences;
  }

  /// getInsertFencesForAtomic - return whether the DAG builder should
  /// automatically insert fences and reduce ordering for atomics.
  ///
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/, SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// getJumpTableEncoding - Return the entry encoding for a jump table in the
  /// current function.  The returned value is a member of the
  /// MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  /// LowerCustomJumpTableEntry - Lower one entry of a jump table with a
  /// target-custom encoding to an MCExpr.  Must be overridden by targets
  /// that report a custom jump table entry encoding.
  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }

  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
  /// jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
  /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
  /// MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                               unsigned JTI, MCContext &Ctx) const;

  /// isOffsetFoldingLegal - Return true if folding a constant offset
  /// with the given GlobalAddress is legal.  It is frequently not legal in
  /// PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  /// getStackCookieLocation - Return true if the target stores stack
  /// protector cookies at a fixed offset in some non-standard address
  /// space, and populates the address space and offset as
  /// appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// getMaximalGlobalOffset - Returns the maximal possible offset which can be
  /// used for loads / stores from the global.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
  /// SDValues for returning information from TargetLowering to its clients
  /// that want to combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;     // Queried via LegalTypes().
    bool LegalOps;     // Queried via LegalOperations().
    SDValue Old;       // Set by CombineTo: the node being replaced.
    SDValue New;       // Set by CombineTo: the replacement node.

    explicit TargetLoweringOpt(SelectionDAG &InDAG,
                               bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    /// CombineTo - Record that node O should be replaced by N.  Always
    /// returns true so callers can write 'return TLO.CombineTo(O, N);'.
    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }

    /// ShrinkDemandedConstant - Check to see if the specified operand of the
    /// specified instruction is a constant integer.  If so, check to see if
    /// there are any bits set in the constant that are not demanded.  If so,
    /// shrink the constant and return true.
    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);

    /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
    /// casts are free.  This uses isZExtFree and ZERO_EXTEND for the widening
    /// cast, but it could be generalized for targets with other types of
    /// implicit widening casts.
    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                          DebugLoc dl);
  };

  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
  /// DemandedMask bits of the result of Op are ever used downstream.  If we can
  /// use this information to simplify Op, create a new simplified DAG node and
  /// return true, returning the original and new nodes in Old and New.
  /// Otherwise, analyze the expression and return a mask of KnownOne and
  /// KnownZero bits for the expression (used to simplify the caller).
  /// The KnownZero/One bits may only be accurate for those bits in the
  /// DemandedMask.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            APInt &KnownZero, APInt &KnownOne,
                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const SelectionDAG &DAG,
                                              unsigned Depth = 0) const;

  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
  /// targets that want to expose additional information about sign bits to the
  /// DAG Combiner.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth = 0) const;

  /// DAGCombinerInfo - Context passed to PerformDAGCombine; records where in
  /// the combine/legalize pipeline the combiner is running and provides
  /// methods for rewriting the DAG.
  struct DAGCombinerInfo {
    void *DC;  // The opaque DAG Combiner object.
    bool BeforeLegalize;
    bool BeforeLegalizeOps;
    bool CalledByLegalizer;
  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
      : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
        CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return BeforeLegalize; }
    bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    void RemoveFromWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
                      bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
  /// and cc.  If it is unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                        ISD::CondCode Cond, bool foldBooleans,
                        DAGCombinerInfo &DCI, DebugLoc dl) const;

  /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
  /// node is a GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// PerformDAGCombine - This method will be invoked for all target nodes and
  /// for any target-independent nodes that the target has registered with
  /// invoke it for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0   - No change was made
  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
  ///   otherwise          - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  /// isTypeDesirableForOp - Return true if the target has native support for
  /// the specified value type and it is 'desirable' to use the type for the
  /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
  /// instruction encodings are longer and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// isDesirableToTransformToIntegerOp - Return true if it is profitable for
  /// dag combiner to transform a floating point op of specified opcode to an
  /// equivalent op of an integer type. e.g. f32 load -> i32 load can be
  /// profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// IsDesirableToPromoteOp - This method queries the target whether it is
  /// beneficial for dag combiner to promote the specified node. If true, it
  /// should return the desired promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

protected:
  /// setBooleanContents - Specify how the target extends the result of a
  /// boolean value from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }

  /// setBooleanVectorContents - Specify how the target extends the result
  /// of a vector boolean value from a vector of i1 to a wider type.  See
  /// getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// setSchedulingPreference - Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
  /// use _setjmp to implement llvm.setjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
  /// use _longjmp to implement llvm.longjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// setSupportJumpTables - Indicate whether the target can generate code for
  /// jump tables.
  void setSupportJumpTables(bool Val) {
    SupportJumpTables = Val;
  }

  /// setMinimumJumpTableEntries - Indicate the number of blocks at which to
  /// generate jump tables rather than an if sequence.
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// setStackPointerRegisterToSaveRestore - If set to a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
  /// and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// setExceptionPointerRegister - If set to a physical register, this sets
  /// the register that receives the exception address on entry to a landing
  /// pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// setExceptionSelectorRegister - If set to a physical register, this sets
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }

  /// setSelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// setJumpIsExpensive - Tells the code generator not to expand sequence of
  /// operations into a separate sequences that increases the amount of
  /// flow control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }

  /// setIntDivIsCheap - Tells the code generator whether integer divide is
  /// cheap.  If it is not cheap, an integer divide should, where possible, be
  /// replaced by an alternate sequence of instructions not containing an
  /// integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }

  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by power of two, and let the target handle
  /// it.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

  /// addRegisterClass - Add the specified register class as an available
  /// regclass for the specified value type.  This indicates the selector can
  /// handle values of that class natively.
  void addRegisterClass(EVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
  }

  /// findRepresentativeClass - Return the largest legal super-reg register
  /// class of the register class for the specified type and its associated
  /// "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(EVT VT) const;

  /// computeRegisterProperties - Once all of the register classes are added,
  /// this allows us to compute derived properties we expose.
  void computeRegisterProperties();

  /// setOperationAction - Indicate that the specified operation does not work
  /// with the specified type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }

  /// setLoadExtAction - Indicate that the specified load with extension does
  /// not work with the specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT VT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
  }

  /// setTruncStoreAction - Indicate that the specified truncating store does
  /// not work with the specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }

  /// setIndexedLoadAction - Indicate that the specified indexed load does or
  /// does not work with the specified type and indicate what to do about
  /// it.  NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half of the byte.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
  }

  /// setIndexedStoreAction - Indicate that the specified indexed store does or
  /// does not work with the specified type and indicate what to do about
  /// it.  NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half of the byte.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

  /// setCondCodeAction - Indicate that the specified condition code is or isn't
  /// supported on the target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE &&
           (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    /// The lower 5 bits of the SimpleTy select one of the 32 2-bit fields
    /// packed into a 64-bit element; the remaining upper bits of the SimpleTy
    /// index into the second dimension of the array to select which 64-bit
    /// element to use.
    CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
      &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
    CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
      |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
  }

  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
  /// promotion code defaults to trying a larger integer/fp until it can find
  /// one that works.  If that default is insufficient, this method can be used
  /// by the target to override the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }

  /// setTargetDAGCombine - Targets should invoke this method for each target
  /// independent node that they want to provide a custom DAG combiner for by
  /// implementing the PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    // One bit per opcode, packed eight opcodes to a byte.
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }

  /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
  /// bytes); default is 200
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// setJumpBufAlignment - Set the target's required jmp_buf buffer
  /// alignment (in bytes); default is 0
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// setMinFunctionAlignment - Set the target's minimum function alignment (in
  /// log2(bytes))
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }

  /// setPrefFunctionAlignment - Set the target's preferred function alignment.
  /// This should be set if there is a performance benefit to
  /// higher-than-minimum alignment (in log2(bytes))
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// setPrefLoopAlignment - Set the target's preferred loop alignment. Default
  /// alignment is zero, it means the target does not care about loop alignment.
  /// The alignment is specified in log2(bytes).
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// setMinStackArgumentAlignment - Set the minimum stack alignment of an
  /// argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }

  /// setShouldFoldAtomicFences - Set if the target's implementation of the
  /// atomic operation intrinsics includes locking.  Default is false.
  void setShouldFoldAtomicFences(bool fold) {
    ShouldFoldAtomicFences = fold;
  }

  /// setInsertFencesForAtomic - Set if the DAG builder should
  /// automatically insert fences and reduce the order of atomic memory
  /// operations to Monotonic.
  void setInsertFencesForAtomic(bool fence) {
    InsertFencesForAtomic = fence;
  }

public:
  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// LowerFormalArguments - This hook must be implemented to lower the
  /// incoming (formal) arguments, described by the Ins array, into the
  /// specified DAG. The implementation should fill in the InVals array
  /// with legal-type argument values, and return the resulting token
  /// chain value.
  ///
  virtual SDValue
    LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                         bool /*isVarArg*/,
                         const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
                         DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
                         SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// ArgListEntry - Describes one argument of a call being lowered: the
  /// value, its IR type, its parameter attributes, and its alignment.
  struct ArgListEntry {
    SDValue Node;
    Type* Ty;
    bool isSExt  : 1;
    bool isZExt  : 1;
    bool isInReg : 1;
    bool isSRet  : 1;
    bool isNest  : 1;
    bool isByVal : 1;
    uint16_t Alignment;

    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
  };
  typedef std::vector<ArgListEntry> ArgListTy;

  /// CallLoweringInfo - This structure contains all information that is
  /// necessary for lowering calls. It is passed to TLI::LowerCallTo when the
  /// SelectionDAG builder needs to lower a call, and targets will see this
  /// struct in their LowerCall implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy;
    bool RetSExt           : 1;
    bool RetZExt           : 1;
    bool IsVarArg          : 1;
    bool IsInReg           : 1;
    bool DoesNotReturn     : 1;
    bool IsReturnValueUsed : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall;

    unsigned NumFixedArgs;
    CallingConv::ID CallConv;
    SDValue Callee;
    ArgListTy &Args;
    SelectionDAG &DAG;
    DebugLoc DL;
    ImmutableCallSite *CS;  // Null when constructed without a call site.
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;


    /// CallLoweringInfo - Constructs a call lowering context based on the
    /// ImmutableCallSite \p cs.
    CallLoweringInfo(SDValue chain, Type *retTy,
                     FunctionType *FTy, bool isTailCall, SDValue callee,
                     ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
                     ImmutableCallSite &cs)
    : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attributes::SExt)),
      RetZExt(cs.paramHasAttr(0, Attributes::ZExt)), IsVarArg(FTy->isVarArg()),
      IsInReg(cs.paramHasAttr(0, Attributes::InReg)),
      DoesNotReturn(cs.doesNotReturn()),
      IsReturnValueUsed(!cs.getInstruction()->use_empty()),
      IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
      CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
      DL(dl), CS(&cs) {}

    /// CallLoweringInfo - Constructs a call lowering context based on the
    /// provided call information.
    CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
                     bool isVarArg, bool isInReg, unsigned numFixedArgs,
                     CallingConv::ID callConv, bool isTailCall,
                     bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
                     ArgListTy &args, SelectionDAG &dag, DebugLoc dl)
    : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
      IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
      IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
      NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
      Args(args), DAG(dag), DL(dl), CS(NULL) {}
  };

  /// LowerCallTo - This function lowers an abstract call to a function into an
  /// actual call.  This returns a pair of operands.  The first element is the
  /// return value for the function (if RetTy is not VoidTy).  The second
  /// element is the outgoing token chain.  It calls LowerCall to do the actual
  /// lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

  /// LowerCall - This hook must be implemented to lower calls into the
  /// specified DAG. The outgoing arguments to the call are described
  /// by the Outs array, and the values to be returned by the call are
  /// described by the Ins array. The implementation should fill in the
  /// InVals array with legal-type return values from the call, and return
  /// the resulting token chain value.
  virtual SDValue
    LowerCall(CallLoweringInfo &/*CLI*/,
              SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// HandleByVal - Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}

  /// CanLowerReturn - This hook should be implemented to check whether the
  /// return values described by the Outs array can fit into the return
  /// registers.  If false is returned, an sret-demotion is performed.
  ///
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// LowerReturn - This hook must be implemented to lower outgoing
  /// return values, described by the Outs array, into the specified
  /// DAG. The implementation should return the resulting token chain
  /// value.
  ///
  virtual SDValue
    LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                bool /*isVarArg*/,
                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                const SmallVectorImpl<SDValue> &/*OutVals*/,
                DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// isUsedByReturnOnly - Return true if result of the specified node is used
  /// by a return node only.  It also computes and returns the input chain for
  /// the tail call.
  /// This is used to determine whether it is possible
  /// to codegen a libcall as tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &Chain) const {
    return false;
  }

  /// mayBeEmittedAsTailCall - Return true if the target may be able to emit
  /// the call instruction as a tail call. This is used by optimization passes
  /// to determine if it's profitable to duplicate return instructions to
  /// enable tailcall optimization.
  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
    return false;
  }

  /// getTypeForExtArgOrReturn - Return the type that should be used to zero or
  /// sign extend a zeroext/signext integer argument or return value.
  /// FIXME: Most C calling conventions require the return type to be promoted,
  /// but this is not true all the time, e.g.
  /// i1 on x86-64. It is also not
  /// necessary for non-C calling conventions. The frontend should handle this
  /// and include all of the necessary information.
  virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                                       ISD::NodeType /*ExtendKind*/) const {
    // By default, widen anything narrower than the i32 register type up to
    // that type; wider types are returned unchanged.
    EVT MinVT = getRegisterType(Context, MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }

  /// LowerOperationWrapper - This callback is invoked by the type legalizer
  /// to legalize nodes with an illegal operand type but legal result types.
  /// It replaces the LowerOperation callback in the type Legalizer.
  /// The reason we can not do away with LowerOperation entirely is that
  /// LegalizeDAG isn't yet ready to use this callback.
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// LowerOperation - This callback is invoked for operations that are
  /// unsupported by the target, which are registered to use 'custom' lowering,
  /// and whose defined values are all legal.
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation of this aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

  /// ReplaceNodeResults - This callback is invoked when a node result type is
  /// illegal for the target, and the operation was registered to use 'custom'
  /// lowering for that result type.
  /// The target places new result values for
  /// the node in Results (their number and types must exactly match those of
  /// the original return values of the node), or leaves Results empty, which
  /// indicates that the node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// getTargetNodeName() - This method returns the name of a target specific
  /// DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// ExpandInlineAsm - This hook allows the target to expand an inline asm
  /// call to be explicit llvm code if it wants to.  This is useful for
  /// turning simple inline asms into LLVM intrinsics, which gives the
  /// compiler more information about the behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or don't know type.
  };

  /// AsmOperandInfo - This contains information for each constraint that we are
  /// lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// ConstraintCode - This contains the actual string for the code, like "m".
    /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that
    /// most closely matches the operand.
    std::string ConstraintCode;

    /// ConstraintType - Information about the constraint code, e.g. Register,
    /// RegisterClass, Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType;

    /// CallOperandVal - If this is the result output operand or a
    /// clobber, this is null, otherwise it is the incoming operand to the
    /// CallInst.  This gets modified as the asm is processed.
    Value *CallOperandVal;

    /// ConstraintVT - The ValueType for the operand value.
    EVT ConstraintVT;

    /// isMatchingInputConstraint - Return true if this is an input operand that
    /// is a matching constraint like "4".
    bool isMatchingInputConstraint() const;

    /// getMatchedOperand - If this is an input matching constraint, this method
    /// returns the output operand it matches.
    unsigned getMatchedOperand() const;

    /// Copy constructor for copying from an AsmOperandInfo.
    AsmOperandInfo(const AsmOperandInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintCode(info.ConstraintCode),
        ConstraintType(info.ConstraintType),
        CallOperandVal(info.CallOperandVal),
        ConstraintVT(info.ConstraintVT) {
    }

    /// Constructor for initializing from a plain ConstraintInfo; the
    /// TargetLowering-specific fields start out unknown/null until the
    /// constraint is analyzed.
    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintType(TargetLowering::C_Unknown),
        CallOperandVal(0), ConstraintVT(MVT::Other) {
    }
  };

  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;

  /// ParseConstraints - Split up the constraint string from the inline
  /// assembly value into the specific constraints and their prefixes,
  /// and also tie in the associated operand values.
  /// If this returns an empty vector, and if the constraint string itself
  /// isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// ComputeConstraintToUse - Determines the constraint code and constraint
  /// type to use for the specific AsmOperandInfo, setting
  /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
  /// being passed in is available, it can be passed in as Op, otherwise an
  /// empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = 0) const;

  /// getConstraintType - Given a constraint, return the type of constraint it
  /// is for this target.
  virtual ConstraintType getConstraintType(const std::string &Constraint) const;

  /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
  /// {edx}), return the register number and the register class for the
  /// register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints.  On error,
  /// this returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 EVT VT) const;

  /// LowerXConstraint - try to replace an X constraint, which matches anything,
  /// with another that has more specific requirements based on the type of the
  /// corresponding operand.  This returns null if there is no replacement to
  /// make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector.  If it is invalid, don't add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  // EmitInstrWithCustomInserter - This method should be implemented by targets
  // that mark instructions with the 'usesCustomInserter' flag.
  // These
  // instructions are special in various ways, which require special support to
  // insert.  The specified MachineInstr is created but not inserted into any
  // basic blocks, and this method is called to expand it into a sequence of
  // instructions, potentially also creating new basic blocks and control flow.
  virtual MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;

  /// AdjustInstrPostInstrSelection - This method should be implemented by
  /// targets that mark instructions with the 'hasPostISelHook' flag. These
  /// instructions must be adjusted after instruction selection by target hooks.
  /// e.g. To fill in optional defs for ARM 's' setting instructions.
  virtual void
  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
  /// same BB as Load/Store instructions reading the address.  This allows as
  /// much computation as possible to be done in the address mode for that
  /// operand.  This hook lets targets also pass back when this should be done
  /// on intrinsics which load/store.  The default says no intrinsics qualify.
  virtual bool GetAddrModeArguments(IntrinsicInst *I,
                                    SmallVectorImpl<Value*> &Ops,
                                    Type *&AccessTy) const {
    return false;
  }

  /// isLegalAddressingMode - Return true if the addressing mode represented by
  /// AM is legal for this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

  /// isLegalICmpImmediate - Return true if the specified immediate is legal
  /// icmp immediate, that is the target has icmp instructions which can compare
  /// a register against the immediate without having to materialize the
  /// immediate into a register.  Defaults to true (all immediates legal).
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// isLegalAddImmediate - Return true if the specified immediate is legal
  /// add immediate, that is the target has add instructions which can add
  /// a register with the immediate without having to materialize the
  /// immediate into a register.  Defaults to true (all immediates legal).
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
  /// register EAX to i16 by referencing its sub-register AX.
  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  /// EVT overload of the above; conservatively false by default.
  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// isZExtFree - Return true if any actual instruction that defines a
  /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
  /// register. This does not necessarily include registers defined in
  /// unknown ways, such as incoming arguments, or copies from unknown
  /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
  /// does not necessarily apply to truncate instructions. e.g. on x86-64,
  /// all instructions that define 32-bit values implicit zero-extend the
  /// result out to 64 bits.
  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  /// EVT overload of the above; conservatively false by default.
  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// isZExtFree - Return true if zero-extending the specific node Val to type
  /// VT2 is free (either because it's implicitly zero-extended such as ARM
  /// ldrb / ldrh or because it's folded such as X86 zero-extending loads).
  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
    // By default defer to the type-based query on Val's value type.
    return isZExtFree(Val.getValueType(), VT2);
  }

  /// isFNegFree - Return true if an fneg operation is free to the point where
  /// it is never worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT) const {
    return false;
  }

  /// isFAbsFree - Return true if an fabs operation is free to the point where
  /// it is never worthwhile to replace it with a bitwise operation.
  virtual bool isFAbsFree(EVT) const {
    return false;
  }

  /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
  /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
  /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
  /// is expanded to mul + add.
  virtual bool isFMAFasterThanMulAndAdd(EVT) const {
    return false;
  }

  /// isNarrowingProfitable - Return true if it's profitable to narrow
  /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
  /// from i32 to i8 but not from i32 to i16.
  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  // Helpers for expanding divisions by constants during DAG combining; any
  // nodes created may be reported back through the optional Created list.
  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
                         SelectionDAG &DAG) const;
  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    std::vector<SDNode*> *Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    std::vector<SDNode*> *Created) const;


  //===--------------------------------------------------------------------===//
  // Runtime Library hooks
  //

  /// setLibcallName - Rename the default libcall routine name for the specified
  /// libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }

  /// getLibcallName - Get the libcall routine name for the specified libcall.
  ///
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }

  /// setCmpLibcallCC - Override the default CondCode to be used to test the
  /// result of the comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
  /// the comparison libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }

  /// setLibcallCallingConv - Set the CallingConv that should be used for the
  /// specified libcall.
  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
    LibcallCallingConvs[Call] = CC;
  }

  /// getLibcallCallingConv - Get the CallingConv that should be used for the
  /// specified libcall.
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }

private:
  const TargetMachine &TM;
  const DataLayout *TD;
  const TargetLoweringObjectFile &TLOF;

  /// PointerTy - The type to use for pointers for the default address space,
  /// usually i32 or i64.
  ///
  MVT PointerTy;

  /// IsLittleEndian - True if this is a little endian target.
  ///
  bool IsLittleEndian;

  /// SelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  bool SelectIsExpensive;

  /// IntDivIsCheap - Tells the code generator not to expand integer divides by
  /// constants into a sequence of muls, adds, and shifts.  This is a hack until
  /// a real cost model is in place.  If we ever optimize for size, this will be
  /// set to true unconditionally.
  bool IntDivIsCheap;

  /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
  /// remainder instructions. For example, BypassSlowDivWidths[32,8] tells the
  /// code generator to bypass 32-bit integer div/rem with an 8-bit unsigned
  /// integer div/rem when the operands are positive and less than 256.
  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;

  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by power of two, and let the target handle
  /// it.
  bool Pow2DivIsCheap;

  /// JumpIsExpensive - Tells the code generator that it shouldn't generate
  /// extra flow control instructions and should attempt to combine flow
  /// control instructions via predication.
  bool JumpIsExpensive;

  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
  /// llvm.setjmp.  Defaults to false.
  bool UseUnderscoreSetJmp;

  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
  /// llvm.longjmp.  Defaults to false.
  bool UseUnderscoreLongJmp;

  /// SupportJumpTables - Whether the target can generate code for jumptables.
  /// If it's not true, then each jumptable must be lowered into if-then-else's.
  bool SupportJumpTables;

  /// MinimumJumpTableEntries - Number of blocks threshold to use jump tables.
  int MinimumJumpTableEntries;

  /// BooleanContents - Information about the contents of the high-bits in
  /// boolean values held in a type wider than i1.  See getBooleanContents.
  BooleanContent BooleanContents;
  /// BooleanVectorContents - Information about the contents of the high-bits
  /// in boolean vector values when the element type is wider than i1.  See
  /// getBooleanContents.
  BooleanContent BooleanVectorContents;

  /// SchedPreferenceInfo - The target scheduling preference: shortest possible
  /// total cycles or lowest register usage.
  Sched::Preference SchedPreferenceInfo;

  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
  unsigned JumpBufSize;

  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
  /// buffers
  unsigned JumpBufAlignment;

  /// MinStackArgumentAlignment - The minimum alignment that any argument
  /// on the stack needs to have.
  ///
  unsigned MinStackArgumentAlignment;

  /// MinFunctionAlignment - The minimum function alignment (used when
  /// optimizing for size, and to prevent explicitly provided alignment
  /// from leading to incorrect code).
  ///
  unsigned MinFunctionAlignment;

  /// PrefFunctionAlignment - The preferred function alignment (used when
  /// alignment unspecified and optimizing for speed).
  ///
  unsigned PrefFunctionAlignment;

  /// PrefLoopAlignment - The preferred loop alignment.
  ///
  unsigned PrefLoopAlignment;

  /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
  /// be folded into the enclosed atomic intrinsic instruction by the
  /// combiner.
  bool ShouldFoldAtomicFences;

  /// InsertFencesForAtomic - Whether the DAG builder should automatically
  /// insert fences and reduce ordering for atomics.  (This will be set
  /// for most architectures with weak memory ordering.)
  bool InsertFencesForAtomic;

  /// StackPointerRegisterToSaveRestore - If set to a physical register, this
  /// specifies the register that llvm.savestack/llvm.restorestack should save
  /// and restore.
  unsigned StackPointerRegisterToSaveRestore;

  /// ExceptionPointerRegister - If set to a physical register, this specifies
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned ExceptionPointerRegister;

  /// ExceptionSelectorRegister - If set to a physical register, this specifies
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned ExceptionSelectorRegister;

  /// RegClassForVT - This indicates the default register class to use for
  /// each ValueType the target supports natively.
  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
  EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];

  /// RepRegClassForVT - This indicates the "representative" register class to
  /// use for each ValueType the target supports natively. This information is
  /// used by the scheduler to track register pressure. By default, the
  /// representative register class is the largest legal super-reg register
  /// class of the register class of the specified type. e.g. On x86, i8, i16,
  /// and i32's representative class would be GR32.
  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];

  /// RepRegClassCostForVT - This indicates the "cost" of the "representative"
  /// register class for each ValueType. The cost is used by the scheduler to
  /// approximate register pressure.
  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];

  /// TransformToType - For any value types we are promoting or expanding, this
  /// contains the value type that we are changing to.  For Expanded types, this
  /// contains one step of the expand (e.g. i64 -> i32), even if there are
  /// multiple steps required (e.g. i64 -> i16).  For types natively supported
  /// by the system, this holds the same type (e.g. i32 -> i32).
  EVT TransformToType[MVT::LAST_VALUETYPE];

  /// OpActions - For each operation and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with the operation.
  /// Most operations are Legal (aka, supported natively by the target), but
  /// operations that are not should be described.  Note that operations on
  /// non-legal value types are not described here.
  uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];

  /// LoadExtActions - For each load extension type and each value type,
  /// keep a LegalizeAction that indicates how instruction selection should deal
  /// with a load of a specific value type and extension type.
  uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];

  /// TruncStoreActions - For each value type pair keep a LegalizeAction that
  /// indicates whether a truncating store of a specific value type and
  /// truncating type is legal.
  uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];

  /// IndexedModeActions - For each indexed mode and each value type,
  /// keep a pair of LegalizeAction that indicates how instruction
  /// selection should deal with the load / store.  The first dimension is the
  /// value_type for the reference. The second dimension represents the various
  /// modes for load store.
  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];

  /// CondCodeActions - For each condition code (ISD::CondCode) keep a
  /// LegalizeAction that indicates how instruction selection should
  /// deal with the condition code.
  /// Because each CC action takes up 2 bits, we need to have the array size
  /// be large enough to fit all of the value types. This can be done by
  /// dividing the MVT::LAST_VALUETYPE by 32 and adding one.
  uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];

  ValueTypeActionImpl ValueTypeActions;

public:
  /// getTypeConversion - Return a pair of the LegalizeTypeAction to perform
  /// on VT and the type to convert it to for a single step of legalization
  /// (the result type may itself need further legalization).
  LegalizeKind
  getTypeConversion(LLVMContext &Context, EVT VT) const {
    // If this is a simple type, use the ComputeRegisterProp mechanism.
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             array_lengthof(TransformToType));
      EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
      LegalizeTypeAction LA = ValueTypeActions.getTypeAction(VT.getSimpleVT());

      assert(
        (!(NVT.isSimple() && LA != TypeLegal) ||
         ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
         && "Promote may not follow Expand or Promote");

      // For split vectors, TransformToType holds the full type; report the
      // half-width vector as the one-step result.
      if (LA == TypeSplitVector)
        NVT = EVT::getVectorVT(Context, VT.getVectorElementType(),
                               VT.getVectorNumElements() / 2);
      return LegalizeKind(LA, NVT);
    }

    // Handle Extended Scalar Types.
    if (!VT.isVector()) {
      assert(VT.isInteger() && "Float types must be simple");
      unsigned BitSize = VT.getSizeInBits();
      // First promote to a power-of-two size, then expand if necessary.
      if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
        EVT NVT = VT.getRoundIntegerType(Context);
        assert(NVT != VT && "Unable to round integer VT");
        LegalizeKind NextStep = getTypeConversion(Context, NVT);
        // Avoid multi-step promotion.
        if (NextStep.first == TypePromoteInteger) return NextStep;
        // Return rounded integer type.
        return LegalizeKind(TypePromoteInteger, NVT);
      }

      // Power-of-two sized but still illegal: expand in half-size steps.
      return LegalizeKind(TypeExpandInteger,
                          EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
    }

    // Handle vector types.
    unsigned NumElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();

    // Vectors with only one element are always scalarized.
    if (NumElts == 1)
      return LegalizeKind(TypeScalarizeVector, EltVT);

    // Try to widen vector elements until a legal type is found.
    if (EltVT.isInteger()) {
      // Vectors with a number of elements that is not a power of two are always
      // widened, for example <3 x float> -> <4 x float>.
      if (!VT.isPow2VectorType()) {
        NumElts = (unsigned)NextPowerOf2(NumElts);
        EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
        return LegalizeKind(TypeWidenVector, NVT);
      }

      // Examine the element type.
      LegalizeKind LK = getTypeConversion(Context, EltVT);

      // If type is to be expanded, split the vector.
      //  <4 x i140> -> <2 x i140>
      if (LK.first == TypeExpandInteger)
        return LegalizeKind(TypeSplitVector,
                            EVT::getVectorVT(Context, EltVT, NumElts / 2));

      // Promote the integer element types until a legal vector type is found
      // or until the element integer type is too big. If a legal type was not
      // found, fallback to the usual mechanism of widening/splitting the
      // vector.
      while (1) {
        // Increase the bitwidth of the element to the next pow-of-two
        // (which is greater than 8 bits).
        EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
                                 ).getRoundIntegerType(Context);

        // Stop trying when getting a non-simple element type.
        // Note that vector elements may be greater than legal vector element
        // types. Example: X86 XMM registers hold 64bit element on 32bit systems.
        if (!EltVT.isSimple()) break;

        // Build a new vector type and check if it is legal.
        MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
        // Found a legal promoted vector type.
        if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
          return LegalizeKind(TypePromoteInteger,
                              EVT::getVectorVT(Context, EltVT, NumElts));
      }
    }

    // Try to widen the vector until a legal type is found.
    // If there is no wider legal type, split the vector.
    while (1) {
      // Round up to the next power of 2.
      NumElts = (unsigned)NextPowerOf2(NumElts);

      // If there is no simple vector type with this many elements then there
      // cannot be a larger legal vector type.  Note that this assumes that
      // there are no skipped intermediate vector types in the simple types.
      if (!EltVT.isSimple()) break;
      MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      if (LargerVector == MVT()) break;

      // If this type is legal then widen the vector.
      if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
        return LegalizeKind(TypeWidenVector, LargerVector);
    }

    // Widen odd vectors to next power of two.
    if (!VT.isPow2VectorType()) {
      EVT NVT = VT.getPow2VectorType(Context);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Vectors with illegal element types are expanded.
    EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
    return LegalizeKind(TypeSplitVector, NVT);
  }

private:
  std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;

  /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
  /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
  /// which sets a bit in this array.
  unsigned char
  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];

  /// PromoteToType - For operations that must be promoted to a specific type,
  /// this holds the destination type.  This map should be sparse, so don't hold
  /// it as an array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients access
  /// this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
    PromoteToType;

  /// LibcallRoutineNames - Stores the name of each libcall.
  ///
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];

  /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
  /// of each of the comparison libcall against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

  /// LibcallCallingConvs - Stores the CallingConv that should be used for each
  /// libcall.
  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];

protected:
  /// When lowering \@llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset.  Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions.
  /// For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store.  This only applies to setting a constant array of a constant size.
  /// @brief Specify maximum number of store instructions per memset call.
  unsigned maxStoresPerMemset;

  /// Maximum number of stores operations that may be substituted for the call
  /// to memset, used for functions with OptSize attribute.
  unsigned maxStoresPerMemsetOptSize;

  /// When lowering \@llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy.  Targets
  /// must set this value based on the cost threshold for that target.  Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions.  For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
  /// and one 1-byte store.  This only applies to copying a constant array of
  /// constant size.
  /// @brief Specify maximum number of store instructions per memcpy call.
  unsigned maxStoresPerMemcpy;

  /// Maximum number of store operations that may be substituted for a call
  /// to memcpy, used for functions with OptSize attribute.
  unsigned maxStoresPerMemcpyOptSize;

  /// When lowering \@llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove.  Targets
  /// must set this value based on the cost threshold for that target.  Targets
  /// should assume that the memmove will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions.
  /// For example, moving 9 bytes on a 32-bit machine
  /// with 8-bit alignment would result in nine 1-byte stores.  This only
  /// applies to copying a constant array of constant size.
  /// @brief Specify maximum number of store instructions per memmove call.
  unsigned maxStoresPerMemmove;

  /// Maximum number of store instructions that may be substituted for a call
  /// to memmove, used for functions with OptSize attribute.
  unsigned maxStoresPerMemmoveOptSize;

  /// This field specifies whether the target can benefit from code placement
  /// optimization.
  bool benefitFromCodePlacementOpt;

  /// predictableSelectIsExpensive - Tells the code generator that select is
  /// more expensive than a branch if the branch is usually predicted right.
  bool predictableSelectIsExpensive;

private:
  /// isLegalRC - Return true if the value types that can be represented by the
  /// specified register class are all legal.
  bool isLegalRC(const TargetRegisterClass *RC) const;
};

/// GetReturnInfo - Given an LLVM IR type and return type attributes,
/// compute the return value EVTs and flags, and optionally also
/// the offsets, if the return value is being lowered to memory.
void GetReturnInfo(Type* ReturnType, Attributes attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI);

} // end llvm namespace

#endif