TargetLowering.h revision 2b0002b579dba5604a2673bbee2cd9969e183a71
//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code.  This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
#include <vector>

namespace llvm {
  class CallInst;
  class CCState;
  class FastISel;
  class FunctionLoweringInfo;
  class ImmutableCallSite;
  class IntrinsicInst;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineInstr;
  class MachineJumpTableInfo;
  class MCContext;
  class MCExpr;
  template<typename T> class SmallVectorImpl;
  class DataLayout;
  class TargetRegisterClass;
  class TargetLibraryInfo;
  class TargetLoweringObjectFile;
  class Value;

  namespace Sched {
    enum Preference {
      None,             // No preference
      Source,           // Follow source order.
      RegPressure,      // Scheduling for lowest register pressure.
      Hybrid,           // Scheduling for both latency and register pressure.
      ILP,              // Scheduling for ILP in low register pressure mode.
      VLIW              // Scheduling for VLIW targets.
    };
  }

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
  TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;

public:
  /// This enum indicates whether operations are valid for a target, and if
  /// not, what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector      // This vector should be widened into a larger vector.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal,  // The target supports selects with a scalar condition
                          // and vector values (ex: cmov).
    VectorMaskSelect      // The target supports vector selects with a vector
                          // mask (ex: x86 blends).
  };

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  /// NOTE: The constructor takes ownership of TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM,
                              const TargetLoweringObjectFile *TLOF);
  virtual ~TargetLoweringBase();

protected:
  /// \brief Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }
  const DataLayout *getDataLayout() const { return TD; }
  const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }

  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }

  // Return the pointer type for the given address space, defaults to
  // the pointer type from the data layout.
  // FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const;
  unsigned getPointerSizeInBits(uint32_t AS = 0) const;
  unsigned getPointerTypeSizeInBits(Type *Ty) const;
  virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;

  EVT getShiftAmountTy(EVT LHSTy) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy() const {
    return getPointerTy();
  }

  /// Return true if the select operation is expensive for this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if a vector of the given type should be split
  /// (TypeSplitVector) instead of promoted (TypePromoteInteger) during type
  /// legalization.
  virtual bool shouldSplitVectorElementType(EVT /*VT*/) const { return false; }
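  // Illustrative usage sketch (an assumption, not part of this interface):
  // optimization passes typically consult the select hooks before replacing a
  // branch with a select, e.g.
  //
  //   if (!TLI.isSelectExpensive() &&
  //       TLI.isSelectSupported(TargetLoweringBase::ScalarValSelect))
  //     ...prefer a select over a branch for this pattern...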
  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// Returns true if the target has indicated that at least one type should be
  /// bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if pow2 div is cheaper than a chain of srl/add/sra.
  bool isPow2DivCheap() const { return Pow2DivIsCheap; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  /// Return the ValueType of the result of SETCC operations.  Also used to
  /// obtain the target's preferred type for the condition operand of SELECT
  /// and BRCOND nodes.  In the case of BRCOND the argument passed is MVT::Other
  /// since there are no other operands to get a type hint from.
  virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

  /// Return the ValueType for comparison libcalls.  Comparison libcalls
  /// include floating point comparison calls, and Ordered/Unordered check
  /// calls on floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.  Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds.  For example on X86 a scalar boolean
  /// should be zero extended from i1, while the elements of a vector of
  /// booleans should be sign extended from i1.
  BooleanContent getBooleanContents(bool isVec) const {
    return isVec ? BooleanVectorContents : BooleanContents;
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes.  This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }
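  // Illustrative usage sketch (an assumption): instruction selection code
  // commonly uses the class returned here to create virtual registers, e.g.
  //
  //   const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32);
  //   unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
  //
  // where TLI and MF are supplied by the surrounding code.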
  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type.  For example, on
  /// i386 the rep register class for i8, i16, and i32 is GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if the target has native support for the specified value
  /// type.  This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      unsigned I = VT.SimpleTy;
      ValueTypeActions[I] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to.  For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register.  For illegal floating point types, this returns the
  /// integer type to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  struct IntrinsicInfo {
    unsigned     opc;         // target opcode
    EVT          memVT;       // memory VT
    const Value* ptrVal;      // value representing memory location
    int          offset;      // offset off of ptrVal
    unsigned     align;       // alignment
    bool         vol;         // is volatile?
    bool         readMem;     // reads memory?
    bool         writeMem;    // writes memory?
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to
  /// map to a MemIntrinsicNode (touches memory).  If this is the case, it
  /// returns true and stores the intrinsic information into the IntrinsicInfo
  /// that was passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively.  If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
  /// be legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type.  By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal.  Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op > array_lengthof(OpActions[0])) return Custom;
    unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)OpActions[I][Op];
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering.  This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion.  This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering.  This is used to help
  /// guide high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType];
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
    return VT.isSimple() &&
      getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy]
                                            [MemVT.SimpleTy];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }
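  // Illustrative usage sketch (an assumption): DAG combines and legalization
  // helpers typically guard new nodes on these predicates before building
  // them, e.g.
  //
  //   if (TLI.isOperationLegalOrCustom(ISD::FMA, VT) &&
  //       TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))
  //     ...form the combined node...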
  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the condition code should be treated: either it is legal,
  /// needs to be expanded to some other code sequence, or the target has a
  /// custom expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return
      getCondCodeAction(CC, VT) == Legal ||
      getCondCodeAction(CC, VT) == Custom;
  }


  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// Return the EVT corresponding to this LLVM type.  This is fixed by the
  /// LLVM operations except for the pointer size.  If AllowUnknown is true,
  /// this will return MVT::Other for types with no EVT counterpart (e.g.
  /// structs), otherwise it will assert.
  EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(PTy->getAddressSpace());

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerTy(PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }

      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getNumElements());
    }
    return EVT::getEVT(Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
    return getValueType(Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal aggregate function arguments in
  /// the caller parameter area.  This is the actual alignment, not its
  /// logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may
  /// be more than one for types (like i64) that are split into pieces.  For
  /// types like i140, which are first promoted then expanded, it is the number
  /// of registers needed to hold all the bits of the original type.  For an
  /// i140 on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
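  // Illustrative usage sketch (an assumption): argument and return-value
  // lowering typically combine the two queries above, e.g.
  //
  //   MVT RegVT        = TLI.getRegisterType(Ctx, ValueVT);
  //   unsigned NumRegs = TLI.getNumRegisters(Ctx, ValueVT);
  //
  // so an i140 value on a 32-bit target yields RegVT == MVT::i32 and
  // NumRegs == 5, matching the comment above.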
  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  /// \brief Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// \brief Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type.  If true, it also returns whether the unaligned
  /// memory access is "fast" in the second argument by reference.  This is
  /// used, for example, in situations where an array copy/move/set is
  /// converted to a sequence of store operations.  Its use helps to ensure
  /// that such replacements don't generate code that causes an alignment
  /// error (trap) on the target machine.
  virtual bool allowsUnalignedMemoryAccesses(EVT, bool * /*Fast*/ = 0) const {
    return false;
  }
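  // Illustrative usage sketch (an assumption): memcpy/memset lowering combines
  // the store-count limits with the alignment hook, e.g.
  //
  //   bool Fast = false;
  //   if (NumStores <= TLI.getMaxStoresPerMemcpy(OptSize) &&
  //       (IsAligned ||
  //        (TLI.allowsUnalignedMemoryAccesses(MVT::i32, &Fast) && Fast)))
  //     ...expand the call into an inline store sequence...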
  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, the destination alignment can satisfy any
  /// constraint.  Similarly, if SrcAlign is zero it means there isn't a need
  /// to check it against the alignment requirement, probably because the
  /// source does not need to be loaded.  If 'IsMemset' is true, that means
  /// it's expanding a memset.  If 'ZeroMemset' is true, that means it's a
  /// memset of zero.  'MemcpyStrSrc' indicates whether the memcpy source is
  /// constant so it does not need to be loaded.  It returns EVT::Other if the
  /// type should be determined using generic target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsMemset*/,
                                  bool /*ZeroMemset*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with
  /// fldl / fstpl which also does type conversion.  Note the specified type
  /// doesn't have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return whether the target can generate code for jump tables.
  bool supportJumpTables() const {
    return SupportJumpTables;
  }

  /// Return the integer threshold on the number of blocks at which to use a
  /// jump table rather than an if sequence.
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }

  /// If a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to a landing pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// Returns the target's jmp_buf size in bytes (if never set, the default is
  /// 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// Returns the target's jmp_buf alignment in bytes (if never set, the
  /// default is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// Return the minimum stack alignment of an argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// Return the preferred function alignment.
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// Return the preferred loop alignment.
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }

  /// Return whether the DAG builder should automatically insert fences and
  /// reduce ordering for atomics.
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }

  /// Return true if the target stores stack protector cookies at a fixed
  /// offset in some non-standard address space, and populates the address
  /// space and offset as appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// Returns the maximal possible offset which can be used for loads / stores
  /// from the global.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;

  /// @}

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

  /// \brief Reset the operation actions based on target options.
  virtual void resetOperationActions() {}

protected:
  /// Specify how the target extends the result of a boolean value from i1 to
  /// a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }

  /// Specify how the target extends the result of a vector boolean value from
  /// a vector of i1 to a wider type.  See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the non _ version.  Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the non _ version.  Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// Indicate whether the target can generate code for jump tables.
  void setSupportJumpTables(bool Val) {
    SupportJumpTables = Val;
  }

  /// Indicate the minimum number of blocks for which jump tables should be
  /// generated rather than an if sequence.
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception address on entry to a landing pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception typeid on entry to a landing pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }
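  // Illustrative sketch (hypothetical target, not prescribed by this header):
  // a backend normally invokes these configuration methods from its
  // TargetLowering constructor, e.g.
  //
  //   MyTargetLowering::MyTargetLowering(MyTargetMachine &TM)
  //       : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
  //     setBooleanContents(ZeroOrOneBooleanContent);
  //     setSchedulingPreference(Sched::RegPressure);
  //     setMinimumJumpTableEntries(4);
  //     setStackPointerRegisterToSaveRestore(MyTarget::SP);
  //   }
  //
  // MyTarget and its SP register are placeholders for the real target's names.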
  /// Tells the code generator not to expand operations into sequences that use
  /// the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// Tells the code generator not to expand a sequence of operations into
  /// separate sequences that increase the amount of flow control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }

  /// Tells the code generator that integer divide is expensive, and if
  /// possible, should be replaced by an alternate sequence of instructions not
  /// containing an integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }

  /// Tells the code generator that it shouldn't generate srl/add/sra for a
  /// signed divide by power of two, and let the target handle it.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

  /// Add the specified register class as an available regclass for the
  /// specified value type.  This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.SimpleTy] = RC;
  }

  /// Remove all register classes.
  void clearRegisterClasses() {
    memset(RegClassForVT, 0,MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));

    AvailableRegClasses.clear();
  }

  /// \brief Remove all operation actions.
  void clearOperationActions() {
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties();

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }

  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT VT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
  }

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }
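  // Illustrative sketch (hypothetical target): typical calls made from a
  // target constructor to describe operations it does not support directly,
  // e.g.
  //
  //   setOperationAction(ISD::SDIV, MVT::i64, Expand);    // expand or libcall
  //   setOperationAction(ISD::SELECT, MVT::f32, Custom);  // see LowerOperation
  //   setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  //   setTruncStoreAction(MVT::f64, MVT::f32, Expand);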
  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE &&
           (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    /// The lower 4 bits of the SimpleTy select the Nth 2-bit chunk within the
    /// 32-bit value, and the remaining upper bits index into the second
    /// dimension of the array to select which 32-bit value to use.
    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
    CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
    CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
  }

  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works.  If that
  /// default is insufficient, this method can be used by the target to
  /// override the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }

  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }

  /// Set the target's required jmp_buf buffer size (in bytes); default is 200
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
  /// 0
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// Set the target's minimum function alignment (in log2(bytes))
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }
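  // Illustrative sketch (hypothetical target): further configuration calls a
  // backend might make alongside the setters above, e.g.
  //
  //   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  //   setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  //   setOperationAction(ISD::CTLZ, MVT::i8, Promote);
  //   AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
  //   setTargetDAGCombine(ISD::MUL);   // PerformDAGCombine will be queried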
  /// Set the target's preferred function alignment.  This should be set if
  /// there is a performance benefit to higher-than-minimum alignment (in
  /// log2(bytes))
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// Set the target's preferred loop alignment.  A default alignment of zero
  /// means the target does not care about loop alignment.  The alignment is
  /// specified in log2(bytes).
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// Set the minimum stack alignment of an argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }

  /// Set if the DAG builder should automatically insert fences and reduce the
  /// order of atomic memory operations to Monotonic.
  void setInsertFencesForAtomic(bool fence) {
    InsertFencesForAtomic = fence;
  }

public:
  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
  /// instructions reading the address.  This allows as much computation as
  /// possible to be done in the address mode for that operand.  This hook lets
  /// targets also pass back when this should be done on intrinsics which
  /// load/store.
  virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value*> &/*Ops*/,
                                    Type *&/*AccessTy*/) const {
    return false;
  }

  /// This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null,  there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
  /// no scale.
  struct AddrMode {
    GlobalValue *BaseGV;
    int64_t      BaseOffs;
    bool         HasBaseReg;
    int64_t      Scale;
    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
  };

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.  TODO: Handle
  /// pre/postinc as well.
  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

  /// \brief Return the cost of the scaling factor used in the addressing mode
  /// represented by AM for this target, for a load/store of the specified
  /// type.
  ///
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
    // Default: assume that any scaling factor used in a legal AM is free.
    if (isLegalAddressingMode(AM, Ty)) return 0;
    return -1;
  }
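  // Illustrative usage sketch (an assumption): LSR-style code fills in an
  // AddrMode and asks whether the target can fold it, e.g. for the form
  // [basereg + 4*scalereg + 16]:
  //
  //   TargetLoweringBase::AddrMode AM;
  //   AM.HasBaseReg = true;
  //   AM.Scale      = 4;
  //   AM.BaseOffs   = 16;
  //   bool Folds = TLI.isLegalAddressingMode(AM, AccessTy);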
  /// Return true if the specified immediate is a legal icmp immediate, that is
  /// the target has icmp instructions which can compare a register against the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is a legal add immediate, that is
  /// the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  /// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
  /// whether a call is in tail position.  Typically this means that both
  /// results would be assigned to the same register or stack slot, but it
  /// could mean the target performs adequate checks of its own before
  /// proceeding with the tail call.
  virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// Return true if any actual instruction that defines a value of type Ty1
  /// implicitly zero-extends the value to Ty2 in the result register.
  ///
  /// This does not necessarily include registers defined in unknown ways, such
  /// as incoming arguments, or copies from unknown virtual registers.  Also,
  /// if isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
  /// truncate instructions.  e.g. on x86-64, all instructions that define
  /// 32-bit values implicitly zero-extend the result out to 64 bits.
  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// Return true if the target supplies and combines to a paired load
  /// two loaded values of type LoadedType next to each other in memory.
  /// RequiredAlignment gives the minimal alignment constraints that must be
  /// met to be able to select this paired load.
  ///
  /// This information is *not* used to generate actual paired loads, but it is
  /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
  ///   a = load i64* addr
  ///   b = trunc i64 a to i32
  ///   c = lshr i64 a, 32
  ///   d = trunc i64 c to i32
  /// will be optimized into:
  ///   b = load i32* addr1
  ///   d = load i32* addr2
  /// Where addr1 = addr2 +/- sizeof(i32).
  ///
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(Type * /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }

  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }

  /// Return true if zero-extending the specific node Val to type VT2 is free
  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
  /// because it's folded such as X86 zero-extending loads).
  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
    return isZExtFree(Val.getValueType(), VT2);
  }

  /// Return true if an fneg operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an fabs operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFAbsFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true, otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions)
  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
    return false;
  }

  /// Return true if it's profitable to narrow operations of type VT1 to
  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
  /// i32 to i16.
  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // Runtime Library hooks
  //

  /// Rename the default libcall routine name for the specified libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }

  /// Get the libcall routine name for the specified libcall.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }

  /// Override the default CondCode to be used to test the result of the
  /// comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// Get the CondCode that's to be used to test the result of the comparison
  /// libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }

  /// Set the CallingConv that should be used for the specified libcall.
  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
    LibcallCallingConvs[Call] = CC;
  }

  /// Get the CallingConv that should be used for the specified libcall.
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }
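  // Illustrative sketch (hypothetical routine names): a target that supplies
  // its own runtime routine for a libcall can rename it and adjust how the
  // comparison result is tested, e.g. from its constructor:
  //
  //   setLibcallName(RTLIB::SREM_I64, "__my_srem64");
  //   setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::Fast);
  //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);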
private:
  const TargetMachine &TM;
  const DataLayout *TD;
  const TargetLoweringObjectFile &TLOF;

  /// True if this is a little endian target.
  bool IsLittleEndian;

  /// Tells the code generator not to expand operations into sequences that use
  /// the select operations if possible.
  bool SelectIsExpensive;

  /// Tells the code generator not to expand integer divides by constants into
  /// a sequence of muls, adds, and shifts.  This is a hack until a real cost
  /// model is in place.  If we ever optimize for size, this will be set to
  /// true unconditionally.
  bool IntDivIsCheap;

  /// Tells the code generator to bypass slow divide or remainder
  /// instructions.  For example, BypassSlowDivWidths[32,8] tells the code
  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
  /// div/rem when the operands are positive and less than 256.
  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;

  /// Tells the code generator that it shouldn't generate srl/add/sra for a
  /// signed divide by power of two, and let the target handle it.
  bool Pow2DivIsCheap;

  /// Tells the code generator that it shouldn't generate extra flow control
  /// instructions and should attempt to combine flow control instructions via
  /// predication.
  bool JumpIsExpensive;

  /// This target prefers to use _setjmp to implement llvm.setjmp.
  ///
  /// Defaults to false.
  bool UseUnderscoreSetJmp;

  /// This target prefers to use _longjmp to implement llvm.longjmp.
  ///
  /// Defaults to false.
  bool UseUnderscoreLongJmp;

  /// Whether the target can generate code for jumptables.  If it's not true,
  /// then each jumptable must be lowered into if-then-else's.
  bool SupportJumpTables;

  /// Number of blocks threshold to use jump tables.
  int MinimumJumpTableEntries;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1.  See getBooleanContents.
  BooleanContent BooleanContents;

  /// Information about the contents of the high-bits in boolean vector values
  /// when the element type is wider than i1.  See getBooleanContents.
  BooleanContent BooleanVectorContents;

  /// The target scheduling preference: shortest possible total cycles or
  /// lowest register usage.
  Sched::Preference SchedPreferenceInfo;

  /// The size, in bytes, of the target's jmp_buf buffers
  unsigned JumpBufSize;

  /// The alignment, in bytes, of the target's jmp_buf buffers
  unsigned JumpBufAlignment;

  /// The minimum alignment that any argument on the stack needs to have.
  unsigned MinStackArgumentAlignment;

  /// The minimum function alignment (used when optimizing for size, and to
  /// prevent explicitly provided alignment from leading to incorrect code).
  unsigned MinFunctionAlignment;

  /// The preferred function alignment (used when alignment unspecified and
  /// optimizing for speed).
  unsigned PrefFunctionAlignment;

  /// The preferred loop alignment.
  unsigned PrefLoopAlignment;

  /// Whether the DAG builder should automatically insert fences and reduce
  /// ordering for atomics.  (This will be set for most architectures with
  /// weak memory ordering.)
  bool InsertFencesForAtomic;

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  unsigned StackPointerRegisterToSaveRestore;

  /// If set to a physical register, this specifies the register that receives
  /// the exception address on entry to a landing pad.
  unsigned ExceptionPointerRegister;

  /// If set to a physical register, this specifies the register that receives
  /// the exception typeid on entry to a landing pad.
  unsigned ExceptionSelectorRegister;

  /// This indicates the default register class to use for each ValueType the
  /// target supports natively.
1396 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; 1397 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; 1398 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; 1399 1400 /// This indicates the "representative" register class to use for each 1401 /// ValueType the target supports natively. This information is used by the 1402 /// scheduler to track register pressure. By default, the representative 1403 /// register class is the largest legal super-reg register class of the 1404 /// register class of the specified type. e.g. On x86, i8, i16, and i32's 1405 /// representative class would be GR32. 1406 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE]; 1407 1408 /// This indicates the "cost" of the "representative" register class for each 1409 /// ValueType. The cost is used by the scheduler to approximate register 1410 /// pressure. 1411 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE]; 1412 1413 /// For any value types we are promoting or expanding, this contains the value 1414 /// type that we are changing to. For Expanded types, this contains one step 1415 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required 1416 /// (e.g. i64 -> i16). For types natively supported by the system, this holds 1417 /// the same type (e.g. i32 -> i32). 1418 MVT TransformToType[MVT::LAST_VALUETYPE]; 1419 1420 /// For each operation and each value type, keep a LegalizeAction that 1421 /// indicates how instruction selection should deal with the operation. Most 1422 /// operations are Legal (aka, supported natively by the target), but 1423 /// operations that are not should be described. Note that operations on 1424 /// non-legal value types are not described here. 1425 uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END]; 1426 1427 /// For each load extension type and each value type, keep a LegalizeAction 1428 /// that indicates how instruction selection should deal with a load of a 1429 /// specific value type and extension type. 1430 uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE]; 1431 1432 /// For each value type pair keep a LegalizeAction that indicates whether a 1433 /// truncating store of a specific value type and truncating type is legal. 1434 uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]; 1435 1436 /// For each indexed mode and each value type, keep a pair of LegalizeAction 1437 /// that indicates how instruction selection should deal with the load / 1438 /// store. 1439 /// 1440 /// The first dimension is the value_type for the reference. The second 1441 /// dimension represents the various modes for load store. 1442 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE]; 1443 1444 /// For each condition code (ISD::CondCode) keep a LegalizeAction that 1445 /// indicates how instruction selection should deal with the condition code. 1446 /// 1447 /// Because each CC action takes up 2 bits, we need to have the array size be 1448 /// large enough to fit all of the value types. This can be done by dividing 1449 /// the MVT::LAST_VALUETYPE by 16 and adding one. 1450 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16]; 1451 1452 ValueTypeActionImpl ValueTypeActions; 1453 1454public: 1455 LegalizeKind 1456 getTypeConversion(LLVMContext &Context, EVT VT) const { 1457 // If this is a simple type, use the ComputeRegisterProp mechanism. 
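    // Note: a LegalizeKind pairs the legalization action with the type to
    // convert to. For example (illustrative and target-dependent), an i8 on a
    // target whose smallest legal integer type is i32 would come back as
    // (TypePromoteInteger, MVT::i32).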
1458 if (VT.isSimple()) { 1459 MVT SVT = VT.getSimpleVT(); 1460 assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType)); 1461 MVT NVT = TransformToType[SVT.SimpleTy]; 1462 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT); 1463 1464 assert( 1465 (LA == TypeLegal || 1466 ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) 1467 && "Promote may not follow Expand or Promote"); 1468 1469 if (LA == TypeSplitVector) 1470 return LegalizeKind(LA, EVT::getVectorVT(Context, 1471 SVT.getVectorElementType(), 1472 SVT.getVectorNumElements()/2)); 1473 if (LA == TypeScalarizeVector) 1474 return LegalizeKind(LA, SVT.getVectorElementType()); 1475 return LegalizeKind(LA, NVT); 1476 } 1477 1478 // Handle Extended Scalar Types. 1479 if (!VT.isVector()) { 1480 assert(VT.isInteger() && "Float types must be simple"); 1481 unsigned BitSize = VT.getSizeInBits(); 1482 // First promote to a power-of-two size, then expand if necessary. 1483 if (BitSize < 8 || !isPowerOf2_32(BitSize)) { 1484 EVT NVT = VT.getRoundIntegerType(Context); 1485 assert(NVT != VT && "Unable to round integer VT"); 1486 LegalizeKind NextStep = getTypeConversion(Context, NVT); 1487 // Avoid multi-step promotion. 1488 if (NextStep.first == TypePromoteInteger) return NextStep; 1489 // Return rounded integer type. 1490 return LegalizeKind(TypePromoteInteger, NVT); 1491 } 1492 1493 return LegalizeKind(TypeExpandInteger, 1494 EVT::getIntegerVT(Context, VT.getSizeInBits()/2)); 1495 } 1496 1497 // Handle vector types. 1498 unsigned NumElts = VT.getVectorNumElements(); 1499 EVT EltVT = VT.getVectorElementType(); 1500 1501 // Vectors with only one element are always scalarized. 1502 if (NumElts == 1) 1503 return LegalizeKind(TypeScalarizeVector, EltVT); 1504 1505 // Try to widen vector elements until the element type is a power of two and 1506 // promote it to a legal type later on, for example: 1507 // <3 x i8> -> <4 x i8> -> <4 x i32> 1508 if (EltVT.isInteger()) { 1509 // Vectors with a number of elements that is not a power of two are always 1510 // widened, for example <3 x i8> -> <4 x i8>. 1511 if (!VT.isPow2VectorType()) { 1512 NumElts = (unsigned)NextPowerOf2(NumElts); 1513 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts); 1514 return LegalizeKind(TypeWidenVector, NVT); 1515 } 1516 1517 // Examine the element type. 1518 LegalizeKind LK = getTypeConversion(Context, EltVT); 1519 1520 // If type is to be expanded, split the vector. 1521 // <4 x i140> -> <2 x i140> 1522 if (LK.first == TypeExpandInteger) 1523 return LegalizeKind(TypeSplitVector, 1524 EVT::getVectorVT(Context, EltVT, NumElts / 2)); 1525 1526 // Promote the integer element types until a legal vector type is found 1527 // or until the element integer type is too big. If a legal type was not 1528 // found, fallback to the usual mechanism of widening/splitting the 1529 // vector. 1530 EVT OldEltVT = EltVT; 1531 while (1) { 1532 // Increase the bitwidth of the element to the next pow-of-two 1533 // (which is greater than 8 bits). 1534 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits() 1535 ).getRoundIntegerType(Context); 1536 1537 // Stop trying when getting a non-simple element type. 1538 // Note that vector elements may be greater than legal vector element 1539 // types. Example: X86 XMM registers hold 64bit element on 32bit 1540 // systems. 1541 if (!EltVT.isSimple()) break; 1542 1543 // Build a new vector type and check if it is legal. 1544 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts); 1545 // Found a legal promoted vector type. 
1546 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal) 1547 return LegalizeKind(TypePromoteInteger, 1548 EVT::getVectorVT(Context, EltVT, NumElts)); 1549 } 1550 1551 // Reset the type to the unexpanded type if we did not find a legal vector 1552 // type with a promoted vector element type. 1553 EltVT = OldEltVT; 1554 } 1555 1556 // Try to widen the vector until a legal type is found. 1557 // If there is no wider legal type, split the vector. 1558 while (1) { 1559 // Round up to the next power of 2. 1560 NumElts = (unsigned)NextPowerOf2(NumElts); 1561 1562 // If there is no simple vector type with this many elements then there 1563 // cannot be a larger legal vector type. Note that this assumes that 1564 // there are no skipped intermediate vector types in the simple types. 1565 if (!EltVT.isSimple()) break; 1566 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts); 1567 if (LargerVector == MVT()) break; 1568 1569 // If this type is legal then widen the vector. 1570 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal) 1571 return LegalizeKind(TypeWidenVector, LargerVector); 1572 } 1573 1574 // Widen odd vectors to next power of two. 1575 if (!VT.isPow2VectorType()) { 1576 EVT NVT = VT.getPow2VectorType(Context); 1577 return LegalizeKind(TypeWidenVector, NVT); 1578 } 1579 1580 // Vectors with illegal element types are expanded. 1581 EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2); 1582 return LegalizeKind(TypeSplitVector, NVT); 1583 } 1584 1585private: 1586 std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses; 1587 1588 /// Targets can specify ISD nodes that they would like PerformDAGCombine 1589 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this 1590 /// array. 1591 unsigned char 1592 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT]; 1593 1594 /// For operations that must be promoted to a specific type, this holds the 1595 /// destination type. This map should be sparse, so don't hold it as an 1596 /// array. 1597 /// 1598 /// Targets add entries to this map with AddPromotedToType(..), clients access 1599 /// this with getTypeToPromoteTo(..). 1600 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType> 1601 PromoteToType; 1602 1603 /// Stores the name each libcall. 1604 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL]; 1605 1606 /// The ISD::CondCode that should be used to test the result of each of the 1607 /// comparison libcall against zero. 1608 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; 1609 1610 /// Stores the CallingConv that should be used for each libcall. 1611 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL]; 1612 1613protected: 1614 /// \brief Specify maximum number of store instructions per memset call. 1615 /// 1616 /// When lowering \@llvm.memset this field specifies the maximum number of 1617 /// store operations that may be substituted for the call to memset. Targets 1618 /// must set this value based on the cost threshold for that target. Targets 1619 /// should assume that the memset will be done using as many of the largest 1620 /// store operations first, followed by smaller ones, if necessary, per 1621 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine 1622 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte 1623 /// store. This only applies to setting a constant array of a constant size. 
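 /// A target typically seeds these thresholds in its TargetLowering
 /// constructor; the exact values are per-target tuning decisions. A sketch
 /// (the numbers and the target name are purely illustrative):
 /// \code
 ///   // in a hypothetical MyTargetLowering constructor
 ///   MaxStoresPerMemset = 8;         // expand small memsets into stores
 ///   MaxStoresPerMemsetOptSize = 4;  // be more conservative when optimizing for size
 /// \endcode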
1624 unsigned MaxStoresPerMemset;
1625
1626 /// Maximum number of store operations that may be substituted for the call
1627 /// to memset, used for functions with OptSize attribute.
1628 unsigned MaxStoresPerMemsetOptSize;
1629
1630 /// \brief Specify maximum number of store instructions per memcpy call.
1631 ///
1632 /// When lowering \@llvm.memcpy this field specifies the maximum number of
1633 /// store operations that may be substituted for a call to memcpy. Targets
1634 /// must set this value based on the cost threshold for that target. Targets
1635 /// should assume that the memcpy will be done using as many of the largest
1636 /// store operations first, followed by smaller ones, if necessary, per
1637 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1638 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
1639 /// and one 1-byte store. This only applies to copying a constant array of
1640 /// constant size.
1641 unsigned MaxStoresPerMemcpy;
1642
1643 /// Maximum number of store operations that may be substituted for a call to
1644 /// memcpy, used for functions with OptSize attribute.
1645 unsigned MaxStoresPerMemcpyOptSize;
1646
1647 /// \brief Specify maximum number of store instructions per memmove call.
1648 ///
1649 /// When lowering \@llvm.memmove this field specifies the maximum number of
1650 /// store instructions that may be substituted for a call to memmove. Targets
1651 /// must set this value based on the cost threshold for that target. Targets
1652 /// should assume that the memmove will be done using as many of the largest
1653 /// store operations first, followed by smaller ones, if necessary, per
1654 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
1655 /// with 8-bit alignment would result in nine 1-byte stores. This only
1656 /// applies to copying a constant array of constant size.
1657 unsigned MaxStoresPerMemmove;
1658
1659 /// Maximum number of store instructions that may be substituted for a call to
1660 /// memmove, used for functions with OptSize attribute.
1661 unsigned MaxStoresPerMemmoveOptSize;
1662
1663 /// Tells the code generator that select is more expensive than a branch if
1664 /// the branch is usually predicted right.
1665 bool PredictableSelectIsExpensive;
1666
1667 protected:
1668 /// Return true if the value types that can be represented by the specified
1669 /// register class are all legal.
1670 bool isLegalRC(const TargetRegisterClass *RC) const;
1671 };
1672
1673 /// This class defines information used to lower LLVM code to legal SelectionDAG
1674 /// operators that the target instruction selector can accept natively.
1675 ///
1676 /// This class also defines callbacks that targets must implement to lower
1677 /// target-specific constructs to SelectionDAG operators.
1678 class TargetLowering : public TargetLoweringBase {
1679 TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
1680 void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
1681
1682 public:
1683 /// NOTE: The constructor takes ownership of TLOF.
1684 explicit TargetLowering(const TargetMachine &TM,
1685 const TargetLoweringObjectFile *TLOF);
1686
1687 /// Returns true by value, base pointer and offset pointer and addressing mode
1688 /// by reference if the node's address can be legally represented as a
1689 /// pre-indexed load / store address.
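 /// For example, on a target with pre-increment addressing, a load whose
 /// address is (add %p, 4) could be matched by returning true with Base = %p,
 /// Offset = 4 and AM = ISD::PRE_INC, folding the pointer update into the
 /// memory access (illustrative; the forms actually accepted are
 /// target-specific).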
1690 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/, 1691 SDValue &/*Offset*/, 1692 ISD::MemIndexedMode &/*AM*/, 1693 SelectionDAG &/*DAG*/) const { 1694 return false; 1695 } 1696 1697 /// Returns true by value, base pointer and offset pointer and addressing mode 1698 /// by reference if this node can be combined with a load / store to form a 1699 /// post-indexed load / store. 1700 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/, 1701 SDValue &/*Base*/, 1702 SDValue &/*Offset*/, 1703 ISD::MemIndexedMode &/*AM*/, 1704 SelectionDAG &/*DAG*/) const { 1705 return false; 1706 } 1707 1708 /// Return the entry encoding for a jump table in the current function. The 1709 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum. 1710 virtual unsigned getJumpTableEncoding() const; 1711 1712 virtual const MCExpr * 1713 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/, 1714 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/, 1715 MCContext &/*Ctx*/) const { 1716 llvm_unreachable("Need to implement this hook if target has custom JTIs"); 1717 } 1718 1719 /// Returns relocation base for the given PIC jumptable. 1720 virtual SDValue getPICJumpTableRelocBase(SDValue Table, 1721 SelectionDAG &DAG) const; 1722 1723 /// This returns the relocation base for the given PIC jumptable, the same as 1724 /// getPICJumpTableRelocBase, but as an MCExpr. 1725 virtual const MCExpr * 1726 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 1727 unsigned JTI, MCContext &Ctx) const; 1728 1729 /// Return true if folding a constant offset with the given GlobalAddress is 1730 /// legal. It is frequently not legal in PIC relocation models. 1731 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; 1732 1733 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, 1734 SDValue &Chain) const; 1735 1736 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, 1737 SDValue &NewLHS, SDValue &NewRHS, 1738 ISD::CondCode &CCCode, SDLoc DL) const; 1739 1740 /// Returns a pair of (return value, chain). 1741 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, 1742 EVT RetVT, const SDValue *Ops, 1743 unsigned NumOps, bool isSigned, 1744 SDLoc dl, bool doesNotReturn = false, 1745 bool isReturnValueUsed = true) const; 1746 1747 //===--------------------------------------------------------------------===// 1748 // TargetLowering Optimization Methods 1749 // 1750 1751 /// A convenience struct that encapsulates a DAG, and two SDValues for 1752 /// returning information from TargetLowering to its clients that want to 1753 /// combine. 1754 struct TargetLoweringOpt { 1755 SelectionDAG &DAG; 1756 bool LegalTys; 1757 bool LegalOps; 1758 SDValue Old; 1759 SDValue New; 1760 1761 explicit TargetLoweringOpt(SelectionDAG &InDAG, 1762 bool LT, bool LO) : 1763 DAG(InDAG), LegalTys(LT), LegalOps(LO) {} 1764 1765 bool LegalTypes() const { return LegalTys; } 1766 bool LegalOperations() const { return LegalOps; } 1767 1768 bool CombineTo(SDValue O, SDValue N) { 1769 Old = O; 1770 New = N; 1771 return true; 1772 } 1773 1774 /// Check to see if the specified operand of the specified instruction is a 1775 /// constant integer. If so, check to see if there are any bits set in the 1776 /// constant that are not demanded. If so, shrink the constant and return 1777 /// true. 1778 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded); 1779 1780 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. 
This 1781 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be 1782 /// generalized for targets with other types of implicit widening casts. 1783 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded, 1784 SDLoc dl); 1785 }; 1786 1787 /// Look at Op. At this point, we know that only the DemandedMask bits of the 1788 /// result of Op are ever used downstream. If we can use this information to 1789 /// simplify Op, create a new simplified DAG node and return true, returning 1790 /// the original and new nodes in Old and New. Otherwise, analyze the 1791 /// expression and return a mask of KnownOne and KnownZero bits for the 1792 /// expression (used to simplify the caller). The KnownZero/One bits may only 1793 /// be accurate for those bits in the DemandedMask. 1794 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, 1795 APInt &KnownZero, APInt &KnownOne, 1796 TargetLoweringOpt &TLO, unsigned Depth = 0) const; 1797 1798 /// Determine which of the bits specified in Mask are known to be either zero 1799 /// or one and return them in the KnownZero/KnownOne bitsets. 1800 virtual void computeMaskedBitsForTargetNode(const SDValue Op, 1801 APInt &KnownZero, 1802 APInt &KnownOne, 1803 const SelectionDAG &DAG, 1804 unsigned Depth = 0) const; 1805 1806 /// This method can be implemented by targets that want to expose additional 1807 /// information about sign bits to the DAG Combiner. 1808 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, 1809 unsigned Depth = 0) const; 1810 1811 struct DAGCombinerInfo { 1812 void *DC; // The DAG Combiner object. 1813 CombineLevel Level; 1814 bool CalledByLegalizer; 1815 public: 1816 SelectionDAG &DAG; 1817 1818 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc) 1819 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {} 1820 1821 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; } 1822 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; } 1823 bool isAfterLegalizeVectorOps() const { 1824 return Level == AfterLegalizeDAG; 1825 } 1826 CombineLevel getDAGCombineLevel() { return Level; } 1827 bool isCalledByLegalizer() const { return CalledByLegalizer; } 1828 1829 void AddToWorklist(SDNode *N); 1830 void RemoveFromWorklist(SDNode *N); 1831 SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To, 1832 bool AddTo = true); 1833 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true); 1834 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true); 1835 1836 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO); 1837 }; 1838 1839 /// Try to simplify a setcc built with the specified operands and cc. If it is 1840 /// unable to simplify it, return a null SDValue. 1841 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 1842 ISD::CondCode Cond, bool foldBooleans, 1843 DAGCombinerInfo &DCI, SDLoc dl) const; 1844 1845 /// Returns true (and the GlobalValue and the offset) if the node is a 1846 /// GlobalAddress + offset. 1847 virtual bool 1848 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const; 1849 1850 /// This method will be invoked for all target nodes and for any 1851 /// target-independent nodes that the target has registered with invoke it 1852 /// for. 1853 /// 1854 /// The semantics are as follows: 1855 /// Return Value: 1856 /// SDValue.Val == 0 - No change was made 1857 /// SDValue.Val == N - N was replaced, is dead, and is already handled. 
1858 /// otherwise - N should be replaced by the returned Operand.
1859 ///
1860 /// In addition, methods provided by DAGCombinerInfo may be used to perform
1861 /// more complex transformations.
1862 ///
1863 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
1864
1865 /// Return true if the target has native support for the specified value type
1866 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
1867 /// i16 is legal, but undesirable since i16 instruction encodings are longer
1868 /// and some i16 instructions are slow.
1869 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
1870 // By default, assume all legal types are desirable.
1871 return isTypeLegal(VT);
1872 }
1873
1874 /// Return true if it is profitable for dag combiner to transform a floating
1875 /// point op of specified opcode to an equivalent op of an integer
1876 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
1877 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
1878 EVT /*VT*/) const {
1879 return false;
1880 }
1881
1882 /// This method queries the target whether it is beneficial for dag combiner to
1883 /// promote the specified node. If true, it should return the desired
1884 /// promotion type by reference.
1885 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
1886 return false;
1887 }
1888
1889 //===--------------------------------------------------------------------===//
1890 // Lowering methods - These methods must be implemented by targets so that
1891 // the SelectionDAGBuilder code knows how to lower these.
1892 //
1893
1894 /// This hook must be implemented to lower the incoming (formal) arguments,
1895 /// described by the Ins array, into the specified DAG. The implementation
1896 /// should fill in the InVals array with legal-type argument values, and
1897 /// return the resulting token chain value.
1898 ///
1899 virtual SDValue
1900 LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
1901 bool /*isVarArg*/,
1902 const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
1903 SDLoc /*dl*/, SelectionDAG &/*DAG*/,
1904 SmallVectorImpl<SDValue> &/*InVals*/) const {
1905 llvm_unreachable("Not Implemented");
1906 }
1907
1908 struct ArgListEntry {
1909 SDValue Node;
1910 Type* Ty;
1911 bool isSExt : 1;
1912 bool isZExt : 1;
1913 bool isInReg : 1;
1914 bool isSRet : 1;
1915 bool isNest : 1;
1916 bool isByVal : 1;
1917 bool isReturned : 1;
1918 uint16_t Alignment;
1919
1920 ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
1921 isSRet(false), isNest(false), isByVal(false), isReturned(false),
1922 Alignment(0) { }
1923 };
1924 typedef std::vector<ArgListEntry> ArgListTy;
1925
1926 /// This structure contains all information that is necessary for lowering
1927 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
1928 /// needs to lower a call, and targets will see this struct in their LowerCall
1929 /// implementation.
1930 struct CallLoweringInfo {
1931 SDValue Chain;
1932 Type *RetTy;
1933 bool RetSExt : 1;
1934 bool RetZExt : 1;
1935 bool IsVarArg : 1;
1936 bool IsInReg : 1;
1937 bool DoesNotReturn : 1;
1938 bool IsReturnValueUsed : 1;
1939
1940 // IsTailCall should be modified by implementations of
1941 // TargetLowering::LowerCall that perform tail call conversions.
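    // For example, a LowerCall implementation would typically clear this flag
    // once it determines the call cannot actually be emitted as a tail call
    // (e.g. because of a calling convention or stack layout mismatch).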
1942 bool IsTailCall; 1943 1944 unsigned NumFixedArgs; 1945 CallingConv::ID CallConv; 1946 SDValue Callee; 1947 ArgListTy &Args; 1948 SelectionDAG &DAG; 1949 SDLoc DL; 1950 ImmutableCallSite *CS; 1951 SmallVector<ISD::OutputArg, 32> Outs; 1952 SmallVector<SDValue, 32> OutVals; 1953 SmallVector<ISD::InputArg, 32> Ins; 1954 1955 1956 /// Constructs a call lowering context based on the ImmutableCallSite \p cs. 1957 CallLoweringInfo(SDValue chain, Type *retTy, 1958 FunctionType *FTy, bool isTailCall, SDValue callee, 1959 ArgListTy &args, SelectionDAG &dag, SDLoc dl, 1960 ImmutableCallSite &cs) 1961 : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)), 1962 RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()), 1963 IsInReg(cs.paramHasAttr(0, Attribute::InReg)), 1964 DoesNotReturn(cs.doesNotReturn()), 1965 IsReturnValueUsed(!cs.getInstruction()->use_empty()), 1966 IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()), 1967 CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag), 1968 DL(dl), CS(&cs) {} 1969 1970 /// Constructs a call lowering context based on the provided call 1971 /// information. 1972 CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt, 1973 bool isVarArg, bool isInReg, unsigned numFixedArgs, 1974 CallingConv::ID callConv, bool isTailCall, 1975 bool doesNotReturn, bool isReturnValueUsed, SDValue callee, 1976 ArgListTy &args, SelectionDAG &dag, SDLoc dl) 1977 : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt), 1978 IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn), 1979 IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall), 1980 NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee), 1981 Args(args), DAG(dag), DL(dl), CS(NULL) {} 1982 }; 1983 1984 /// This function lowers an abstract call to a function into an actual call. 1985 /// This returns a pair of operands. The first element is the return value 1986 /// for the function (if RetTy is not VoidTy). The second element is the 1987 /// outgoing token chain. It calls LowerCall to do the actual lowering. 1988 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const; 1989 1990 /// This hook must be implemented to lower calls into the the specified 1991 /// DAG. The outgoing arguments to the call are described by the Outs array, 1992 /// and the values to be returned by the call are described by the Ins 1993 /// array. The implementation should fill in the InVals array with legal-type 1994 /// return values from the call, and return the resulting token chain value. 1995 virtual SDValue 1996 LowerCall(CallLoweringInfo &/*CLI*/, 1997 SmallVectorImpl<SDValue> &/*InVals*/) const { 1998 llvm_unreachable("Not Implemented"); 1999 } 2000 2001 /// Target-specific cleanup for formal ByVal parameters. 2002 virtual void HandleByVal(CCState *, unsigned &, unsigned) const {} 2003 2004 /// This hook should be implemented to check whether the return values 2005 /// described by the Outs array can fit into the return registers. If false 2006 /// is returned, an sret-demotion is performed. 2007 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/, 2008 MachineFunction &/*MF*/, bool /*isVarArg*/, 2009 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/, 2010 LLVMContext &/*Context*/) const 2011 { 2012 // Return true by default to get preexisting behavior. 2013 return true; 2014 } 2015 2016 /// This hook must be implemented to lower outgoing return values, described 2017 /// by the Outs array, into the specified DAG. 
The implementation should 2018 /// return the resulting token chain value.
2019 virtual SDValue
2020 LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2021 bool /*isVarArg*/,
2022 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2023 const SmallVectorImpl<SDValue> &/*OutVals*/,
2024 SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
2025 llvm_unreachable("Not Implemented");
2026 }
2027
2028 /// Return true if the result of the specified node is used by a return node
2029 /// only. It also computes and returns the input chain for the tail call.
2030 ///
2031 /// This is used to determine whether it is possible to codegen a libcall as
2032 /// tail call at legalization time.
2033 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
2034 return false;
2035 }
2036
2037 /// Return true if the target may be able to emit the call instruction as a tail
2038 /// call. This is used by optimization passes to determine if it's profitable
2039 /// to duplicate return instructions to enable tailcall optimization.
2040 virtual bool mayBeEmittedAsTailCall(CallInst *) const {
2041 return false;
2042 }
2043
2044 /// Return the type that should be used to zero or sign extend a
2045 /// zeroext/signext integer argument or return value. FIXME: Most C calling
2046 /// conventions require the return type to be promoted, but this is not true
2047 /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
2048 /// calling conventions. The frontend should handle this and include all of
2049 /// the necessary information.
2050 virtual MVT getTypeForExtArgOrReturn(MVT VT,
2051 ISD::NodeType /*ExtendKind*/) const {
2052 MVT MinVT = getRegisterType(MVT::i32);
2053 return VT.bitsLT(MinVT) ? MinVT : VT;
2054 }
2055
2056 /// This callback is invoked by the type legalizer to legalize nodes with an
2057 /// illegal operand type but legal result types. It replaces the
2058 /// LowerOperation callback in the type Legalizer. The reason we cannot do
2059 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
2060 /// use this callback.
2061 ///
2062 /// TODO: Consider merging with ReplaceNodeResults.
2063 ///
2064 /// The target places new result values for the node in Results (their number
2065 /// and types must exactly match those of the original return values of
2066 /// the node), or leaves Results empty, which indicates that the node is not
2067 /// to be custom lowered after all.
2068 /// The default implementation calls LowerOperation.
2069 virtual void LowerOperationWrapper(SDNode *N,
2070 SmallVectorImpl<SDValue> &Results,
2071 SelectionDAG &DAG) const;
2072
2073 /// This callback is invoked for operations that are unsupported by the
2074 /// target, which are registered to use 'custom' lowering, and whose defined
2075 /// values are all legal. If the target has no operations that require custom
2076 /// lowering, it need not implement this. The default implementation of this
2077 /// aborts.
2078 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
2079
2080 /// This callback is invoked when a node result type is illegal for the
2081 /// target, and the operation was registered to use 'custom' lowering for that
2082 /// result type. The target places new result values for the node in Results
2083 /// (their number and types must exactly match those of the original return
2084 /// values of the node), or leaves Results empty, which indicates that the
2085 /// node is not to be custom lowered after all.
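 ///
 /// For instance (an illustrative pattern, not a requirement of the
 /// interface), a target for which an i64 result type is illegal might
 /// compute the value in two i32 halves, recombine them with
 /// ISD::BUILD_PAIR, and push the resulting i64 node into Results.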
2086 /// 2087 /// If the target has no operations that require custom lowering, it need not 2088 /// implement this. The default implementation aborts. 2089 virtual void ReplaceNodeResults(SDNode * /*N*/, 2090 SmallVectorImpl<SDValue> &/*Results*/, 2091 SelectionDAG &/*DAG*/) const { 2092 llvm_unreachable("ReplaceNodeResults not implemented for this target!"); 2093 } 2094 2095 /// This method returns the name of a target specific DAG node. 2096 virtual const char *getTargetNodeName(unsigned Opcode) const; 2097 2098 /// This method returns a target specific FastISel object, or null if the 2099 /// target does not support "fast" ISel. 2100 virtual FastISel *createFastISel(FunctionLoweringInfo &, 2101 const TargetLibraryInfo *) const { 2102 return 0; 2103 } 2104 2105 //===--------------------------------------------------------------------===// 2106 // Inline Asm Support hooks 2107 // 2108 2109 /// This hook allows the target to expand an inline asm call to be explicit 2110 /// llvm code if it wants to. This is useful for turning simple inline asms 2111 /// into LLVM intrinsics, which gives the compiler more information about the 2112 /// behavior of the code. 2113 virtual bool ExpandInlineAsm(CallInst *) const { 2114 return false; 2115 } 2116 2117 enum ConstraintType { 2118 C_Register, // Constraint represents specific register(s). 2119 C_RegisterClass, // Constraint represents any of register(s) in class. 2120 C_Memory, // Memory constraint. 2121 C_Other, // Something else. 2122 C_Unknown // Unsupported constraint. 2123 }; 2124 2125 enum ConstraintWeight { 2126 // Generic weights. 2127 CW_Invalid = -1, // No match. 2128 CW_Okay = 0, // Acceptable. 2129 CW_Good = 1, // Good weight. 2130 CW_Better = 2, // Better weight. 2131 CW_Best = 3, // Best weight. 2132 2133 // Well-known weights. 2134 CW_SpecificReg = CW_Okay, // Specific register operands. 2135 CW_Register = CW_Good, // Register operands. 2136 CW_Memory = CW_Better, // Memory operands. 2137 CW_Constant = CW_Best, // Constant operand. 2138 CW_Default = CW_Okay // Default or don't know type. 2139 }; 2140 2141 /// This contains information for each constraint that we are lowering. 2142 struct AsmOperandInfo : public InlineAsm::ConstraintInfo { 2143 /// This contains the actual string for the code, like "m". TargetLowering 2144 /// picks the 'best' code from ConstraintInfo::Codes that most closely 2145 /// matches the operand. 2146 std::string ConstraintCode; 2147 2148 /// Information about the constraint code, e.g. Register, RegisterClass, 2149 /// Memory, Other, Unknown. 2150 TargetLowering::ConstraintType ConstraintType; 2151 2152 /// If this is the result output operand or a clobber, this is null, 2153 /// otherwise it is the incoming operand to the CallInst. This gets 2154 /// modified as the asm is processed. 2155 Value *CallOperandVal; 2156 2157 /// The ValueType for the operand value. 2158 MVT ConstraintVT; 2159 2160 /// Return true of this is an input operand that is a matching constraint 2161 /// like "4". 2162 bool isMatchingInputConstraint() const; 2163 2164 /// If this is an input matching constraint, this method returns the output 2165 /// operand it matches. 2166 unsigned getMatchedOperand() const; 2167 2168 /// Copy constructor for copying from an AsmOperandInfo. 
2169 AsmOperandInfo(const AsmOperandInfo &info) 2170 : InlineAsm::ConstraintInfo(info), 2171 ConstraintCode(info.ConstraintCode), 2172 ConstraintType(info.ConstraintType), 2173 CallOperandVal(info.CallOperandVal), 2174 ConstraintVT(info.ConstraintVT) { 2175 } 2176 2177 /// Copy constructor for copying from a ConstraintInfo. 2178 AsmOperandInfo(const InlineAsm::ConstraintInfo &info) 2179 : InlineAsm::ConstraintInfo(info), 2180 ConstraintType(TargetLowering::C_Unknown), 2181 CallOperandVal(0), ConstraintVT(MVT::Other) { 2182 } 2183 }; 2184 2185 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector; 2186 2187 /// Split up the constraint string from the inline assembly value into the 2188 /// specific constraints and their prefixes, and also tie in the associated 2189 /// operand values. If this returns an empty vector, and if the constraint 2190 /// string itself isn't empty, there was an error parsing. 2191 virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const; 2192 2193 /// Examine constraint type and operand type and determine a weight value. 2194 /// The operand object must already have been set up with the operand type. 2195 virtual ConstraintWeight getMultipleConstraintMatchWeight( 2196 AsmOperandInfo &info, int maIndex) const; 2197 2198 /// Examine constraint string and operand type and determine a weight value. 2199 /// The operand object must already have been set up with the operand type. 2200 virtual ConstraintWeight getSingleConstraintMatchWeight( 2201 AsmOperandInfo &info, const char *constraint) const; 2202 2203 /// Determines the constraint code and constraint type to use for the specific 2204 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 2205 /// If the actual operand being passed in is available, it can be passed in as 2206 /// Op, otherwise an empty SDValue can be passed. 2207 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, 2208 SDValue Op, 2209 SelectionDAG *DAG = 0) const; 2210 2211 /// Given a constraint, return the type of constraint it is for this target. 2212 virtual ConstraintType getConstraintType(const std::string &Constraint) const; 2213 2214 /// Given a physical register constraint (e.g. {edx}), return the register 2215 /// number and the register class for the register. 2216 /// 2217 /// Given a register class constraint, like 'r', if this corresponds directly 2218 /// to an LLVM register class, return a register of 0 and the register class 2219 /// pointer. 2220 /// 2221 /// This should only be used for C_Register constraints. On error, this 2222 /// returns a register number of 0 and a null register class pointer.. 2223 virtual std::pair<unsigned, const TargetRegisterClass*> 2224 getRegForInlineAsmConstraint(const std::string &Constraint, 2225 MVT VT) const; 2226 2227 /// Try to replace an X constraint, which matches anything, with another that 2228 /// has more specific requirements based on the type of the corresponding 2229 /// operand. This returns null if there is no replacement to make. 2230 virtual const char *LowerXConstraint(EVT ConstraintVT) const; 2231 2232 /// Lower the specified operand into the Ops vector. If it is invalid, don't 2233 /// add anything to Ops. 
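 /// For example, for an immediate-range constraint such as "I", an
 /// implementation would typically check that Op is a ConstantSDNode whose
 /// value lies in the target's accepted range and, if so, append the
 /// corresponding target constant to Ops; otherwise Ops is left empty to
 /// signal failure (illustrative; constraint letters and their ranges are
 /// target-defined).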
2234 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, 2235 std::vector<SDValue> &Ops, 2236 SelectionDAG &DAG) const; 2237 2238 //===--------------------------------------------------------------------===// 2239 // Div utility functions 2240 // 2241 SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl, 2242 SelectionDAG &DAG) const; 2243 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, 2244 std::vector<SDNode*> *Created) const; 2245 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, 2246 std::vector<SDNode*> *Created) const; 2247 2248 //===--------------------------------------------------------------------===// 2249 // Instruction Emitting Hooks 2250 // 2251 2252 // This method should be implemented by targets that mark instructions with 2253 // the 'usesCustomInserter' flag. These instructions are special in various 2254 // ways, which require special support to insert. The specified MachineInstr 2255 // is created but not inserted into any basic blocks, and this method is 2256 // called to expand it into a sequence of instructions, potentially also 2257 // creating new basic blocks and control flow. 2258 virtual MachineBasicBlock * 2259 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const; 2260 2261 /// This method should be implemented by targets that mark instructions with 2262 /// the 'hasPostISelHook' flag. These instructions must be adjusted after 2263 /// instruction selection by target hooks. e.g. To fill in optional defs for 2264 /// ARM 's' setting instructions. 2265 virtual void 2266 AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const; 2267}; 2268 2269/// Given an LLVM IR type and return type attributes, compute the return value 2270/// EVTs and flags, and optionally also the offsets, if the return value is 2271/// being lowered to memory. 2272void GetReturnInfo(Type* ReturnType, AttributeSet attr, 2273 SmallVectorImpl<ISD::OutputArg> &Outs, 2274 const TargetLowering &TLI); 2275 2276} // end llvm namespace 2277 2278#endif 2279