TargetLowering.h revision f560ffae1f1f6591859c7b70636a3eca6c03f083
//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes how to lower LLVM code to machine code.  This has three
// main components:
//
//  1. Which ValueTypes are natively supported by the target.
//  2. Which operations are supported for supported ValueTypes.
//  3. Cost thresholds for alternative implementations of certain operations.
//
// In addition it has a few other components, like information about FP
// immediates.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/STLExtras.h"
#include <climits>
#include <map>
#include <vector>

namespace llvm {
  class AllocaInst;
  class CallInst;
  class Function;
  class FastISel;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineFrameInfo;
  class MachineInstr;
  class MachineModuleInfo;
  class DwarfWriter;
  class SDNode;
  class SDValue;
  class SelectionDAG;
  class TargetData;
  class TargetMachine;
  class TargetRegisterClass;
  class TargetSubtarget;
  class Value;

//===----------------------------------------------------------------------===//
/// TargetLowering - This class defines information used to lower LLVM code to
/// legal SelectionDAG operators that the target instruction selector can accept
/// natively.
60/// 61/// This class also defines callbacks that targets must implement to lower 62/// target-specific constructs to SelectionDAG operators. 63/// 64class TargetLowering { 65public: 66 /// LegalizeAction - This enum indicates whether operations are valid for a 67 /// target, and if not, what action should be used to make them valid. 68 enum LegalizeAction { 69 Legal, // The target natively supports this operation. 70 Promote, // This operation should be executed in a larger type. 71 Expand, // Try to expand this to other ops, otherwise use a libcall. 72 Custom // Use the LowerOperation hook to implement custom lowering. 73 }; 74 75 enum OutOfRangeShiftAmount { 76 Undefined, // Oversized shift amounts are undefined (default). 77 Mask, // Shift amounts are auto masked (anded) to value size. 78 Extend // Oversized shift pulls in zeros or sign bits. 79 }; 80 81 enum BooleanContent { // How the target represents true/false values. 82 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. 83 ZeroOrOneBooleanContent, // All bits zero except for bit 0. 84 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0. 85 }; 86 87 enum SchedPreference { 88 SchedulingForLatency, // Scheduling for shortest total latency. 89 SchedulingForRegPressure // Scheduling for lowest register pressure. 90 }; 91 92 explicit TargetLowering(TargetMachine &TM); 93 virtual ~TargetLowering(); 94 95 TargetMachine &getTargetMachine() const { return TM; } 96 const TargetData *getTargetData() const { return TD; } 97 98 bool isBigEndian() const { return !IsLittleEndian; } 99 bool isLittleEndian() const { return IsLittleEndian; } 100 MVT getPointerTy() const { return PointerTy; } 101 MVT getShiftAmountTy() const { return ShiftAmountTy; } 102 OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; } 103 104 /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC 105 /// codegen. 
106 bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; } 107 108 /// isSelectExpensive - Return true if the select operation is expensive for 109 /// this target. 110 bool isSelectExpensive() const { return SelectIsExpensive; } 111 112 /// isIntDivCheap() - Return true if integer divide is usually cheaper than 113 /// a sequence of several shifts, adds, and multiplies for this target. 114 bool isIntDivCheap() const { return IntDivIsCheap; } 115 116 /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of 117 /// srl/add/sra. 118 bool isPow2DivCheap() const { return Pow2DivIsCheap; } 119 120 /// getSetCCResultType - Return the ValueType of the result of SETCC 121 /// operations. Also used to obtain the target's preferred type for 122 /// the condition operand of SELECT and BRCOND nodes. In the case of 123 /// BRCOND the argument passed is MVT::Other since there are no other 124 /// operands to get a type hint from. 125 virtual MVT getSetCCResultType(MVT VT) const; 126 127 /// getBooleanContents - For targets without i1 registers, this gives the 128 /// nature of the high-bits of boolean values held in types wider than i1. 129 /// "Boolean values" are special true/false values produced by nodes like 130 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND. 131 /// Not to be confused with general values promoted from i1. 132 BooleanContent getBooleanContents() const { return BooleanContents;} 133 134 /// getSchedulingPreference - Return target scheduling preference. 135 SchedPreference getSchedulingPreference() const { 136 return SchedPreferenceInfo; 137 } 138 139 /// getRegClassFor - Return the register class that should be used for the 140 /// specified value type. This may only be called on legal types. 
  TargetRegisterClass *getRegClassFor(MVT VT) const {
    assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
    TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// isTypeLegal - Return true if the target has native support for the
  /// specified value type.  This means that it has a register that directly
  /// holds it without promotions or expansions.
  bool isTypeLegal(MVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
    // A type is legal iff it is simple and a register class was registered
    // for it (extended types never have an entry in RegClassForVT).
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT()] != 0;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - This is a bitvector that contains two bits for each
    /// value type, where the two bits correspond to the LegalizeAction enum.
    /// This can be queried with "getTypeAction(VT)".
    /// Packing: 16 two-bit fields per uint32_t word; value type I lives in
    /// word I>>4 at bit offset (2*I)&31.
    uint32_t ValueTypeActions[2];
  public:
    ValueTypeActionImpl() {
      ValueTypeActions[0] = ValueTypeActions[1] = 0;
    }
    ValueTypeActionImpl(const ValueTypeActionImpl &RHS) {
      ValueTypeActions[0] = RHS.ValueTypeActions[0];
      ValueTypeActions[1] = RHS.ValueTypeActions[1];
    }

    /// getTypeAction - Return the LegalizeAction for the given type.
    /// Extended types are not in the table: vectors are split (Expand) once
    /// they have a power-of-two length, otherwise widened (Promote); integers
    /// are first promoted to a power-of-two size, then expanded if necessary.
    LegalizeAction getTypeAction(MVT VT) const {
      if (VT.isExtended()) {
        if (VT.isVector()) {
          return VT.isPow2VectorType() ? Expand : Promote;
        }
        if (VT.isInteger())
          // First promote to a power-of-two size, then expand if necessary.
          return VT == VT.getRoundIntegerType() ? Expand : Promote;
        assert(0 && "Unsupported extended type!");
        return Legal;
      }
      unsigned I = VT.getSimpleVT();
      assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      // Extract the 2-bit action field for simple type I.
      return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3);
    }
    /// setTypeAction - Record the LegalizeAction for a simple value type.
    /// NOTE: this ORs the bits in; it assumes the field was previously 0.
    void setTypeAction(MVT VT, LegalizeAction Action) {
      unsigned I = VT.getSimpleVT();
      assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      ValueTypeActions[I>>4] |= Action << ((I*2) & 31);
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal (return 'Legal') or we need to promote it to a larger
  /// type (return 'Promote'), or we need to expand it into multiple registers
  /// of smaller integer type (return 'Expand').  'Custom' is not an option.
  LegalizeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// getTypeToTransformTo - For types supported by the target, this is an
  /// identity function.  For types that must be promoted to larger types, this
  /// returns the larger type to promote to.  For integer types that are larger
  /// than the largest integer register, this contains one step in the expansion
  /// to get to the smaller register.  For illegal floating point types, this
  /// returns the integer type to transform to.
  MVT getTypeToTransformTo(MVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT() < array_lengthof(TransformToType));
      MVT NVT = TransformToType[VT.getSimpleVT()];
      // The table must encode a single legalization step: a promoted result
      // may not itself need promotion.
      assert(getTypeAction(NVT) != Promote &&
             "Promote may not follow Expand or Promote");
      return NVT;
    }

    if (VT.isVector()) {
      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Vector length is a power of 2 - split to half the size.
        unsigned NumElts = VT.getVectorNumElements();
        MVT EltVT = VT.getVectorElementType();
        return (NumElts == 1) ? EltVT : MVT::getVectorVT(EltVT, NumElts / 2);
      }
      // Promote to a power of two size, avoiding multi-step promotion.
      return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
    } else if (VT.isInteger()) {
      MVT NVT = VT.getRoundIntegerType();
      if (NVT == VT)
        // Size is a power of two - expand to half the size.
        return MVT::getIntegerVT(VT.getSizeInBits() / 2);
      else
        // Promote to a power of two size, avoiding multi-step promotion.
        return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
    }
    assert(0 && "Unsupported extended type!");
    return MVT(); // Not reached
  }

  /// getTypeToExpandTo - For types supported by the target, this is an
  /// identity function.  For types that must be expanded (i.e. integer types
  /// that are larger than the largest integer register or illegal floating
  /// point types), this returns the largest legal type it will be expanded to.
  MVT getTypeToExpandTo(MVT VT) const {
    assert(!VT.isVector());
    // Iterate one expansion step at a time until the type is Legal.
    while (true) {
      switch (getTypeAction(VT)) {
      case Legal:
        return VT;
      case Expand:
        VT = getTypeToTransformTo(VT);
        break;
      default:
        assert(false && "Type is not legal nor is it to be expanded!");
        return VT;
      }
    }
    return VT;
  }

  /// getVectorTypeBreakdown - Vector types are broken down into some number of
  /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
  /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
  /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  ///
  unsigned getVectorTypeBreakdown(MVT VT,
                                  MVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  /// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the
  /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
  /// this is the case, it returns true and stores the intrinsic
  /// information into the IntrinsicInfo that was passed to the function.
  typedef struct IntrinsicInfo {
    unsigned     opc;         // target opcode
    MVT          memVT;       // memory VT
    const Value* ptrVal;      // value representing memory location
    int          offset;      // offset off of ptrVal
    unsigned     align;       // alignment
    bool         vol;         // is volatile?
    bool         readMem;     // reads memory?
    bool         writeMem;    // writes memory?
  } IntrinisicInfo;
  // NOTE(review): the typedef alias above is misspelled ("IntrinisicInfo");
  // kept as-is because external code may reference the misspelled name.

  /// getTgtMemIntrinsic - Default implementation: no intrinsic maps to a
  /// MemIntrinsicNode.  Targets override this for memory-touching intrinsics.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo& Info,
                                  CallInst &I, unsigned Intrinsic) {
    return false;
  }

  /// getWidenVectorType: given a vector type, returns the type to widen to
  /// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
  /// If there is no vector type that we want to widen to, returns MVT::Other.
  /// When and where to widen is target dependent based on the cost of
  /// scalarizing vs using the wider vector type.
  virtual MVT getWidenVectorType(MVT VT) const;

  // Iteration over the FP immediates this target can materialize natively.
  typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator;
  legal_fpimm_iterator legal_fpimm_begin() const {
    return LegalFPImmediates.begin();
  }
  legal_fpimm_iterator legal_fpimm_end() const {
    return LegalFPImmediates.end();
  }

  /// isShuffleMaskLegal - Targets can use this to indicate that they only
  /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
  /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
  /// are assumed to be legal.
317 virtual bool isShuffleMaskLegal(SDValue Mask, MVT VT) const { 318 return true; 319 } 320 321 /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is 322 /// used by Targets can use this to indicate if there is a suitable 323 /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant 324 /// pool entry. 325 virtual bool isVectorClearMaskLegal(const std::vector<SDValue> &BVOps, 326 MVT EVT, 327 SelectionDAG &DAG) const { 328 return false; 329 } 330 331 /// getOperationAction - Return how this operation should be treated: either 332 /// it is legal, needs to be promoted to a larger size, needs to be 333 /// expanded to some other code sequence, or the target has a custom expander 334 /// for it. 335 LegalizeAction getOperationAction(unsigned Op, MVT VT) const { 336 if (VT.isExtended()) return Expand; 337 assert(Op < array_lengthof(OpActions) && 338 (unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 && 339 "Table isn't big enough!"); 340 return (LegalizeAction)((OpActions[Op] >> (2*VT.getSimpleVT())) & 3); 341 } 342 343 /// isOperationLegalOrCustom - Return true if the specified operation is 344 /// legal on this target or can be made legal with custom lowering. This 345 /// is used to help guide high-level lowering decisions. 346 bool isOperationLegalOrCustom(unsigned Op, MVT VT) const { 347 return (VT == MVT::Other || isTypeLegal(VT)) && 348 (getOperationAction(Op, VT) == Legal || 349 getOperationAction(Op, VT) == Custom); 350 } 351 352 /// isOperationLegal - Return true if the specified operation is legal on this 353 /// target. 354 bool isOperationLegal(unsigned Op, MVT VT) const { 355 return (VT == MVT::Other || isTypeLegal(VT)) && 356 getOperationAction(Op, VT) == Legal; 357 } 358 359 /// getLoadExtAction - Return how this load with extension should be treated: 360 /// either it is legal, needs to be promoted to a larger size, needs to be 361 /// expanded to some other code sequence, or the target has a custom expander 362 /// for it. 
  LegalizeAction getLoadExtAction(unsigned LType, MVT VT) const {
    assert(LType < array_lengthof(LoadExtActions) &&
           (unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 &&
           "Table isn't big enough!");
    // 2-bit action field per simple value type within LoadExtActions[LType].
    return (LegalizeAction)((LoadExtActions[LType] >> (2*VT.getSimpleVT())) & 3);
  }

  /// isLoadExtLegal - Return true if the specified load with extension is legal
  /// on this target.
  bool isLoadExtLegal(unsigned LType, MVT VT) const {
    return VT.isSimple() &&
      (getLoadExtAction(LType, VT) == Legal ||
       getLoadExtAction(LType, VT) == Custom);
  }

  /// getTruncStoreAction - Return how this store with truncation should be
  /// treated: either it is legal, needs to be promoted to a larger size, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getTruncStoreAction(MVT ValVT,
                                     MVT MemVT) const {
    assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) &&
           (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 &&
           "Table isn't big enough!");
    // Table is indexed by source value type; the 2-bit field within the entry
    // is selected by the (truncated) memory type.
    return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT()] >>
                             (2*MemVT.getSimpleVT())) & 3);
  }

  /// isTruncStoreLegal - Return true if the specified store with truncation is
  /// legal on this target.
  bool isTruncStoreLegal(MVT ValVT, MVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  /// getIndexedLoadAction - Return how the indexed load should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < array_lengthof(IndexedModeActions[0]) &&
           (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0][0])*4 &&
           "Table isn't big enough!");
    // IndexedModeActions[0] holds the load table (row 1 is stores), one
    // 2-bit action field per simple value type.
    return (LegalizeAction)((IndexedModeActions[0][IdxMode] >>
                             (2*VT.getSimpleVT())) & 3);
  }

  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
  /// on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, MVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT) == Legal ||
       getIndexedLoadAction(IdxMode, VT) == Custom);
  }

  /// getIndexedStoreAction - Return how the indexed store should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < array_lengthof(IndexedModeActions[1]) &&
           (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 &&
           "Table isn't big enough!");
    return (LegalizeAction)((IndexedModeActions[1][IdxMode] >>
                             (2*VT.getSimpleVT())) & 3);
  }

  /// isIndexedStoreLegal - Return true if the specified indexed store is legal
  /// on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, MVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT) == Legal ||
       getIndexedStoreAction(IdxMode, VT) == Custom);
  }

  /// getConvertAction - Return how the conversion should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getConvertAction(MVT FromVT, MVT ToVT) const {
    assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) &&
           (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 &&
           "Table isn't big enough!");
    // Table is indexed by source type; the 2-bit field within the entry is
    // selected by the destination type.
    return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT()] >>
                             (2*ToVT.getSimpleVT())) & 3);
  }

  /// isConvertLegal - Return true if the specified conversion is legal
  /// on this target.
  bool isConvertLegal(MVT FromVT, MVT ToVT) const {
    return isTypeLegal(FromVT) && isTypeLegal(ToVT) &&
      (getConvertAction(FromVT, ToVT) == Legal ||
       getConvertAction(FromVT, ToVT) == Custom);
  }

  /// getCondCodeAction - Return how the condition code should be treated:
  /// either it is legal, needs to be expanded to some other code sequence,
  /// or the target has a custom expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           (unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 &&
           "Table isn't big enough!");
    LegalizeAction Action = (LegalizeAction)
      ((CondCodeActions[CC] >> (2*VT.getSimpleVT())) & 3);
    // Promote is not meaningful for condition codes.
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// isCondCodeLegal - Return true if the specified condition code is legal
  /// on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }


  /// getTypeToPromoteTo - If the action for this operation is to promote, this
  /// method returns the ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.getSimpleVT()));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    // Otherwise, walk up the SimpleValueType enum (types are ordered by
    // increasing size within a class) until we find a legal type for which
    // the operation does not itself need promotion.
    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.getSimpleVT()+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// getValueType - Return the MVT corresponding to this LLVM type.
  /// This is fixed by the LLVM operations except for the pointer size.  If
  /// AllowUnknown is true, this will return MVT::Other for types with no MVT
  /// counterpart (e.g. structs), otherwise it will assert.
  MVT getValueType(const Type *Ty, bool AllowUnknown = false) const {
    MVT VT = MVT::getMVT(Ty, AllowUnknown);
    // iPTR is resolved to the target's concrete pointer type.
    return VT == MVT::iPTR ? PointerTy : VT;
  }

  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
  /// function arguments in the caller parameter area.  This is the actual
  /// alignment, not its logarithm.
  virtual unsigned getByValTypeAlignment(const Type *Ty) const;

  /// getRegisterType - Return the type of registers that this ValueType will
  /// eventually require.
  MVT getRegisterType(MVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT() < array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT()];
    }
    if (VT.isVector()) {
      // For extended vectors, the register type is whatever the breakdown
      // produces for each piece.
      MVT VT1, RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      // Extended integers: step toward legality and recurse.
      return getRegisterType(getTypeToTransformTo(VT));
    }
    assert(0 && "Unsupported extended type!");
    return MVT(); // Not reached
  }

  /// getNumRegisters - Return the number of registers that this ValueType will
  /// eventually require.  This is one for any types promoted to live in larger
  /// registers, but may be more than one for types (like i64) that are split
  /// into pieces.  For types like i140, which are first promoted then expanded,
  /// it is the number of registers needed to hold all the bits of the original
  /// type.  For an i140 on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(MVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT() < array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT()];
    }
    if (VT.isVector()) {
      MVT VT1, VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      // Round the bit width up to a whole number of registers.
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    assert(0 && "Unsupported extended type!");
    return 0; // Not reached
  }

  /// ShouldShrinkFPConstant - If true, then instruction selection should
  /// seek to shrink the FP constant of the specified type to a smaller type
  /// in order to save space and / or reduce runtime.
571 virtual bool ShouldShrinkFPConstant(MVT VT) const { return true; } 572 573 /// hasTargetDAGCombine - If true, the target has custom DAG combine 574 /// transformations that it can perform for the specified node. 575 bool hasTargetDAGCombine(ISD::NodeType NT) const { 576 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); 577 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7)); 578 } 579 580 /// This function returns the maximum number of store operations permitted 581 /// to replace a call to llvm.memset. The value is set by the target at the 582 /// performance threshold for such a replacement. 583 /// @brief Get maximum # of store operations permitted for llvm.memset 584 unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; } 585 586 /// This function returns the maximum number of store operations permitted 587 /// to replace a call to llvm.memcpy. The value is set by the target at the 588 /// performance threshold for such a replacement. 589 /// @brief Get maximum # of store operations permitted for llvm.memcpy 590 unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; } 591 592 /// This function returns the maximum number of store operations permitted 593 /// to replace a call to llvm.memmove. The value is set by the target at the 594 /// performance threshold for such a replacement. 595 /// @brief Get maximum # of store operations permitted for llvm.memmove 596 unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; } 597 598 /// This function returns true if the target allows unaligned memory accesses. 599 /// This is used, for example, in situations where an array copy/move/set is 600 /// converted to a sequence of store operations. It's use helps to ensure that 601 /// such replacements don't generate code that causes an alignment error 602 /// (trap) on the target machine. 603 /// @brief Determine if the target supports unaligned memory accesses. 
  bool allowsUnalignedMemoryAccesses() const {
    return allowUnalignedMemoryAccesses;
  }

  /// getOptimalMemOpType - Returns the target specific optimal type for load
  /// and store operations as a result of memset, memcpy, and memmove lowering.
  /// It returns MVT::iAny if SelectionDAG should be responsible for
  /// determining it.
  virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
                                  bool isSrcConst, bool isSrcStr) const {
    // Default: defer the choice to SelectionDAG.
    return MVT::iAny;
  }

  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
  /// to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
  /// to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// getStackPointerRegisterToSaveRestore - If a physical register, this
  /// specifies the register that llvm.savestack/llvm.restorestack should save
  /// and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// getExceptionAddressRegister - If a physical register, this returns
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned getExceptionAddressRegister() const {
    return ExceptionPointerRegister;
  }

  /// getExceptionSelectorRegister - If a physical register, this returns
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
646 unsigned getExceptionSelectorRegister() const { 647 return ExceptionSelectorRegister; 648 } 649 650 /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never 651 /// set, the default is 200) 652 unsigned getJumpBufSize() const { 653 return JumpBufSize; 654 } 655 656 /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes 657 /// (if never set, the default is 0) 658 unsigned getJumpBufAlignment() const { 659 return JumpBufAlignment; 660 } 661 662 /// getIfCvtBlockLimit - returns the target specific if-conversion block size 663 /// limit. Any block whose size is greater should not be predicated. 664 unsigned getIfCvtBlockSizeLimit() const { 665 return IfCvtBlockSizeLimit; 666 } 667 668 /// getIfCvtDupBlockLimit - returns the target specific size limit for a 669 /// block to be considered for duplication. Any block whose size is greater 670 /// should not be duplicated to facilitate its predication. 671 unsigned getIfCvtDupBlockSizeLimit() const { 672 return IfCvtDupBlockSizeLimit; 673 } 674 675 /// getPrefLoopAlignment - return the preferred loop alignment. 676 /// 677 unsigned getPrefLoopAlignment() const { 678 return PrefLoopAlignment; 679 } 680 681 /// getPreIndexedAddressParts - returns true by value, base pointer and 682 /// offset pointer and addressing mode by reference if the node's address 683 /// can be legally represented as pre-indexed load / store address. 684 virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, 685 SDValue &Offset, 686 ISD::MemIndexedMode &AM, 687 SelectionDAG &DAG) const { 688 return false; 689 } 690 691 /// getPostIndexedAddressParts - returns true by value, base pointer and 692 /// offset pointer and addressing mode by reference if this node can be 693 /// combined with a load / store to form a post-indexed load / store. 
  /// The default implementation supports no post-indexed addressing.
  virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                          SDValue &Base, SDValue &Offset,
                                          ISD::MemIndexedMode &AM,
                                          SelectionDAG &DAG) const {
    return false;
  }

  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
  /// jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// isOffsetFoldingLegal - Return true if folding a constant offset
  /// with the given GlobalAddress is legal.  It is frequently not legal in
  /// PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
  /// SDValues for returning information from TargetLowering to its clients
  /// that want to combine
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    SDValue Old;      // Node being replaced.
    SDValue New;      // Replacement node.

    explicit TargetLoweringOpt(SelectionDAG &InDAG) : DAG(InDAG) {}

    /// CombineTo - Record a replacement (Old -> New) and return true so the
    /// caller can signal that a combine happened.
    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }

    /// ShrinkDemandedConstant - Check to see if the specified operand of the
    /// specified instruction is a constant integer.  If so, check to see if
    /// there are any bits set in the constant that are not demanded.  If so,
    /// shrink the constant and return true.
    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
  };

  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
  /// DemandedMask bits of the result of Op are ever used downstream.  If we can
  /// use this information to simplify Op, create a new simplified DAG node and
  /// return true, returning the original and new nodes in Old and New.
  /// Otherwise, analyze the expression and return a mask of KnownOne and
  /// KnownZero bits for the expression (used to simplify the caller).
  /// The KnownZero/One bits may only be accurate for those bits in the
  /// DemandedMask.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            APInt &KnownZero, APInt &KnownOne,
                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                              const APInt &Mask,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const SelectionDAG &DAG,
                                              unsigned Depth = 0) const;

  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
  /// targets that want to expose additional information about sign bits to the
  /// DAG Combiner.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth = 0) const;

  /// DAGCombinerInfo - Opaque handle the DAG combiner passes to target
  /// combine callbacks; exposes the DAG plus legalization-phase queries.
  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    bool BeforeLegalize;
    bool CalledByLegalizer;
  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc)
      : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return BeforeLegalize; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To);
    SDValue CombineTo(SDNode *N, SDValue Res);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1);
  };

  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
  /// and cc.  If it is unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
                        ISD::CondCode Cond, bool foldBooleans,
                        DAGCombinerInfo &DCI) const;

  /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
  /// node is a GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;

  /// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is
  /// loading 'Bytes' bytes from a location that is 'Dist' units away from the
  /// location that the 'Base' load is loading from.
  bool isConsecutiveLoad(SDNode *LD, SDNode *Base, unsigned Bytes, int Dist,
                         const MachineFrameInfo *MFI) const;

  /// PerformDAGCombine - This method will be invoked for all target nodes and
  /// for any target-independent nodes that the target has registered with
  /// invoke it for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0   - No change was made
  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
  ///   otherwise          - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

protected:
  /// setUsesGlobalOffsetTable - Specify that this target does or doesn't use a
  /// GOT for PC-relative code.
  void setUsesGlobalOffsetTable(bool V) { UsesGlobalOffsetTable = V; }

  /// setShiftAmountType - Describe the type that should be used for shift
  /// amounts.  This type defaults to the pointer type.
  void setShiftAmountType(MVT VT) { ShiftAmountTy = VT; }

  /// setBooleanContents - Specify how the target extends the result of a
  /// boolean value from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }

  /// setSchedulingPreference - Specify the target scheduling preference.
  void setSchedulingPreference(SchedPreference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// setShiftAmountFlavor - Describe how the target handles out of range shift
  /// amounts.
  void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) {
    ShiftAmtHandling = OORSA;
  }

  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
  /// use _setjmp to implement llvm.setjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
  /// use _longjmp to implement llvm.longjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// setStackPointerRegisterToSaveRestore - If set to a physical register, this
  /// specifies the register that llvm.savestack/llvm.restorestack should save
  /// and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// setExceptionPointerRegister - If set to a physical register, this sets
  /// the register that receives the exception address on entry to a landing
  /// pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// setExceptionSelectorRegister - If set to a physical register, this sets
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }

  /// setSelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  void setSelectIsExpensive() { SelectIsExpensive = true; }

  /// setIntDivIsCheap - Tells the code generator whether integer divide is
  /// cheap (isCheap == true) or expensive and, if possible, should be replaced
  /// by an alternate sequence of instructions not containing an integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by power of two, and let the target handle
  /// it.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

  /// addRegisterClass - Add the specified register class as an available
  /// regclass for the specified value type.  This indicates the selector can
  /// handle values of that class natively.
  void addRegisterClass(MVT VT, TargetRegisterClass *RC) {
    assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.getSimpleVT()] = RC;
  }

  /// computeRegisterProperties - Once all of the register classes are added,
  /// this allows us to compute derived properties we expose.
  void computeRegisterProperties();

  /// setOperationAction - Indicate that the specified operation does not work
  /// with the specified type and indicate what to do about it.
910 void setOperationAction(unsigned Op, MVT VT, 911 LegalizeAction Action) { 912 assert((unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 && 913 Op < array_lengthof(OpActions) && "Table isn't big enough!"); 914 OpActions[Op] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 915 OpActions[Op] |= (uint64_t)Action << VT.getSimpleVT()*2; 916 } 917 918 /// setLoadExtAction - Indicate that the specified load with extension does 919 /// not work with the with specified type and indicate what to do about it. 920 void setLoadExtAction(unsigned ExtType, MVT VT, 921 LegalizeAction Action) { 922 assert((unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 && 923 ExtType < array_lengthof(LoadExtActions) && 924 "Table isn't big enough!"); 925 LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 926 LoadExtActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2; 927 } 928 929 /// setTruncStoreAction - Indicate that the specified truncating store does 930 /// not work with the with specified type and indicate what to do about it. 931 void setTruncStoreAction(MVT ValVT, MVT MemVT, 932 LegalizeAction Action) { 933 assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && 934 (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && 935 "Table isn't big enough!"); 936 TruncStoreActions[ValVT.getSimpleVT()] &= ~(uint64_t(3UL) << 937 MemVT.getSimpleVT()*2); 938 TruncStoreActions[ValVT.getSimpleVT()] |= (uint64_t)Action << 939 MemVT.getSimpleVT()*2; 940 } 941 942 /// setIndexedLoadAction - Indicate that the specified indexed load does or 943 /// does not work with the with specified type and indicate what to do abort 944 /// it. 
NOTE: All indexed mode loads are initialized to Expand in 945 /// TargetLowering.cpp 946 void setIndexedLoadAction(unsigned IdxMode, MVT VT, 947 LegalizeAction Action) { 948 assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0])*4 && 949 IdxMode < array_lengthof(IndexedModeActions[0]) && 950 "Table isn't big enough!"); 951 IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 952 IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; 953 } 954 955 /// setIndexedStoreAction - Indicate that the specified indexed store does or 956 /// does not work with the with specified type and indicate what to do about 957 /// it. NOTE: All indexed mode stores are initialized to Expand in 958 /// TargetLowering.cpp 959 void setIndexedStoreAction(unsigned IdxMode, MVT VT, 960 LegalizeAction Action) { 961 assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 && 962 IdxMode < array_lengthof(IndexedModeActions[1]) && 963 "Table isn't big enough!"); 964 IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 965 IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; 966 } 967 968 /// setConvertAction - Indicate that the specified conversion does or does 969 /// not work with the with specified type and indicate what to do about it. 970 void setConvertAction(MVT FromVT, MVT ToVT, 971 LegalizeAction Action) { 972 assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && 973 (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && 974 "Table isn't big enough!"); 975 ConvertActions[FromVT.getSimpleVT()] &= ~(uint64_t(3UL) << 976 ToVT.getSimpleVT()*2); 977 ConvertActions[FromVT.getSimpleVT()] |= (uint64_t)Action << 978 ToVT.getSimpleVT()*2; 979 } 980 981 /// setCondCodeAction - Indicate that the specified condition code is or isn't 982 /// supported on the target and indicate what to do about it. 
  void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action) {
    assert((unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 &&
           (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
    CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.getSimpleVT()*2;
  }

  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
  /// promotion code defaults to trying a larger integer/fp until it can find
  /// one that works.  If that default is insufficient, this method can be used
  /// by the target to override the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.getSimpleVT())] =
      DestVT.getSimpleVT();
  }

  /// addLegalFPImmediate - Indicate that this target can instruction select
  /// the specified FP immediate natively.
  void addLegalFPImmediate(const APFloat& Imm) {
    LegalFPImmediates.push_back(Imm);
  }

  /// setTargetDAGCombine - Targets should invoke this method for each target
  /// independent node that they want to provide a custom DAG combiner for by
  /// implementing the PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    // One bit per opcode: byte index is NT/8, bit index is NT%8.
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }

  /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
  /// bytes); default is 200.
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// setJumpBufAlignment - Set the target's required jmp_buf buffer
  /// alignment (in bytes); default is 0.
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
  /// limit (in number of instructions); default is 2.
  void setIfCvtBlockSizeLimit(unsigned Limit) {
    IfCvtBlockSizeLimit = Limit;
  }

  /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
  /// of instructions) to be considered for code duplication during
  /// if-conversion; default is 2.
  void setIfCvtDupBlockSizeLimit(unsigned Limit) {
    IfCvtDupBlockSizeLimit = Limit;
  }

  /// setPrefLoopAlignment - Set the target's preferred loop alignment.  Default
  /// alignment is zero, meaning the target does not care about loop alignment.
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

public:

  virtual const TargetSubtarget *getSubtarget() {
    assert(0 && "Not Implemented");
    return NULL;    // this is here to silence compiler errors
  }

  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGLowering code knows how to lower these.
  //

  /// LowerArguments - This hook must be implemented to indicate how we should
  /// lower the arguments for the specified function, into the specified DAG.
  virtual void
  LowerArguments(Function &F, SelectionDAG &DAG,
                 SmallVectorImpl<SDValue>& ArgValues);

  /// LowerCallTo - This hook lowers an abstract call to a function into an
  /// actual call.  This returns a pair of operands.  The first element is the
  /// return value for the function (if RetTy is not VoidTy).  The second
  /// element is the outgoing token chain.
  struct ArgListEntry {
    SDValue Node;
    const Type* Ty;
    bool isSExt  : 1;    // Sign-extended argument.
    bool isZExt  : 1;    // Zero-extended argument.
    bool isInReg : 1;    // Argument passed in register.
    bool isSRet  : 1;    // Struct-return pointer.
    bool isNest  : 1;    // Nested-function static chain.
    bool isByVal : 1;    // Pass-by-value aggregate.
    uint16_t Alignment;

    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
  };
  typedef std::vector<ArgListEntry> ArgListTy;
  virtual std::pair<SDValue, SDValue>
  LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
              bool isVarArg, bool isInreg, unsigned CallingConv,
              bool isTailCall, SDValue Callee, ArgListTy &Args,
              SelectionDAG &DAG);

  /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
  /// memcpy.  This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call.  This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// If AlwaysInline is true, the size is constant and the target should not
  /// emit any calls and is strongly encouraged to attempt to emit inline code
  /// even if it is beyond the usual threshold because this intrinsic is being
  /// expanded in a place where calls are not feasible (e.g. within the prologue
  /// for another call).  If the target chooses to decline an AlwaysInline
  /// request here, legalize will resort to using simple loads and stores.
  virtual SDValue
  EmitTargetCodeForMemcpy(SelectionDAG &DAG,
                          SDValue Chain,
                          SDValue Op1, SDValue Op2,
                          SDValue Op3, unsigned Align,
                          bool AlwaysInline,
                          const Value *DstSV, uint64_t DstOff,
                          const Value *SrcSV, uint64_t SrcOff) {
    return SDValue();
  }

  /// EmitTargetCodeForMemmove - Emit target-specific code that performs a
  /// memmove.  This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call.  This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used.
  virtual SDValue
  EmitTargetCodeForMemmove(SelectionDAG &DAG,
                           SDValue Chain,
                           SDValue Op1, SDValue Op2,
                           SDValue Op3, unsigned Align,
                           const Value *DstSV, uint64_t DstOff,
                           const Value *SrcSV, uint64_t SrcOff) {
    return SDValue();
  }

  /// EmitTargetCodeForMemset - Emit target-specific code that performs a
  /// memset.  This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple stores and can be more
  /// efficient than using a library call.  This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used.
  virtual SDValue
  EmitTargetCodeForMemset(SelectionDAG &DAG,
                          SDValue Chain,
                          SDValue Op1, SDValue Op2,
                          SDValue Op3, unsigned Align,
                          const Value *DstSV, uint64_t DstOff) {
    return SDValue();
  }

  /// LowerOperationWrapper - This callback is invoked by the type legalizer
  /// to legalize nodes with an illegal operand type but legal result types.
  /// It replaces the LowerOperation callback in the type Legalizer.
  /// The reason we can not do away with LowerOperation entirely is that
  /// LegalizeDAG isn't yet ready to use this callback.
  /// TODO: Consider merging with ReplaceNodeResults.

  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG);

  /// LowerOperation - This callback is invoked for operations that are
  /// unsupported by the target, which are registered to use 'custom' lowering,
  /// and whose defined values are all legal.
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation of this aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

  /// ReplaceNodeResults - This callback is invoked when a node result type is
  /// illegal for the target, and the operation was registered to use 'custom'
  /// lowering for that result type.  The target places new result values for
  /// the node in Results (their number and types must exactly match those of
  /// the original return values of the node), or leaves Results empty, which
  /// indicates that the node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation aborts.
1174 virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, 1175 SelectionDAG &DAG) { 1176 assert(0 && "ReplaceNodeResults not implemented for this target!"); 1177 } 1178 1179 /// IsEligibleForTailCallOptimization - Check whether the call is eligible for 1180 /// tail call optimization. Targets which want to do tail call optimization 1181 /// should override this function. 1182 virtual bool IsEligibleForTailCallOptimization(CallSDNode *Call, 1183 SDValue Ret, 1184 SelectionDAG &DAG) const { 1185 return false; 1186 } 1187 1188 /// CheckTailCallReturnConstraints - Check whether CALL node immediatly 1189 /// preceeds the RET node and whether the return uses the result of the node 1190 /// or is a void return. This function can be used by the target to determine 1191 /// eligiblity of tail call optimization. 1192 static bool CheckTailCallReturnConstraints(CallSDNode *TheCall, SDValue Ret) { 1193 unsigned NumOps = Ret.getNumOperands(); 1194 if ((NumOps == 1 && 1195 (Ret.getOperand(0) == SDValue(TheCall,1) || 1196 Ret.getOperand(0) == SDValue(TheCall,0))) || 1197 (NumOps > 1 && 1198 Ret.getOperand(0) == SDValue(TheCall, 1199 TheCall->getNumValues()-1) && 1200 Ret.getOperand(1) == SDValue(TheCall,0))) 1201 return true; 1202 return false; 1203 } 1204 1205 /// GetPossiblePreceedingTailCall - Get preceeding TailCallNodeOpCode node if 1206 /// it exists. Skip a possible ISD::TokenFactor. 1207 static SDValue GetPossiblePreceedingTailCall(SDValue Chain, 1208 unsigned TailCallNodeOpCode) { 1209 if (Chain.getOpcode() == TailCallNodeOpCode) { 1210 return Chain; 1211 } else if (Chain.getOpcode() == ISD::TokenFactor) { 1212 if (Chain.getNumOperands() && 1213 Chain.getOperand(0).getOpcode() == TailCallNodeOpCode) 1214 return Chain.getOperand(0); 1215 } 1216 return Chain; 1217 } 1218 1219 /// getTargetNodeName() - This method returns the name of a target specific 1220 /// DAG node. 
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  virtual FastISel *
  createFastISel(MachineFunction &,
                 MachineModuleInfo *, DwarfWriter *,
                 DenseMap<const Value *, unsigned> &,
                 DenseMap<const BasicBlock *, MachineBasicBlock *> &,
                 DenseMap<const AllocaInst *, int> &
#ifndef NDEBUG
                 , SmallSet<Instruction*, 8> &CatchInfoLost
#endif
                 ) {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  /// AsmOperandInfo - This contains information for each constraint that we are
  /// lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// ConstraintCode - This contains the actual string for the code, like "m".
    /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that
    /// most closely matches the operand.
    std::string ConstraintCode;

    /// ConstraintType - Information about the constraint code, e.g. Register,
    /// RegisterClass, Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType;

    /// CallOperandVal - If this is the result output operand or a
    /// clobber, this is null, otherwise it is the incoming operand to the
    /// CallInst.  This gets modified as the asm is processed.
    Value *CallOperandVal;

    /// ConstraintVT - The ValueType for the operand value.
    MVT ConstraintVT;

    /// isMatchingInputConstraint - Return true if this is an input operand that
    /// is a matching constraint like "4".
    bool isMatchingInputConstraint() const;

    /// getMatchedOperand - If this is an input matching constraint, this method
    /// returns the output operand it matches.
    unsigned getMatchedOperand() const;

    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintType(TargetLowering::C_Unknown),
        CallOperandVal(0), ConstraintVT(MVT::Other) {
    }
  };

  /// ComputeConstraintToUse - Determines the constraint code and constraint
  /// type to use for the specific AsmOperandInfo, setting
  /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
  /// being passed in is available, it can be passed in as Op, otherwise an
  /// empty SDValue can be passed.  If hasMemory is true it means one of the asm
  /// constraint of the inline asm instruction being processed is 'm'.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      bool hasMemory,
                                      SelectionDAG *DAG = 0) const;

  /// getConstraintType - Given a constraint, return the type of constraint it
  /// is for this target.
  virtual ConstraintType getConstraintType(const std::string &Constraint) const;

  /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
  /// return a list of registers that can be used to satisfy the constraint.
  /// This should only be used for C_RegisterClass constraints.
  virtual std::vector<unsigned>
  getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                    MVT VT) const;

  /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
  /// {edx}), return the register number and the register class for the
  /// register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints.  On error,
  /// this returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT VT) const;

  /// LowerXConstraint - try to replace an X constraint, which matches anything,
  /// with another that has more specific requirements based on the type of the
  /// corresponding operand.  This returns null if there is no replacement to
  /// make.
  virtual const char *LowerXConstraint(MVT ConstraintVT) const;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector.  If it is invalid, don't add anything to Ops.  If hasMemory is
  /// true it means one of the asm constraint of the inline asm instruction
  /// being processed is 'm'.
  virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
                                            bool hasMemory,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Scheduler hooks
  //

  // EmitInstrWithCustomInserter - This method should be implemented by targets
  // that mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
  // instructions are special in various ways, which require special support to
  // insert.  The specified MachineInstr is created but not inserted into any
  // basic blocks, and the scheduler passes ownership of it to this method.
  virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                        MachineBasicBlock *MBB);

  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// AddrMode - This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null,  there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
  /// no scale.
  ///
  struct AddrMode {
    GlobalValue *BaseGV;
    int64_t      BaseOffs;
    bool         HasBaseReg;
    int64_t      Scale;
    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
  };

  /// isLegalAddressingMode - Return true if the addressing mode represented by
  /// AM is legal for this target, for a load/store of the specified type.
  /// TODO: Handle pre/postinc as well.
  virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;

  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
  /// register EAX to i16 by referencing its sub-register AX.
  virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
    return false;
  }

  virtual bool isTruncateFree(MVT VT1, MVT VT2) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //
  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
                    std::vector<SDNode*>* Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
                    std::vector<SDNode*>* Created) const;


  //===--------------------------------------------------------------------===//
  // Runtime Library hooks
  //

  /// setLibcallName - Rename the default libcall routine name for the specified
  /// libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }

  /// getLibcallName - Get the libcall routine name for the specified libcall.
  ///
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }

  /// setCmpLibcallCC - Override the default CondCode to be used to test the
  /// result of the comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
  /// the comparison libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }

private:
  TargetMachine &TM;
  const TargetData *TD;

  /// PointerTy - The type to use for pointers, usually i32 or i64.
  ///
  MVT PointerTy;

  /// IsLittleEndian - True if this is a little endian target.
  ///
  bool IsLittleEndian;

  /// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen.
  ///
  bool UsesGlobalOffsetTable;

  /// SelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  bool SelectIsExpensive;

  /// IntDivIsCheap - Tells the code generator not to expand integer divides by
  /// constants into a sequence of muls, adds, and shifts.  This is a hack until
  /// a real cost model is in place.  If we ever optimize for size, this will be
  /// set to true unconditionally.
  bool IntDivIsCheap;

  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by power of two, and let the target handle
  /// it.
  bool Pow2DivIsCheap;

  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
  /// llvm.setjmp.  Defaults to false.
  bool UseUnderscoreSetJmp;

  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
  /// llvm.longjmp.  Defaults to false.
  bool UseUnderscoreLongJmp;

  /// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever
  /// PointerTy is.
  MVT ShiftAmountTy;

  // How out-of-range shift amounts are handled; see OutOfRangeShiftAmount.
  OutOfRangeShiftAmount ShiftAmtHandling;

  /// BooleanContents - Information about the contents of the high-bits in
  /// boolean values held in a type wider than i1.  See getBooleanContents.
  BooleanContent BooleanContents;

  /// SchedPreferenceInfo - The target scheduling preference: shortest possible
  /// total cycles or lowest register usage.
  SchedPreference SchedPreferenceInfo;

  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers.
  unsigned JumpBufSize;

  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
  /// buffers.
  unsigned JumpBufAlignment;

  /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
  /// if-converted.
  unsigned IfCvtBlockSizeLimit;

  /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
  /// duplicated during if-conversion.
  unsigned IfCvtDupBlockSizeLimit;

  /// PrefLoopAlignment - The preferred loop alignment.
  ///
  unsigned PrefLoopAlignment;

  /// StackPointerRegisterToSaveRestore - If set to a physical register, this
  /// specifies the register that llvm.savestack/llvm.restorestack should save
  /// and restore.
  unsigned StackPointerRegisterToSaveRestore;

  /// ExceptionPointerRegister - If set to a physical register, this specifies
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned ExceptionPointerRegister;

  /// ExceptionSelectorRegister - If set to a physical register, this specifies
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned ExceptionSelectorRegister;

  /// RegClassForVT - This indicates the default register class to use for
  /// each ValueType the target supports natively.
  TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];

  /// TransformToType - For any value types we are promoting or expanding, this
  /// contains the value type that we are changing to.  For Expanded types, this
  /// contains one step of the expand (e.g. i64 -> i32), even if there are
  /// multiple steps required (e.g. i64 -> i16).  For types natively supported
  /// by the system, this holds the same type (e.g. i32 -> i32).
  MVT TransformToType[MVT::LAST_VALUETYPE];

  /// OpActions - For each operation and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with the operation.
1523 /// Most operations are Legal (aka, supported natively by the target), but 1524 /// operations that are not should be described. Note that operations on 1525 /// non-legal value types are not described here. 1526 uint64_t OpActions[ISD::BUILTIN_OP_END]; 1527 1528 /// LoadExtActions - For each load extension type and each value type, 1529 /// keep a LegalizeAction that indicates how instruction selection should deal 1530 /// with the load. 1531 uint64_t LoadExtActions[ISD::LAST_LOADEXT_TYPE]; 1532 1533 /// TruncStoreActions - For each truncating store, keep a LegalizeAction that 1534 /// indicates how instruction selection should deal with the store. 1535 uint64_t TruncStoreActions[MVT::LAST_VALUETYPE]; 1536 1537 /// IndexedModeActions - For each indexed mode and each value type, keep a 1538 /// pair of LegalizeAction that indicates how instruction selection should 1539 /// deal with the load / store. 1540 uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE]; 1541 1542 /// ConvertActions - For each conversion from source type to destination type, 1543 /// keep a LegalizeAction that indicates how instruction selection should 1544 /// deal with the conversion. 1545 /// Currently, this is used only for floating->floating conversions 1546 /// (FP_EXTEND and FP_ROUND). 1547 uint64_t ConvertActions[MVT::LAST_VALUETYPE]; 1548 1549 /// CondCodeActions - For each condition code (ISD::CondCode) keep a 1550 /// LegalizeAction that indicates how instruction selection should 1551 /// deal with the condition code. 1552 uint64_t CondCodeActions[ISD::SETCC_INVALID]; 1553 1554 ValueTypeActionImpl ValueTypeActions; 1555 1556 std::vector<APFloat> LegalFPImmediates; 1557 1558 std::vector<std::pair<MVT, TargetRegisterClass*> > AvailableRegClasses; 1559 1560 /// TargetDAGCombineArray - Targets can specify ISD nodes that they would 1561 /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(), 1562 /// which sets a bit in this array.
1563 unsigned char 1564 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT]; 1565 1566 /// PromoteToType - For operations that must be promoted to a specific type, 1567 /// this holds the destination type. This map should be sparse, so don't hold 1568 /// it as an array. 1569 /// 1570 /// Targets add entries to this map with AddPromotedToType(..), clients access 1571 /// this with getTypeToPromoteTo(..). 1572 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType> 1573 PromoteToType; 1574 1575 /// LibcallRoutineNames - Stores the name of each libcall. 1576 /// 1577 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL]; 1578 1579 /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result 1580 /// of each of the comparison libcalls against zero. 1581 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; 1582 1583protected: 1584 /// When lowering @llvm.memset this field specifies the maximum number of 1585 /// store operations that may be substituted for the call to memset. Targets 1586 /// must set this value based on the cost threshold for that target. Targets 1587 /// should assume that the memset will be done using as many of the largest 1588 /// store operations first, followed by smaller ones, if necessary, per 1589 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine 1590 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte 1591 /// store. This only applies to setting a constant array of a constant size. 1592 /// @brief Specify maximum number of store instructions per memset call. 1593 unsigned maxStoresPerMemset; 1594 1595 /// When lowering @llvm.memcpy this field specifies the maximum number of 1596 /// store operations that may be substituted for a call to memcpy. Targets 1597 /// must set this value based on the cost threshold for that target.
Targets 1598 /// should assume that the memcpy will be done using as many of the largest 1599 /// store operations first, followed by smaller ones, if necessary, per 1600 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine 1601 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store 1602 /// and one 1-byte store. This only applies to copying a constant array of 1603 /// constant size. 1604 /// @brief Specify maximum number of store instructions per memcpy call. 1605 unsigned maxStoresPerMemcpy; 1606 1607 /// When lowering @llvm.memmove this field specifies the maximum number of 1608 /// store instructions that may be substituted for a call to memmove. Targets 1609 /// must set this value based on the cost threshold for that target. Targets 1610 /// should assume that the memmove will be done using as many of the largest 1611 /// store operations first, followed by smaller ones, if necessary, per 1612 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine 1613 /// with 8-bit alignment would result in nine 1-byte stores. This only 1614 /// applies to copying a constant array of constant size. 1615 /// @brief Specify maximum number of store instructions per memmove call. 1616 unsigned maxStoresPerMemmove; 1617 1618 /// This field specifies whether the target machine permits unaligned memory 1619 /// accesses. This is used, for example, to determine the size of store 1620 /// operations when copying small arrays and other similar tasks. 1621 /// @brief Indicate whether the target permits unaligned memory accesses. 1622 bool allowUnalignedMemoryAccesses; 1623}; 1624} // end llvm namespace 1625 1626#endif 1627