TargetLowering.h revision 11df7e5157352d082bcb556907c3c8239228ae7f
//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes how to lower LLVM code to machine code.  This has three
// main components:
//
//  1. Which ValueTypes are natively supported by the target.
//  2. Which operations are supported for supported ValueTypes.
//  3. Cost thresholds for alternative implementations of certain operations.
//
// In addition it has a few other components, like information about FP
// immediates.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/STLExtras.h"
#include <map>
#include <vector>

namespace llvm {
  class AllocaInst;
  class CallInst;
  class Function;
  class FastISel;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineFrameInfo;
  class MachineInstr;
  class MachineModuleInfo;
  class SDNode;
  class SDValue;
  class SelectionDAG;
  class TargetData;
  class TargetMachine;
  class TargetRegisterClass;
  class TargetSubtarget;
  class Value;

//===----------------------------------------------------------------------===//
/// TargetLowering - This class defines information used to lower LLVM code to
/// legal SelectionDAG operators that the target instruction selector can accept
/// natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
///
class TargetLowering {
public:
  /// LegalizeAction - This enum indicates whether operations are valid for a
  /// target, and if not, what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };

  /// OutOfRangeShiftAmount - How the target handles shift amounts that are
  /// greater than or equal to the bit width of the shifted value.
  enum OutOfRangeShiftAmount {
    Undefined,  // Oversized shift amounts are undefined (default).
    Mask,       // Shift amounts are auto masked (anded) to value size.
    Extend      // Oversized shift pulls in zeros or sign bits.
  };

  enum BooleanContent { // How the target represents true/false values.
    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  enum SchedPreference {
    SchedulingForLatency,          // Scheduling for shortest total latency.
    SchedulingForRegPressure       // Scheduling for lowest register pressure.
  };

  explicit TargetLowering(TargetMachine &TM);
  virtual ~TargetLowering();

  TargetMachine &getTargetMachine() const { return TM; }
  const TargetData *getTargetData() const { return TD; }

  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }
  MVT getPointerTy() const { return PointerTy; }
  MVT getShiftAmountTy() const { return ShiftAmountTy; }
  OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; }

  /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC
  /// codegen.
  bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; }

  /// isSelectExpensive - Return true if the select operation is expensive for
  /// this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  /// isIntDivCheap() - Return true if integer divide is usually cheaper than
  /// a sequence of several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
  /// srl/add/sra.
  bool isPow2DivCheap() const { return Pow2DivIsCheap; }

  /// getSetCCResultType - Return the ValueType of the result of setcc
  /// operations.
  virtual MVT getSetCCResultType(MVT VT) const;

  /// getBooleanContents - For targets without i1 registers, this gives the
  /// nature of the high-bits of boolean values held in types wider than i1.
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.
  BooleanContent getBooleanContents() const { return BooleanContents;}

  /// getSchedulingPreference - Return target scheduling preference.
  SchedPreference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.  This may only be called on legal types.
  TargetRegisterClass *getRegClassFor(MVT VT) const {
    assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
    TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// isTypeLegal - Return true if the target has native support for the
  /// specified value type.  This means that it has a register that directly
  /// holds it without promotions or expansions.
  bool isTypeLegal(MVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT()] != 0;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - This is a bitvector that contains two bits for each
    /// value type, where the two bits correspond to the LegalizeAction enum.
    /// This can be queried with "getTypeAction(VT)".
    uint32_t ValueTypeActions[2];
  public:
    ValueTypeActionImpl() {
      ValueTypeActions[0] = ValueTypeActions[1] = 0;
    }
    ValueTypeActionImpl(const ValueTypeActionImpl &RHS) {
      ValueTypeActions[0] = RHS.ValueTypeActions[0];
      ValueTypeActions[1] = RHS.ValueTypeActions[1];
    }

    LegalizeAction getTypeAction(MVT VT) const {
      if (VT.isExtended()) {
        if (VT.isVector()) {
          // Power-of-two-sized extended vectors are split; others are
          // promoted to the next power-of-two-sized vector first.
          return VT.isPow2VectorType() ? Expand : Promote;
        }
        if (VT.isInteger())
          // First promote to a power-of-two size, then expand if necessary.
          return VT == VT.getRoundIntegerType() ? Expand : Promote;
        assert(0 && "Unsupported extended type!");
        return Legal;
      }
      // Simple types: two bits per type, packed 16 per 32-bit word.
      unsigned I = VT.getSimpleVT();
      assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3);
    }
    void setTypeAction(MVT VT, LegalizeAction Action) {
      unsigned I = VT.getSimpleVT();
      assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      ValueTypeActions[I>>4] |= Action << ((I*2) & 31);
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal (return 'Legal') or we need to promote it to a larger
  /// type (return 'Promote'), or we need to expand it into multiple registers
  /// of smaller integer type (return 'Expand').  'Custom' is not an option.
  LegalizeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// getTypeToTransformTo - For types supported by the target, this is an
  /// identity function.  For types that must be promoted to larger types, this
  /// returns the larger type to promote to.  For integer types that are larger
  /// than the largest integer register, this contains one step in the expansion
  /// to get to the smaller register.  For illegal floating point types, this
  /// returns the integer type to transform to.
  MVT getTypeToTransformTo(MVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT() < array_lengthof(TransformToType));
      MVT NVT = TransformToType[VT.getSimpleVT()];
      assert(getTypeAction(NVT) != Promote &&
             "Promote may not follow Expand or Promote");
      return NVT;
    }

    if (VT.isVector()) {
      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Vector length is a power of 2 - split to half the size.
        unsigned NumElts = VT.getVectorNumElements();
        MVT EltVT = VT.getVectorElementType();
        return (NumElts == 1) ? EltVT : MVT::getVectorVT(EltVT, NumElts / 2);
      }
      // Promote to a power of two size, avoiding multi-step promotion.
      return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
    } else if (VT.isInteger()) {
      MVT NVT = VT.getRoundIntegerType();
      if (NVT == VT)
        // Size is a power of two - expand to half the size.
        return MVT::getIntegerVT(VT.getSizeInBits() / 2);
      else
        // Promote to a power of two size, avoiding multi-step promotion.
        return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
    }
    assert(0 && "Unsupported extended type!");
    return MVT(); // Not reached
  }

  /// getTypeToExpandTo - For types supported by the target, this is an
  /// identity function.  For types that must be expanded (i.e. integer types
  /// that are larger than the largest integer register or illegal floating
  /// point types), this returns the largest legal type it will be expanded to.
  MVT getTypeToExpandTo(MVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(VT)) {
      case Legal:
        return VT;
      case Expand:
        // Step down one expansion level at a time until the type is legal.
        VT = getTypeToTransformTo(VT);
        break;
      default:
        assert(false && "Type is not legal nor is it to be expanded!");
        return VT;
      }
    }
    return VT;
  }

  /// getVectorTypeBreakdown - Vector types are broken down into some number of
  /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
  /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
  /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
267 /// 268 unsigned getVectorTypeBreakdown(MVT VT, 269 MVT &IntermediateVT, 270 unsigned &NumIntermediates, 271 MVT &RegisterVT) const; 272 273 /// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the 274 /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If 275 /// this is the case, it returns true and store the intrinsic 276 /// information into the IntrinsicInfo that was passed to the function. 277 typedef struct IntrinsicInfo { 278 unsigned opc; // target opcode 279 MVT memVT; // memory VT 280 const Value* ptrVal; // value representing memory location 281 int offset; // offset off of ptrVal 282 unsigned align; // alignment 283 bool vol; // is volatile? 284 bool readMem; // reads memory? 285 bool writeMem; // writes memory? 286 } IntrinisicInfo; 287 288 virtual bool getTgtMemIntrinsic(IntrinsicInfo& Info, 289 CallInst &I, unsigned Intrinsic) { 290 return false; 291 } 292 293 /// getWidenVectorType: given a vector type, returns the type to widen to 294 /// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself. 295 /// If there is no vector type that we want to widen to, returns MVT::Other 296 /// When and were to widen is target dependent based on the cost of 297 /// scalarizing vs using the wider vector type. 298 virtual MVT getWidenVectorType(MVT VT); 299 300 typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator; 301 legal_fpimm_iterator legal_fpimm_begin() const { 302 return LegalFPImmediates.begin(); 303 } 304 legal_fpimm_iterator legal_fpimm_end() const { 305 return LegalFPImmediates.end(); 306 } 307 308 /// isShuffleMaskLegal - Targets can use this to indicate that they only 309 /// support *some* VECTOR_SHUFFLE operations, those with specific masks. 310 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 311 /// are assumed to be legal. 
  virtual bool isShuffleMaskLegal(SDValue Mask, MVT VT) const {
    return true;
  }

  /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal.  Targets can use
  /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
  /// to replace a VAND with a constant pool entry.
  virtual bool isVectorClearMaskLegal(const std::vector<SDValue> &BVOps,
                                      MVT EVT,
                                      SelectionDAG &DAG) const {
    return false;
  }

  /// getOperationAction - Return how this operation should be treated: either
  /// it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getOperationAction(unsigned Op, MVT VT) const {
    // Extended (non-simple) types always have to be expanded.
    if (VT.isExtended()) return Expand;
    assert(Op < array_lengthof(OpActions) &&
           (unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 &&
           "Table isn't big enough!");
    return (LegalizeAction)((OpActions[Op] >> (2*VT.getSimpleVT())) & 3);
  }

  /// isOperationLegal - Return true if the specified operation is legal on this
  /// target.
  bool isOperationLegal(unsigned Op, MVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// getLoadExtAction - Return how this load with extension should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getLoadExtAction(unsigned LType, MVT VT) const {
    assert(LType < array_lengthof(LoadExtActions) &&
           (unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 &&
           "Table isn't big enough!");
    return (LegalizeAction)((LoadExtActions[LType] >> (2*VT.getSimpleVT())) & 3);
  }

  /// isLoadExtLegal - Return true if the specified load with extension is legal
  /// on this target.
  bool isLoadExtLegal(unsigned LType, MVT VT) const {
    return VT.isSimple() &&
      (getLoadExtAction(LType, VT) == Legal ||
       getLoadExtAction(LType, VT) == Custom);
  }

  /// getTruncStoreAction - Return how this store with truncation should be
  /// treated: either it is legal, needs to be promoted to a larger size, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getTruncStoreAction(MVT ValVT,
                                     MVT MemVT) const {
    assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) &&
           (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 &&
           "Table isn't big enough!");
    return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT()] >>
                             (2*MemVT.getSimpleVT())) & 3);
  }

  /// isTruncStoreLegal - Return true if the specified store with truncation is
  /// legal on this target.
  bool isTruncStoreLegal(MVT ValVT, MVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  /// getIndexedLoadAction - Return how the indexed load should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < array_lengthof(IndexedModeActions[0]) &&
           (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0][0])*4 &&
           "Table isn't big enough!");
    // IndexedModeActions[0] holds the load table; [1] holds the store table.
    return (LegalizeAction)((IndexedModeActions[0][IdxMode] >>
                             (2*VT.getSimpleVT())) & 3);
  }

  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
  /// on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, MVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT) == Legal ||
       getIndexedLoadAction(IdxMode, VT) == Custom);
  }

  /// getIndexedStoreAction - Return how the indexed store should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < array_lengthof(IndexedModeActions[1]) &&
           (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 &&
           "Table isn't big enough!");
    return (LegalizeAction)((IndexedModeActions[1][IdxMode] >>
                             (2*VT.getSimpleVT())) & 3);
  }

  /// isIndexedStoreLegal - Return true if the specified indexed store is legal
  /// on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, MVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT) == Legal ||
       getIndexedStoreAction(IdxMode, VT) == Custom);
  }

  /// getConvertAction - Return how the conversion should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getConvertAction(MVT FromVT, MVT ToVT) const {
    assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) &&
           (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 &&
           "Table isn't big enough!");
    return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT()] >>
                             (2*ToVT.getSimpleVT())) & 3);
  }

  /// isConvertLegal - Return true if the specified conversion is legal
  /// on this target.
  bool isConvertLegal(MVT FromVT, MVT ToVT) const {
    return isTypeLegal(FromVT) && isTypeLegal(ToVT) &&
      (getConvertAction(FromVT, ToVT) == Legal ||
       getConvertAction(FromVT, ToVT) == Custom);
  }

  /// getCondCodeAction - Return how the condition code should be treated:
  /// either it is legal, needs to be expanded to some other code sequence,
  /// or the target has a custom expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           (unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 &&
           "Table isn't big enough!");
    LegalizeAction Action = (LegalizeAction)
      ((CondCodeActions[CC] >> (2*VT.getSimpleVT())) & 3);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// isCondCodeLegal - Return true if the specified condition code is legal
  /// on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }


  /// getTypeToPromoteTo - If the action for this operation is to promote, this
  /// method returns the ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.getSimpleVT()));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    // Otherwise walk up the simple-type list until we hit a legal type of
    // the same class whose operation action is not itself Promote.
    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.getSimpleVT()+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// getValueType - Return the MVT corresponding to this LLVM type.
  /// This is fixed by the LLVM operations except for the pointer size.  If
  /// AllowUnknown is true, this will return MVT::Other for types with no MVT
  /// counterpart (e.g. structs), otherwise it will assert.
  MVT getValueType(const Type *Ty, bool AllowUnknown = false) const {
    MVT VT = MVT::getMVT(Ty, AllowUnknown);
    return VT == MVT::iPTR ? PointerTy : VT;
  }

  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
  /// function arguments in the caller parameter area.  This is the actual
  /// alignment, not its logarithm.
  virtual unsigned getByValTypeAlignment(const Type *Ty) const;

  /// getRegisterType - Return the type of registers that this ValueType will
  /// eventually require.
  MVT getRegisterType(MVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT() < array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT()];
    }
    if (VT.isVector()) {
      MVT VT1, RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      // Extended integers: step toward a legal type one transform at a time.
      return getRegisterType(getTypeToTransformTo(VT));
    }
    assert(0 && "Unsupported extended type!");
    return MVT(); // Not reached
  }

  /// getNumRegisters - Return the number of registers that this ValueType will
  /// eventually require.  This is one for any types promoted to live in larger
  /// registers, but may be more than one for types (like i64) that are split
  /// into pieces.  For types like i140, which are first promoted then expanded,
  /// it is the number of registers needed to hold all the bits of the original
  /// type.  For an i140 on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(MVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT() < array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT()];
    }
    if (VT.isVector()) {
      MVT VT1, VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      // Round the bit width up to a whole number of registers.
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    assert(0 && "Unsupported extended type!");
    return 0; // Not reached
  }

  /// ShouldShrinkFPConstant - If true, then instruction selection should
  /// seek to shrink the FP constant of the specified type to a smaller type
  /// in order to save space and / or reduce runtime.
  virtual bool ShouldShrinkFPConstant(MVT VT) const { return true; }

  /// hasTargetDAGCombine - If true, the target has custom DAG combine
  /// transformations that it can perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    // One bit per node type, packed eight per byte.
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset.  The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memset
  unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy.  The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memcpy
  unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove.  The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memmove
  unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; }

  /// This function returns true if the target allows unaligned memory accesses.
  /// This is used, for example, in situations where an array copy/move/set is
  /// converted to a sequence of store operations.  Its use helps to ensure that
  /// such replacements don't generate code that causes an alignment error
  /// (trap) on the target machine.
  /// @brief Determine if the target supports unaligned memory accesses.
  bool allowsUnalignedMemoryAccesses() const {
    return allowUnalignedMemoryAccesses;
  }

  /// getOptimalMemOpType - Returns the target specific optimal type for load
  /// and store operations as a result of memset, memcpy, and memmove lowering.
  /// It returns MVT::iAny if SelectionDAG should be responsible for
  /// determining it.
  virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
                                  bool isSrcConst, bool isSrcStr) const {
    return MVT::iAny;
  }

  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
  /// to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
  /// to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// getStackPointerRegisterToSaveRestore - If a physical register, this
  /// specifies the register that llvm.savestack/llvm.restorestack should save
  /// and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// getExceptionAddressRegister - If a physical register, this returns
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned getExceptionAddressRegister() const {
    return ExceptionPointerRegister;
  }

  /// getExceptionSelectorRegister - If a physical register, this returns
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
  /// set, the default is 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
  /// (if never set, the default is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// getIfCvtBlockSizeLimit - returns the target specific if-conversion block
  /// size limit.  Any block whose size is greater should not be predicated.
  unsigned getIfCvtBlockSizeLimit() const {
    return IfCvtBlockSizeLimit;
  }

  /// getIfCvtDupBlockSizeLimit - returns the target specific size limit for a
  /// block to be considered for duplication.  Any block whose size is greater
  /// should not be duplicated to facilitate its predication.
  unsigned getIfCvtDupBlockSizeLimit() const {
    return IfCvtDupBlockSizeLimit;
  }

  /// getPrefLoopAlignment - return the preferred loop alignment.
  ///
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                         SDValue &Offset,
                                         ISD::MemIndexedMode &AM,
                                         SelectionDAG &DAG) {
    return false;
  }

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                          SDValue &Base, SDValue &Offset,
                                          ISD::MemIndexedMode &AM,
                                          SelectionDAG &DAG) {
    return false;
  }

  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
  /// jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// isOffsetFoldingLegal - Return true if folding a constant offset
  /// with the given GlobalAddress is legal.  It is frequently not legal in
  /// PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
  /// SDValues for returning information from TargetLowering to its clients
  /// that want to combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG) : DAG(InDAG) {}

    // Record Old/New as the pending replacement and report that a combine
    // was performed.
    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }

    /// ShrinkDemandedConstant - Check to see if the specified operand of the
    /// specified instruction is a constant integer.  If so, check to see if
    /// there are any bits set in the constant that are not demanded.  If so,
    /// shrink the constant and return true.
    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
  };

  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
  /// DemandedMask bits of the result of Op are ever used downstream.  If we can
  /// use this information to simplify Op, create a new simplified DAG node and
  /// return true, returning the original and new nodes in Old and New.
  /// Otherwise, analyze the expression and return a mask of KnownOne and
  /// KnownZero bits for the expression (used to simplify the caller).
  /// The KnownZero/One bits may only be accurate for those bits in the
  /// DemandedMask.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            APInt &KnownZero, APInt &KnownOne,
                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                              const APInt &Mask,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const SelectionDAG &DAG,
                                              unsigned Depth = 0) const;

  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
  /// targets that want to expose additional information about sign bits to the
  /// DAG Combiner.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth = 0) const;

  /// DAGCombinerInfo - Context object handed to PerformDAGCombine; wraps the
  /// DAG combiner instance and records which phase the combine runs in.
  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    bool BeforeLegalize;
    bool CalledByLegalizer;
  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc)
      : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return BeforeLegalize; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To);
    SDValue CombineTo(SDNode *N, SDValue Res);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1);
  };

  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
  /// and cc.  If it is unable to simplify it, return a null SDValue.
SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
                      ISD::CondCode Cond, bool foldBooleans,
                      DAGCombinerInfo &DCI) const;

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
virtual bool
isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;

/// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is
/// loading 'Bytes' bytes from a location that is 'Dist' units away from the
/// location that the 'Base' load is loading from.
bool isConsecutiveLoad(SDNode *LD, SDNode *Base, unsigned Bytes, int Dist,
                       const MachineFrameInfo *MFI) const;

/// PerformDAGCombine - This method will be invoked for all target nodes and
/// for any target-independent nodes that the target has registered with
/// invoke it for.
///
/// The semantics are as follows:
/// Return Value:
///   SDValue.Val == 0 - No change was made
///   SDValue.Val == N - N was replaced, is dead, and is already handled.
///   otherwise        - N should be replaced by the returned Operand.
///
/// In addition, methods provided by DAGCombinerInfo may be used to perform
/// more complex transformations.
///
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
//

protected:
/// setUsesGlobalOffsetTable - Specify that this target does or doesn't use a
/// GOT for PC-relative code.
void setUsesGlobalOffsetTable(bool V) { UsesGlobalOffsetTable = V; }

/// setShiftAmountType - Describe the type that should be used for shift
/// amounts.  This type defaults to the pointer type.
void setShiftAmountType(MVT VT) { ShiftAmountTy = VT; }

/// setBooleanContents - Specify how the target extends the result of a
/// boolean value from i1 to a wider type.  See getBooleanContents.
void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }

/// setSchedulingPreference - Specify the target scheduling preference.
void setSchedulingPreference(SchedPreference Pref) {
  SchedPreferenceInfo = Pref;
}

/// setShiftAmountFlavor - Describe how the target handles out of range shift
/// amounts.
void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) {
  ShiftAmtHandling = OORSA;
}

/// setUseUnderscoreSetJmp - Indicate whether this target prefers to
/// use _setjmp to implement llvm.setjmp or the non _ version.
/// Defaults to false.
void setUseUnderscoreSetJmp(bool Val) {
  UseUnderscoreSetJmp = Val;
}

/// setUseUnderscoreLongJmp - Indicate whether this target prefers to
/// use _longjmp to implement llvm.longjmp or the non _ version.
/// Defaults to false.
void setUseUnderscoreLongJmp(bool Val) {
  UseUnderscoreLongJmp = Val;
}

/// setStackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
void setStackPointerRegisterToSaveRestore(unsigned R) {
  StackPointerRegisterToSaveRestore = R;
}

/// setExceptionPointerRegister - If set to a physical register, this sets
/// the register that receives the exception address on entry to a landing
/// pad.
void setExceptionPointerRegister(unsigned R) {
  ExceptionPointerRegister = R;
}

/// setExceptionSelectorRegister - If set to a physical register, this sets
/// the register that receives the exception typeid on entry to a landing
/// pad.
void setExceptionSelectorRegister(unsigned R) {
  ExceptionSelectorRegister = R;
}

/// setSelectIsExpensive - Tells the code generator not to expand operations
/// into sequences that use the select operations if possible.
void setSelectIsExpensive() { SelectIsExpensive = true; }

/// setIntDivIsCheap - Tells the code generator whether integer divide is
/// cheap on this target.  When it is not cheap, divides by constants may, if
/// possible, be replaced by an alternate sequence of instructions not
/// containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

/// addRegisterClass - Add the specified register class as an available
/// regclass for the specified value type.  This indicates the selector can
/// handle values of that class natively.
void addRegisterClass(MVT VT, TargetRegisterClass *RC) {
  assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
  AvailableRegClasses.push_back(std::make_pair(VT, RC));
  RegClassForVT[VT.getSimpleVT()] = RC;
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void computeRegisterProperties();

/// setOperationAction - Indicate that the specified operation does not work
/// with the specified type and indicate what to do about it.
897 void setOperationAction(unsigned Op, MVT VT, 898 LegalizeAction Action) { 899 assert((unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 && 900 Op < array_lengthof(OpActions) && "Table isn't big enough!"); 901 OpActions[Op] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 902 OpActions[Op] |= (uint64_t)Action << VT.getSimpleVT()*2; 903 } 904 905 /// setLoadExtAction - Indicate that the specified load with extension does 906 /// not work with the with specified type and indicate what to do about it. 907 void setLoadExtAction(unsigned ExtType, MVT VT, 908 LegalizeAction Action) { 909 assert((unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 && 910 ExtType < array_lengthof(LoadExtActions) && 911 "Table isn't big enough!"); 912 LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 913 LoadExtActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2; 914 } 915 916 /// setTruncStoreAction - Indicate that the specified truncating store does 917 /// not work with the with specified type and indicate what to do about it. 918 void setTruncStoreAction(MVT ValVT, MVT MemVT, 919 LegalizeAction Action) { 920 assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) && 921 (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 && 922 "Table isn't big enough!"); 923 TruncStoreActions[ValVT.getSimpleVT()] &= ~(uint64_t(3UL) << 924 MemVT.getSimpleVT()*2); 925 TruncStoreActions[ValVT.getSimpleVT()] |= (uint64_t)Action << 926 MemVT.getSimpleVT()*2; 927 } 928 929 /// setIndexedLoadAction - Indicate that the specified indexed load does or 930 /// does not work with the with specified type and indicate what to do abort 931 /// it. 
NOTE: All indexed mode loads are initialized to Expand in 932 /// TargetLowering.cpp 933 void setIndexedLoadAction(unsigned IdxMode, MVT VT, 934 LegalizeAction Action) { 935 assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0])*4 && 936 IdxMode < array_lengthof(IndexedModeActions[0]) && 937 "Table isn't big enough!"); 938 IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 939 IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; 940 } 941 942 /// setIndexedStoreAction - Indicate that the specified indexed store does or 943 /// does not work with the with specified type and indicate what to do about 944 /// it. NOTE: All indexed mode stores are initialized to Expand in 945 /// TargetLowering.cpp 946 void setIndexedStoreAction(unsigned IdxMode, MVT VT, 947 LegalizeAction Action) { 948 assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 && 949 IdxMode < array_lengthof(IndexedModeActions[1]) && 950 "Table isn't big enough!"); 951 IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 952 IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2; 953 } 954 955 /// setConvertAction - Indicate that the specified conversion does or does 956 /// not work with the with specified type and indicate what to do about it. 957 void setConvertAction(MVT FromVT, MVT ToVT, 958 LegalizeAction Action) { 959 assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) && 960 (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 && 961 "Table isn't big enough!"); 962 ConvertActions[FromVT.getSimpleVT()] &= ~(uint64_t(3UL) << 963 ToVT.getSimpleVT()*2); 964 ConvertActions[FromVT.getSimpleVT()] |= (uint64_t)Action << 965 ToVT.getSimpleVT()*2; 966 } 967 968 /// setCondCodeAction - Indicate that the specified condition code is or isn't 969 /// supported on the target and indicate what to do about it. 
970 void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action) { 971 assert((unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 && 972 (unsigned)CC < array_lengthof(CondCodeActions) && 973 "Table isn't big enough!"); 974 CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2); 975 CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.getSimpleVT()*2; 976 } 977 978 /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the 979 /// promotion code defaults to trying a larger integer/fp until it can find 980 /// one that works. If that default is insufficient, this method can be used 981 /// by the target to override the default. 982 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { 983 PromoteToType[std::make_pair(Opc, OrigVT.getSimpleVT())] = 984 DestVT.getSimpleVT(); 985 } 986 987 /// addLegalFPImmediate - Indicate that this target can instruction select 988 /// the specified FP immediate natively. 989 void addLegalFPImmediate(const APFloat& Imm) { 990 LegalFPImmediates.push_back(Imm); 991 } 992 993 /// setTargetDAGCombine - Targets should invoke this method for each target 994 /// independent node that they want to provide a custom DAG combiner for by 995 /// implementing the PerformDAGCombine virtual method. 
void setTargetDAGCombine(ISD::NodeType NT) {
  // One bit per ISD opcode: byte index NT>>3, bit index NT&7.
  assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
  TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
}

/// setJumpBufSize - Set the target's required jmp_buf buffer size (in
/// bytes); default is 200
void setJumpBufSize(unsigned Size) {
  JumpBufSize = Size;
}

/// setJumpBufAlignment - Set the target's required jmp_buf buffer
/// alignment (in bytes); default is 0
void setJumpBufAlignment(unsigned Align) {
  JumpBufAlignment = Align;
}

/// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
/// limit (in number of instructions); default is 2.
void setIfCvtBlockSizeLimit(unsigned Limit) {
  IfCvtBlockSizeLimit = Limit;
}

/// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
/// of instructions) to be considered for code duplication during
/// if-conversion; default is 2.
void setIfCvtDupBlockSizeLimit(unsigned Limit) {
  IfCvtDupBlockSizeLimit = Limit;
}

/// setPrefLoopAlignment - Set the target's preferred loop alignment.  Default
/// alignment is zero, which means the target does not care about loop
/// alignment.
void setPrefLoopAlignment(unsigned Align) {
  PrefLoopAlignment = Align;
}

public:

virtual const TargetSubtarget *getSubtarget() {
  assert(0 && "Not Implemented");
  return NULL;  // this is here to silence compiler errors
}

//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
// the SelectionDAGLowering code knows how to lower these.
//

/// LowerArguments - This hook must be implemented to indicate how we should
/// lower the arguments for the specified function, into the specified DAG.
virtual void
LowerArguments(Function &F, SelectionDAG &DAG,
               SmallVectorImpl<SDValue>& ArgValues);

/// LowerCallTo - This hook lowers an abstract call to a function into an
/// actual call.  This returns a pair of operands.  The first element is the
/// return value for the function (if RetTy is not VoidTy).  The second
/// element is the outgoing token chain.
struct ArgListEntry {
  SDValue Node;
  const Type* Ty;
  bool isSExt : 1;   // Sign-extended argument.
  bool isZExt : 1;   // Zero-extended argument.
  bool isInReg : 1;  // Passed in register.
  bool isSRet : 1;   // Struct-return pointer.
  bool isNest : 1;   // Nest (static chain) argument.
  bool isByVal : 1;  // Passed by value.
  uint16_t Alignment;

  ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
    isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
};
typedef std::vector<ArgListEntry> ArgListTy;
virtual std::pair<SDValue, SDValue>
LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
            bool isVarArg, bool isInreg, unsigned CallingConv,
            bool isTailCall, SDValue Callee, ArgListTy &Args,
            SelectionDAG &DAG);

/// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
/// memcpy.  This can be used by targets to provide code sequences for cases
/// that don't fit the target's parameters for simple loads/stores and can be
/// more efficient than using a library call.  This function can return a null
/// SDValue if the target declines to use custom code and a different
/// lowering strategy should be used.
///
/// If AlwaysInline is true, the size is constant and the target should not
/// emit any calls and is strongly encouraged to attempt to emit inline code
/// even if it is beyond the usual threshold because this intrinsic is being
/// expanded in a place where calls are not feasible (e.g. within the prologue
/// for another call).  If the target chooses to decline an AlwaysInline
/// request here, legalize will resort to using simple loads and stores.
virtual SDValue
EmitTargetCodeForMemcpy(SelectionDAG &DAG,
                        SDValue Chain,
                        SDValue Op1, SDValue Op2,
                        SDValue Op3, unsigned Align,
                        bool AlwaysInline,
                        const Value *DstSV, uint64_t DstOff,
                        const Value *SrcSV, uint64_t SrcOff) {
  return SDValue();  // Default: decline, use the generic lowering.
}

/// EmitTargetCodeForMemmove - Emit target-specific code that performs a
/// memmove.  This can be used by targets to provide code sequences for cases
/// that don't fit the target's parameters for simple loads/stores and can be
/// more efficient than using a library call.  This function can return a null
/// SDValue if the target declines to use custom code and a different
/// lowering strategy should be used.
virtual SDValue
EmitTargetCodeForMemmove(SelectionDAG &DAG,
                         SDValue Chain,
                         SDValue Op1, SDValue Op2,
                         SDValue Op3, unsigned Align,
                         const Value *DstSV, uint64_t DstOff,
                         const Value *SrcSV, uint64_t SrcOff) {
  return SDValue();  // Default: decline, use the generic lowering.
}

/// EmitTargetCodeForMemset - Emit target-specific code that performs a
/// memset.  This can be used by targets to provide code sequences for cases
/// that don't fit the target's parameters for simple stores and can be more
/// efficient than using a library call.  This function can return a null
/// SDValue if the target declines to use custom code and a different
/// lowering strategy should be used.
virtual SDValue
EmitTargetCodeForMemset(SelectionDAG &DAG,
                        SDValue Chain,
                        SDValue Op1, SDValue Op2,
                        SDValue Op3, unsigned Align,
                        const Value *DstSV, uint64_t DstOff) {
  return SDValue();  // Default: decline, use the generic lowering.
}

/// LowerOperation - This callback is invoked for operations that are
/// unsupported by the target, which are registered to use 'custom' lowering,
/// and whose defined values are all legal.
/// If the target has no operations that require custom lowering, it need not
/// implement this.  The default implementation of this aborts.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

/// ReplaceNodeResults - This callback is invoked when a node result type is
/// illegal for the target, and the operation was registered to use 'custom'
/// lowering for that result type.  The target places new result values for
/// the node in Results (their number and types must exactly match those of
/// the original return values of the node), or leaves Results empty, which
/// indicates that the node is not to be custom lowered after all.
///
/// If the target has no operations that require custom lowering, it need not
/// implement this.  The default implementation aborts.
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) {
  assert(0 && "ReplaceNodeResults not implemented for this target!");
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible for
/// tail call optimization.  Targets which want to do tail call optimization
/// should override this function.
virtual bool IsEligibleForTailCallOptimization(CallSDNode *Call,
                                               SDValue Ret,
                                               SelectionDAG &DAG) const {
  return false;  // Default: no tail call optimization.
}

/// CheckTailCallReturnConstraints - Check whether CALL node immediately
/// precedes the RET node and whether the return uses the result of the node
/// or is a void return.  This function can be used by the target to determine
/// eligibility of tail call optimization.
static bool CheckTailCallReturnConstraints(CallSDNode *TheCall, SDValue Ret) {
  unsigned NumOps = Ret.getNumOperands();
  // Accept either a single-operand RET fed directly by one of the call's
  // result values, or a multi-operand RET whose first two operands are the
  // call's last result value followed by its value #0.
  if ((NumOps == 1 &&
       (Ret.getOperand(0) == SDValue(TheCall,1) ||
        Ret.getOperand(0) == SDValue(TheCall,0))) ||
      (NumOps > 1 &&
       Ret.getOperand(0) == SDValue(TheCall,
                                    TheCall->getNumValues()-1) &&
       Ret.getOperand(1) == SDValue(TheCall,0)))
    return true;
  return false;
}

/// GetPossiblePreceedingTailCall - Get preceding TailCallNodeOpCode node if
/// it exists.  Skip a possible ISD::TokenFactor.
static SDValue GetPossiblePreceedingTailCall(SDValue Chain,
                                             unsigned TailCallNodeOpCode) {
  if (Chain.getOpcode() == TailCallNodeOpCode) {
    return Chain;
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    // Only the first TokenFactor operand is inspected.
    if (Chain.getNumOperands() &&
        Chain.getOperand(0).getOpcode() == TailCallNodeOpCode)
      return Chain.getOperand(0);
  }
  return Chain;  // No preceding tail-call node found.
}

/// getTargetNodeName() - This method returns the name of a target specific
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;

/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
virtual FastISel *
createFastISel(MachineFunction &,
               MachineModuleInfo *,
               DenseMap<const Value *, unsigned> &,
               DenseMap<const BasicBlock *, MachineBasicBlock *> &,
               DenseMap<const AllocaInst *, int> &
#ifndef NDEBUG
               , SmallSet<Instruction*, 8> &CatchInfoLost
#endif
               ) {
  return 0;  // Default: "fast" instruction selection is unsupported.
}

//===--------------------------------------------------------------------===//
// Inline Asm Support hooks
//

enum ConstraintType {
  C_Register,            // Constraint represents specific register(s).
  C_RegisterClass,       // Constraint represents any of register(s) in class.
  C_Memory,              // Memory constraint.
  C_Other,               // Something else.
  C_Unknown              // Unsupported constraint.
};

/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
  /// ConstraintCode - This contains the actual string for the code, like "m".
  /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that
  /// most closely matches the operand.
  std::string ConstraintCode;

  /// ConstraintType - Information about the constraint code, e.g. Register,
  /// RegisterClass, Memory, Other, Unknown.
  TargetLowering::ConstraintType ConstraintType;

  /// CallOperandVal - If this is the result output operand or a
  /// clobber, this is null, otherwise it is the incoming operand to the
  /// CallInst.  This gets modified as the asm is processed.
  Value *CallOperandVal;

  /// ConstraintVT - The ValueType for the operand value.
  MVT ConstraintVT;

  /// isMatchingInputConstraint - Return true if this is an input operand that
  /// is a matching constraint like "4".
  bool isMatchingInputConstraint() const;

  /// getMatchedOperand - If this is an input matching constraint, this method
  /// returns the output operand it matches.
  unsigned getMatchedOperand() const;

  AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
    : InlineAsm::ConstraintInfo(info),
      ConstraintType(TargetLowering::C_Unknown),
      CallOperandVal(0), ConstraintVT(MVT::Other) {
  }
};

/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
/// being passed in is available, it can be passed in as Op, otherwise an
/// empty SDValue can be passed.  If hasMemory is true it means one of the asm
/// constraints of the inline asm instruction being processed is 'm'.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                    SDValue Op,
                                    bool hasMemory,
                                    SelectionDAG *DAG = 0) const;

/// getConstraintType - Given a constraint, return the type of constraint it
/// is for this target.
virtual ConstraintType getConstraintType(const std::string &Constraint) const;

/// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
/// return a list of registers that can be used to satisfy the constraint.
/// This should only be used for C_RegisterClass constraints.
virtual std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const;

/// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
/// {edx}), return the register number and the register class for the
/// register.
///
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register of 0 and the register class
/// pointer.
///
/// This should only be used for C_Register constraints.  On error,
/// this returns a register number of 0 and a null register class pointer.
virtual std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const;

/// LowerXConstraint - Try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.  This returns null if there is no replacement to
/// make.
virtual const char *LowerXConstraint(MVT ConstraintVT) const;

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.  If hasMemory is true
/// it means one of the asm constraints of the inline asm instruction being
/// processed is 'm'.
virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
                                          bool hasMemory,
                                          std::vector<SDValue> &Ops,
                                          SelectionDAG &DAG) const;

//===--------------------------------------------------------------------===//
// Scheduler hooks
//

// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
// instructions are special in various ways, which require special support to
// insert.  The specified MachineInstr is created but not inserted into any
// basic blocks, and the scheduler passes ownership of it to this method.
virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                       MachineBasicBlock *MBB);

//===--------------------------------------------------------------------===//
// Addressing mode description hooks (used by LSR etc).
//

/// AddrMode - This represents an addressing mode of:
///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
/// If BaseGV is null, there is no BaseGV.
/// If BaseOffs is zero, there is no base offset.
/// If HasBaseReg is false, there is no base register.
/// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
/// no scale.
///
struct AddrMode {
  GlobalValue *BaseGV;
  int64_t BaseOffs;
  bool HasBaseReg;
  int64_t Scale;
  AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
/// TODO: Handle pre/postinc as well.
virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;

/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
/// register EAX to i16 by referencing its sub-register AX.
virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
  return false;  // Conservative default: truncation is not free.
}

virtual bool isTruncateFree(MVT VT1, MVT VT2) const {
  return false;  // Conservative default: truncation is not free.
}

//===--------------------------------------------------------------------===//
// Div utility functions
//
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
                  std::vector<SDNode*>* Created) const;
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
                  std::vector<SDNode*>* Created) const;


//===--------------------------------------------------------------------===//
// Runtime Library hooks
//

/// setLibcallName - Rename the default libcall routine name for the specified
/// libcall.
void setLibcallName(RTLIB::Libcall Call, const char *Name) {
  LibcallRoutineNames[Call] = Name;
}

/// getLibcallName - Get the libcall routine name for the specified libcall.
///
const char *getLibcallName(RTLIB::Libcall Call) const {
  return LibcallRoutineNames[Call];
}

/// setCmpLibcallCC - Override the default CondCode to be used to test the
/// result of the comparison libcall against zero.
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
  CmpLibcallCCs[Call] = CC;
}

/// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
/// the comparison libcall against zero.
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
  return CmpLibcallCCs[Call];
}

private:
TargetMachine &TM;
const TargetData *TD;

/// PointerTy - The type to use for pointers, usually i32 or i64.
///
MVT PointerTy;

/// IsLittleEndian - True if this is a little endian target.
///
bool IsLittleEndian;

/// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen.
///
bool UsesGlobalOffsetTable;

/// SelectIsExpensive - Tells the code generator not to expand operations
/// into sequences that use the select operations if possible.
bool SelectIsExpensive;

/// IntDivIsCheap - Tells the code generator not to expand integer divides by
/// constants into a sequence of muls, adds, and shifts.  This is a hack until
/// a real cost model is in place.  If we ever optimize for size, this will be
/// set to true unconditionally.
bool IntDivIsCheap;

/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
bool Pow2DivIsCheap;

/// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
/// llvm.setjmp.  Defaults to false.
bool UseUnderscoreSetJmp;

/// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
/// llvm.longjmp.  Defaults to false.
bool UseUnderscoreLongJmp;

/// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever
/// PointerTy is.
MVT ShiftAmountTy;

// ShiftAmtHandling - How out-of-range shift amounts behave; see
// OutOfRangeShiftAmount and setShiftAmountFlavor.
OutOfRangeShiftAmount ShiftAmtHandling;

/// BooleanContents - Information about the contents of the high-bits in
/// boolean values held in a type wider than i1.  See getBooleanContents.
BooleanContent BooleanContents;

/// SchedPreferenceInfo - The target scheduling preference: shortest possible
/// total cycles or lowest register usage.
SchedPreference SchedPreferenceInfo;

/// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
unsigned JumpBufSize;

/// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
/// buffers
unsigned JumpBufAlignment;

/// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
/// if-converted.
unsigned IfCvtBlockSizeLimit;

/// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
/// duplicated during if-conversion.
unsigned IfCvtDupBlockSizeLimit;

/// PrefLoopAlignment - The preferred loop alignment.
///
unsigned PrefLoopAlignment;

/// StackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
unsigned StackPointerRegisterToSaveRestore;

/// ExceptionPointerRegister - If set to a physical register, this specifies
/// the register that receives the exception address on entry to a landing
/// pad.
unsigned ExceptionPointerRegister;

/// ExceptionSelectorRegister - If set to a physical register, this specifies
/// the register that receives the exception typeid on entry to a landing
/// pad.
unsigned ExceptionSelectorRegister;

/// RegClassForVT - This indicates the default register class to use for
/// each ValueType the target supports natively.
TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];

/// TransformToType - For any value types we are promoting or expanding, this
/// contains the value type that we are changing to.  For Expanded types, this
/// contains one step of the expand (e.g. i64 -> i32), even if there are
/// multiple steps required (e.g. i64 -> i16).  For types natively supported
/// by the system, this holds the same type (e.g. i32 -> i32).
MVT TransformToType[MVT::LAST_VALUETYPE];

/// OpActions - For each operation and each value type, keep a LegalizeAction
/// that indicates how instruction selection should deal with the operation.
/// Most operations are Legal (aka, supported natively by the target), but
/// operations that are not should be described.  Note that operations on
/// non-legal value types are not described here.
uint64_t OpActions[ISD::BUILTIN_OP_END];

/// LoadExtActions - For each load of load extension type and each value type,
/// keep a LegalizeAction that indicates how instruction selection should deal
/// with the load.
uint64_t LoadExtActions[ISD::LAST_LOADEXT_TYPE];

/// TruncStoreActions - For each truncating store, keep a LegalizeAction that
/// indicates how instruction selection should deal with the store.
uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];

/// IndexedModeActions - For each indexed mode and each value type, keep a
/// pair of LegalizeActions that indicates how instruction selection should
/// deal with the load / store.
uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE];

/// ConvertActions - For each conversion from source type to destination type,
/// keep a LegalizeAction that indicates how instruction selection should
/// deal with the conversion.
/// Currently, this is used only for floating->floating conversions
/// (FP_EXTEND and FP_ROUND).
uint64_t ConvertActions[MVT::LAST_VALUETYPE];

/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
/// deal with the condition code.
uint64_t CondCodeActions[ISD::SETCC_INVALID];

ValueTypeActionImpl ValueTypeActions;

std::vector<APFloat> LegalFPImmediates;

std::vector<std::pair<MVT, TargetRegisterClass*> > AvailableRegClasses;

/// TargetDAGCombineArray - Targets can specify ISD nodes that they would
/// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
/// which sets a bit in this array.
unsigned char
TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];

/// PromoteToType - For operations that must be promoted to a specific type,
/// this holds the destination type.  This map should be sparse, so don't hold
/// it as an array.
///
/// Targets add entries to this map with AddPromotedToType(..), clients access
/// this with getTypeToPromoteTo(..).
std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
  PromoteToType;

/// LibcallRoutineNames - Stores the name of each libcall.
///
const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];

/// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
/// of each of the comparison libcall against zero.
1552 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; 1553 1554protected: 1555 /// When lowering @llvm.memset this field specifies the maximum number of 1556 /// store operations that may be substituted for the call to memset. Targets 1557 /// must set this value based on the cost threshold for that target. Targets 1558 /// should assume that the memset will be done using as many of the largest 1559 /// store operations first, followed by smaller ones, if necessary, per 1560 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine 1561 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte 1562 /// store. This only applies to setting a constant array of a constant size. 1563 /// @brief Specify maximum number of store instructions per memset call. 1564 unsigned maxStoresPerMemset; 1565 1566 /// When lowering @llvm.memcpy this field specifies the maximum number of 1567 /// store operations that may be substituted for a call to memcpy. Targets 1568 /// must set this value based on the cost threshold for that target. Targets 1569 /// should assume that the memcpy will be done using as many of the largest 1570 /// store operations first, followed by smaller ones, if necessary, per 1571 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine 1572 /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store 1573 /// and one 1-byte store. This only applies to copying a constant array of 1574 /// constant size. 1575 /// @brief Specify maximum bytes of store instructions per memcpy call. 1576 unsigned maxStoresPerMemcpy; 1577 1578 /// When lowering @llvm.memmove this field specifies the maximum number of 1579 /// store instructions that may be substituted for a call to memmove. Targets 1580 /// must set this value based on the cost threshold for that target. 
Targets 1581 /// should assume that the memmove will be done using as many of the largest 1582 /// store operations first, followed by smaller ones, if necessary, per 1583 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine 1584 /// with 8-bit alignment would result in nine 1-byte stores. This only 1585 /// applies to copying a constant array of constant size. 1586 /// @brief Specify maximum bytes of store instructions per memmove call. 1587 unsigned maxStoresPerMemmove; 1588 1589 /// This field specifies whether the target machine permits unaligned memory 1590 /// accesses. This is used, for example, to determine the size of store 1591 /// operations when copying small arrays and other similar tasks. 1592 /// @brief Indicate whether the target permits unaligned memory accesses. 1593 bool allowUnalignedMemoryAccesses; 1594}; 1595} // end llvm namespace 1596 1597#endif 1598