TargetLowering.cpp revision b2ff885aaed8f9b033b16ca78d645650efc32433
//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names) {
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SHL_I128] = "__ashlti3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRL_I128] = "__lshrti3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::SRA_I128] = "__ashrti3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::MUL_I128] = "__multi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::SDIV_I128] = "__divti3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::UDIV_I128] = "__udivti3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::SREM_I128] = "__modti3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::UREM_I128] = "__umodti3";
  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_F80] = "__addxf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_F80] = "__subxf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_F80] = "__mulxf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_F80] = "__divxf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_F80] = "fmodl";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::SIN_F80] = "sinl";
  Names[RTLIB::SIN_PPCF128] = "sinl";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::COS_F80] = "cosl";
  Names[RTLIB::COS_PPCF128] = "cosl";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
}
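
// Example (illustrative sketch): a target's TargetLowering subclass can
// replace these defaults in its constructor via the setLibcallName/
// getLibcallName accessors for the table initialized above. The backend and
// routine name below are hypothetical:
//
//   // In a hypothetical FooTargetLowering constructor:
//   setLibcallName(RTLIB::SREM_I64, "__foo_lmod");    // custom runtime helper
//   ...
//   const char *N = getLibcallName(RTLIB::SREM_I64);  // "__foo_lmod"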
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f64) {
    if (RetVT == MVT::f32)
      return FPROUND_F64_F32;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}
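
// Example (sketch): callers map a conversion's operand/result types to one of
// the FPTOSINT_*_* entries above and then look up the routine name, e.g.:
//
//   RTLIB::Libcall LC = RTLIB::getFPTOSINT(MVT::f64, MVT::i32);
//   assert(LC != RTLIB::UNKNOWN_LIBCALL && "conversion not supported");
//   // With the default table above, TLI.getLibcallName(LC) is "__fixdfsi".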
/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
}
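
// Example (sketch of how the CC table above is consumed): when a
// floating-point setcc is lowered to one of the comparison libcalls, the
// call's integer result is compared against zero using the condition
// recorded here, e.g. for OEQ_F32 ("__eqsf2"):
//
//   ISD::CondCode CC = TLI.getCmpLibcallCC(RTLIB::OEQ_F32);  // ISD::SETEQ
//   // result = setcc (call __eqsf2(a, b)), 0, CC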
TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  assert(ISD::BUILTIN_OP_END <= OpActionsCapacity &&
         "Fixed size array in TargetLowering is not large enough!");
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadXActions, 0, sizeof(LoadXActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(ConvertActions, 0, sizeof(ConvertActions));

  // Set default actions for various operations.
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
    }

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use addLegalFPImmediate
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  IsLittleEndian = TD->isLittleEndian();
  UsesGlobalOffsetTable = false;
  ShiftAmountTy = PointerTy = getValueType(TD->getIntPtrType());
  ShiftAmtHandling = Undefined;
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  SetCCResultContents = UndefinedSetCCResult;
  SchedPreferenceInfo = SchedulingForLatency;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  IfCvtBlockSizeLimit = 2;
  IfCvtDupBlockSizeLimit = 0;
  PrefLoopAlignment = 0;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);

  // Tell Legalize whether the assembler supports DEBUG_LOC.
  if (!TM.getTargetAsmInfo()->hasDotLocAndDotFile())
    setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
}

TargetLowering::~TargetLowering() {}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= 32 &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
    MVT EVT = (MVT::SimpleValueType)ExpandedReg;
    if (!EVT.isInteger())
      break;
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction(EVT, Expand);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
        (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, Promote);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
    TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, Expand);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, Expand);
  }

  // Decide how to handle f32. If the target does not have native support for
  // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
  if (!isTypeLegal(MVT::f32)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
      TransformToType[MVT::f32] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::f32, Promote);
    } else {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
      TransformToType[MVT::f32] = MVT::i32;
      ValueTypeActions.setTypeAction(MVT::f32, Expand);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (!isTypeLegal(VT)) {
      MVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] =
        getVectorTypeBreakdown(VT,
                               IntermediateVT, NumIntermediates,
                               RegisterVT);
      RegisterTypeForVT[i] = RegisterVT;
      TransformToType[i] = MVT::Other; // this isn't actually used
      ValueTypeActions.setTypeAction(VT, Expand);
    }
  }
}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}


MVT TargetLowering::getSetCCResultType(const SDOperand &) const {
  return getValueType(TD->getIntPtrType());
}

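
// Example (sketch): after computeRegisterProperties() above has run for a
// hypothetical 32-bit target whose largest legal integer register is i32,
// the derived tables answer queries such as:
//
//   TLI.getTypeToTransformTo(MVT::i64);  // MVT::i32 (expanded in two halves)
//   TLI.getNumRegisters(MVT::i64);       // 2
//   TLI.getRegisterType(MVT::i64);       // MVT::i32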
/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
                                                MVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getTypeToTransformTo(NewVT);
  RegisterVT = DestVT;
  if (DestVT.bitsLT(NewVT)) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }

  return 1;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  return TD->getCallFrameTypeAlignment(Ty);
}

SDOperand TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                   SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
                                                        const APInt &Demanded) {
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch(Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (C->getAPIntValue().intersects(~Demanded)) {
        MVT VT = Op.getValueType();
        SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                    DAG.getConstant(Demanded &
                                                      C->getAPIntValue(),
                                                    VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}

/// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream.
If we can 624/// use this information to simplify Op, create a new simplified DAG node and 625/// return true, returning the original and new nodes in Old and New. Otherwise, 626/// analyze the expression and return a mask of KnownOne and KnownZero bits for 627/// the expression (used to simplify the caller). The KnownZero/One bits may 628/// only be accurate for those bits in the DemandedMask. 629bool TargetLowering::SimplifyDemandedBits(SDOperand Op, 630 const APInt &DemandedMask, 631 APInt &KnownZero, 632 APInt &KnownOne, 633 TargetLoweringOpt &TLO, 634 unsigned Depth) const { 635 unsigned BitWidth = DemandedMask.getBitWidth(); 636 assert(Op.getValueSizeInBits() == BitWidth && 637 "Mask size mismatches value type size!"); 638 APInt NewMask = DemandedMask; 639 640 // Don't know anything. 641 KnownZero = KnownOne = APInt(BitWidth, 0); 642 643 // Other users may use these bits. 644 if (!Op.Val->hasOneUse()) { 645 if (Depth != 0) { 646 // If not at the root, Just compute the KnownZero/KnownOne bits to 647 // simplify things downstream. 648 TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth); 649 return false; 650 } 651 // If this is the root being simplified, allow it to have multiple uses, 652 // just set the NewMask to all bits. 653 NewMask = APInt::getAllOnesValue(BitWidth); 654 } else if (DemandedMask == 0) { 655 // Not demanding any bits from Op. 656 if (Op.getOpcode() != ISD::UNDEF) 657 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType())); 658 return false; 659 } else if (Depth == 6) { // Limit search depth. 660 return false; 661 } 662 663 APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut; 664 switch (Op.getOpcode()) { 665 case ISD::Constant: 666 // We know all of the bits for a constant! 667 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask; 668 KnownZero = ~KnownOne & NewMask; 669 return false; // Don't fall through, will infinitely loop. 670 case ISD::AND: 671 // If the RHS is a constant, check to see if the LHS would be zero without 672 // using the bits from the RHS. Below, we use knowledge about the RHS to 673 // simplify the LHS, here we're using information from the LHS to simplify 674 // the RHS. 675 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 676 APInt LHSZero, LHSOne; 677 TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask, 678 LHSZero, LHSOne, Depth+1); 679 // If the LHS already has zeros where RHSC does, this and is dead. 680 if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask)) 681 return TLO.CombineTo(Op, Op.getOperand(0)); 682 // If any of the set bits in the RHS are known zero on the LHS, shrink 683 // the constant. 684 if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask)) 685 return true; 686 } 687 688 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 689 KnownOne, TLO, Depth+1)) 690 return true; 691 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 692 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask, 693 KnownZero2, KnownOne2, TLO, Depth+1)) 694 return true; 695 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 696 697 // If all of the demanded bits are known one on one side, return the other. 698 // These bits cannot contribute to the result of the 'and'. 
699 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 700 return TLO.CombineTo(Op, Op.getOperand(0)); 701 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 702 return TLO.CombineTo(Op, Op.getOperand(1)); 703 // If all of the demanded bits in the inputs are known zeros, return zero. 704 if ((NewMask & (KnownZero|KnownZero2)) == NewMask) 705 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType())); 706 // If the RHS is a constant, see if we can simplify it. 707 if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask)) 708 return true; 709 710 // Output known-1 bits are only known if set in both the LHS & RHS. 711 KnownOne &= KnownOne2; 712 // Output known-0 are known to be clear if zero in either the LHS | RHS. 713 KnownZero |= KnownZero2; 714 break; 715 case ISD::OR: 716 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 717 KnownOne, TLO, Depth+1)) 718 return true; 719 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 720 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask, 721 KnownZero2, KnownOne2, TLO, Depth+1)) 722 return true; 723 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 724 725 // If all of the demanded bits are known zero on one side, return the other. 726 // These bits cannot contribute to the result of the 'or'. 727 if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask)) 728 return TLO.CombineTo(Op, Op.getOperand(0)); 729 if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask)) 730 return TLO.CombineTo(Op, Op.getOperand(1)); 731 // If all of the potentially set bits on one side are known to be set on 732 // the other side, just use the 'other' side. 733 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 734 return TLO.CombineTo(Op, Op.getOperand(0)); 735 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 736 return TLO.CombineTo(Op, Op.getOperand(1)); 737 // If the RHS is a constant, see if we can simplify it. 738 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 739 return true; 740 741 // Output known-0 bits are only known if clear in both the LHS & RHS. 742 KnownZero &= KnownZero2; 743 // Output known-1 are known to be set if set in either the LHS | RHS. 744 KnownOne |= KnownOne2; 745 break; 746 case ISD::XOR: 747 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 748 KnownOne, TLO, Depth+1)) 749 return true; 750 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 751 if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2, 752 KnownOne2, TLO, Depth+1)) 753 return true; 754 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 755 756 // If all of the demanded bits are known zero on one side, return the other. 757 // These bits cannot contribute to the result of the 'xor'. 758 if ((KnownZero & NewMask) == NewMask) 759 return TLO.CombineTo(Op, Op.getOperand(0)); 760 if ((KnownZero2 & NewMask) == NewMask) 761 return TLO.CombineTo(Op, Op.getOperand(1)); 762 763 // If all of the unknown bits are known to be zero on one side or the other 764 // (but not both) turn this into an *inclusive* or. 765 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 766 if ((NewMask & ~KnownZero & ~KnownZero2) == 0) 767 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(), 768 Op.getOperand(0), 769 Op.getOperand(1))); 770 771 // Output known-0 bits are known if clear or set in both the LHS & RHS. 
772 KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 773 // Output known-1 are known to be set if set in only one of the LHS, RHS. 774 KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 775 776 // If all of the demanded bits on one side are known, and all of the set 777 // bits on that side are also known to be set on the other side, turn this 778 // into an AND, as we know the bits will be cleared. 779 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 780 if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known 781 if ((KnownOne & KnownOne2) == KnownOne) { 782 MVT VT = Op.getValueType(); 783 SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); 784 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0), 785 ANDC)); 786 } 787 } 788 789 // If the RHS is a constant, see if we can simplify it. 790 // for XOR, we prefer to force bits to 1 if they will make a -1. 791 // if we can't force bits, try to shrink constant 792 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 793 APInt Expanded = C->getAPIntValue() | (~NewMask); 794 // if we can expand it to have all bits set, do it 795 if (Expanded.isAllOnesValue()) { 796 if (Expanded != C->getAPIntValue()) { 797 MVT VT = Op.getValueType(); 798 SDOperand New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), 799 TLO.DAG.getConstant(Expanded, VT)); 800 return TLO.CombineTo(Op, New); 801 } 802 // if it already has all the bits set, nothing to change 803 // but don't shrink either! 804 } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) { 805 return true; 806 } 807 } 808 809 KnownZero = KnownZeroOut; 810 KnownOne = KnownOneOut; 811 break; 812 case ISD::SELECT: 813 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero, 814 KnownOne, TLO, Depth+1)) 815 return true; 816 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2, 817 KnownOne2, TLO, Depth+1)) 818 return true; 819 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 820 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 821 822 // If the operands are constants, see if we can simplify them. 823 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 824 return true; 825 826 // Only known if known in both the LHS and RHS. 827 KnownOne &= KnownOne2; 828 KnownZero &= KnownZero2; 829 break; 830 case ISD::SELECT_CC: 831 if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero, 832 KnownOne, TLO, Depth+1)) 833 return true; 834 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2, 835 KnownOne2, TLO, Depth+1)) 836 return true; 837 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 838 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 839 840 // If the operands are constants, see if we can simplify them. 841 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 842 return true; 843 844 // Only known if known in both the LHS and RHS. 845 KnownOne &= KnownOne2; 846 KnownZero &= KnownZero2; 847 break; 848 case ISD::SHL: 849 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 850 unsigned ShAmt = SA->getValue(); 851 SDOperand InOp = Op.getOperand(0); 852 853 // If the shift count is an invalid immediate, don't do anything. 854 if (ShAmt >= BitWidth) 855 break; 856 857 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 858 // single shift. We can do this if the bottom bits (which are shifted 859 // out) are never demanded. 
860 if (InOp.getOpcode() == ISD::SRL && 861 isa<ConstantSDNode>(InOp.getOperand(1))) { 862 if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) { 863 unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue(); 864 unsigned Opc = ISD::SHL; 865 int Diff = ShAmt-C1; 866 if (Diff < 0) { 867 Diff = -Diff; 868 Opc = ISD::SRL; 869 } 870 871 SDOperand NewSA = 872 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 873 MVT VT = Op.getValueType(); 874 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, 875 InOp.getOperand(0), NewSA)); 876 } 877 } 878 879 if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt), 880 KnownZero, KnownOne, TLO, Depth+1)) 881 return true; 882 KnownZero <<= SA->getValue(); 883 KnownOne <<= SA->getValue(); 884 // low bits known zero. 885 KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getValue()); 886 } 887 break; 888 case ISD::SRL: 889 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 890 MVT VT = Op.getValueType(); 891 unsigned ShAmt = SA->getValue(); 892 unsigned VTSize = VT.getSizeInBits(); 893 SDOperand InOp = Op.getOperand(0); 894 895 // If the shift count is an invalid immediate, don't do anything. 896 if (ShAmt >= BitWidth) 897 break; 898 899 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 900 // single shift. We can do this if the top bits (which are shifted out) 901 // are never demanded. 902 if (InOp.getOpcode() == ISD::SHL && 903 isa<ConstantSDNode>(InOp.getOperand(1))) { 904 if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) { 905 unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue(); 906 unsigned Opc = ISD::SRL; 907 int Diff = ShAmt-C1; 908 if (Diff < 0) { 909 Diff = -Diff; 910 Opc = ISD::SHL; 911 } 912 913 SDOperand NewSA = 914 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 915 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, 916 InOp.getOperand(0), NewSA)); 917 } 918 } 919 920 // Compute the new bits that are at the top now. 921 if (SimplifyDemandedBits(InOp, (NewMask << ShAmt), 922 KnownZero, KnownOne, TLO, Depth+1)) 923 return true; 924 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 925 KnownZero = KnownZero.lshr(ShAmt); 926 KnownOne = KnownOne.lshr(ShAmt); 927 928 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 929 KnownZero |= HighBits; // High bits known zero. 930 } 931 break; 932 case ISD::SRA: 933 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 934 MVT VT = Op.getValueType(); 935 unsigned ShAmt = SA->getValue(); 936 937 // If the shift count is an invalid immediate, don't do anything. 938 if (ShAmt >= BitWidth) 939 break; 940 941 APInt InDemandedMask = (NewMask << ShAmt); 942 943 // If any of the demanded bits are produced by the sign extension, we also 944 // demand the input sign bit. 945 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 946 if (HighBits.intersects(NewMask)) 947 InDemandedMask |= APInt::getSignBit(VT.getSizeInBits()); 948 949 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, 950 KnownZero, KnownOne, TLO, Depth+1)) 951 return true; 952 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 953 KnownZero = KnownZero.lshr(ShAmt); 954 KnownOne = KnownOne.lshr(ShAmt); 955 956 // Handle the sign bit, adjusted to where it is now in the mask. 
957 APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt); 958 959 // If the input sign bit is known to be zero, or if none of the top bits 960 // are demanded, turn this into an unsigned shift right. 961 if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) { 962 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0), 963 Op.getOperand(1))); 964 } else if (KnownOne.intersects(SignBit)) { // New bits are known one. 965 KnownOne |= HighBits; 966 } 967 } 968 break; 969 case ISD::SIGN_EXTEND_INREG: { 970 MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 971 972 // Sign extension. Compute the demanded bits in the result that are not 973 // present in the input. 974 APInt NewBits = APInt::getHighBitsSet(BitWidth, 975 BitWidth - EVT.getSizeInBits()) & 976 NewMask; 977 978 // If none of the extended bits are demanded, eliminate the sextinreg. 979 if (NewBits == 0) 980 return TLO.CombineTo(Op, Op.getOperand(0)); 981 982 APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits()); 983 InSignBit.zext(BitWidth); 984 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, 985 EVT.getSizeInBits()) & 986 NewMask; 987 988 // Since the sign extended bits are demanded, we know that the sign 989 // bit is demanded. 990 InputDemandedBits |= InSignBit; 991 992 if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits, 993 KnownZero, KnownOne, TLO, Depth+1)) 994 return true; 995 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 996 997 // If the sign bit of the input is known set or clear, then we know the 998 // top bits of the result. 999 1000 // If the input sign bit is known zero, convert this into a zero extension. 1001 if (KnownZero.intersects(InSignBit)) 1002 return TLO.CombineTo(Op, 1003 TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT)); 1004 1005 if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 1006 KnownOne |= NewBits; 1007 KnownZero &= ~NewBits; 1008 } else { // Input sign bit unknown 1009 KnownZero &= ~NewBits; 1010 KnownOne &= ~NewBits; 1011 } 1012 break; 1013 } 1014 case ISD::ZERO_EXTEND: { 1015 unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits(); 1016 APInt InMask = NewMask; 1017 InMask.trunc(OperandBitWidth); 1018 1019 // If none of the top bits are demanded, convert this into an any_extend. 1020 APInt NewBits = 1021 APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask; 1022 if (!NewBits.intersects(NewMask)) 1023 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, 1024 Op.getValueType(), 1025 Op.getOperand(0))); 1026 1027 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1028 KnownZero, KnownOne, TLO, Depth+1)) 1029 return true; 1030 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1031 KnownZero.zext(BitWidth); 1032 KnownOne.zext(BitWidth); 1033 KnownZero |= NewBits; 1034 break; 1035 } 1036 case ISD::SIGN_EXTEND: { 1037 MVT InVT = Op.getOperand(0).getValueType(); 1038 unsigned InBits = InVT.getSizeInBits(); 1039 APInt InMask = APInt::getLowBitsSet(BitWidth, InBits); 1040 APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits); 1041 APInt NewBits = ~InMask & NewMask; 1042 1043 // If none of the top bits are demanded, convert this into an any_extend. 1044 if (NewBits == 0) 1045 return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND,Op.getValueType(), 1046 Op.getOperand(0))); 1047 1048 // Since some of the sign extended bits are demanded, we know that the sign 1049 // bit is demanded. 
1050 APInt InDemandedBits = InMask & NewMask; 1051 InDemandedBits |= InSignBit; 1052 InDemandedBits.trunc(InBits); 1053 1054 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero, 1055 KnownOne, TLO, Depth+1)) 1056 return true; 1057 KnownZero.zext(BitWidth); 1058 KnownOne.zext(BitWidth); 1059 1060 // If the sign bit is known zero, convert this to a zero extend. 1061 if (KnownZero.intersects(InSignBit)) 1062 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, 1063 Op.getValueType(), 1064 Op.getOperand(0))); 1065 1066 // If the sign bit is known one, the top bits match. 1067 if (KnownOne.intersects(InSignBit)) { 1068 KnownOne |= NewBits; 1069 KnownZero &= ~NewBits; 1070 } else { // Otherwise, top bits aren't known. 1071 KnownOne &= ~NewBits; 1072 KnownZero &= ~NewBits; 1073 } 1074 break; 1075 } 1076 case ISD::ANY_EXTEND: { 1077 unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits(); 1078 APInt InMask = NewMask; 1079 InMask.trunc(OperandBitWidth); 1080 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1081 KnownZero, KnownOne, TLO, Depth+1)) 1082 return true; 1083 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1084 KnownZero.zext(BitWidth); 1085 KnownOne.zext(BitWidth); 1086 break; 1087 } 1088 case ISD::TRUNCATE: { 1089 // Simplify the input, using demanded bit information, and compute the known 1090 // zero/one bits live out. 1091 APInt TruncMask = NewMask; 1092 TruncMask.zext(Op.getOperand(0).getValueSizeInBits()); 1093 if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, 1094 KnownZero, KnownOne, TLO, Depth+1)) 1095 return true; 1096 KnownZero.trunc(BitWidth); 1097 KnownOne.trunc(BitWidth); 1098 1099 // If the input is only used by this truncate, see if we can shrink it based 1100 // on the known demanded bits. 1101 if (Op.getOperand(0).Val->hasOneUse()) { 1102 SDOperand In = Op.getOperand(0); 1103 unsigned InBitWidth = In.getValueSizeInBits(); 1104 switch (In.getOpcode()) { 1105 default: break; 1106 case ISD::SRL: 1107 // Shrink SRL by a constant if none of the high bits shifted in are 1108 // demanded. 1109 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){ 1110 APInt HighBits = APInt::getHighBitsSet(InBitWidth, 1111 InBitWidth - BitWidth); 1112 HighBits = HighBits.lshr(ShAmt->getValue()); 1113 HighBits.trunc(BitWidth); 1114 1115 if (ShAmt->getValue() < BitWidth && !(HighBits & NewMask)) { 1116 // None of the shifted in bits are needed. Add a truncate of the 1117 // shift input, then shift it. 1118 SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, 1119 Op.getValueType(), 1120 In.getOperand(0)); 1121 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL,Op.getValueType(), 1122 NewTrunc, In.getOperand(1))); 1123 } 1124 } 1125 break; 1126 } 1127 } 1128 1129 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1130 break; 1131 } 1132 case ISD::AssertZext: { 1133 MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1134 APInt InMask = APInt::getLowBitsSet(BitWidth, 1135 VT.getSizeInBits()); 1136 if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask, 1137 KnownZero, KnownOne, TLO, Depth+1)) 1138 return true; 1139 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1140 KnownZero |= ~InMask & NewMask; 1141 break; 1142 } 1143 case ISD::BIT_CONVERT: 1144#if 0 1145 // If this is an FP->Int bitcast and if the sign bit is the only thing that 1146 // is demanded, turn this into a FGETSIGN. 
1147 if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) && 1148 MVT::isFloatingPoint(Op.getOperand(0).getValueType()) && 1149 !MVT::isVector(Op.getOperand(0).getValueType())) { 1150 // Only do this xform if FGETSIGN is valid or if before legalize. 1151 if (!TLO.AfterLegalize || 1152 isOperationLegal(ISD::FGETSIGN, Op.getValueType())) { 1153 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 1154 // place. We expect the SHL to be eliminated by other optimizations. 1155 SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), 1156 Op.getOperand(0)); 1157 unsigned ShVal = Op.getValueType().getSizeInBits()-1; 1158 SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy()); 1159 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(), 1160 Sign, ShAmt)); 1161 } 1162 } 1163#endif 1164 break; 1165 default: 1166 // Just use ComputeMaskedBits to compute output bits. 1167 TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth); 1168 break; 1169 } 1170 1171 // If we know the value of all of the demanded bits, return this as a 1172 // constant. 1173 if ((NewMask & (KnownZero|KnownOne)) == NewMask) 1174 return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType())); 1175 1176 return false; 1177} 1178 1179/// computeMaskedBitsForTargetNode - Determine which of the bits specified 1180/// in Mask are known to be either zero or one and return them in the 1181/// KnownZero/KnownOne bitsets. 1182void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 1183 const APInt &Mask, 1184 APInt &KnownZero, 1185 APInt &KnownOne, 1186 const SelectionDAG &DAG, 1187 unsigned Depth) const { 1188 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1189 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1190 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1191 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1192 "Should use MaskedValueIsZero if you don't know whether Op" 1193 " is a target node!"); 1194 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 1195} 1196 1197/// ComputeNumSignBitsForTargetNode - This method can be implemented by 1198/// targets that want to expose additional information about sign bits to the 1199/// DAG Combiner. 1200unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op, 1201 unsigned Depth) const { 1202 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1203 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1204 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1205 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1206 "Should use ComputeNumSignBits if you don't know whether Op" 1207 " is a target node!"); 1208 return 1; 1209} 1210 1211 1212/// SimplifySetCC - Try to simplify a setcc built with the specified operands 1213/// and cc. If it is unable to simplify it, return a null SDOperand. 1214SDOperand 1215TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, 1216 ISD::CondCode Cond, bool foldBooleans, 1217 DAGCombinerInfo &DCI) const { 1218 SelectionDAG &DAG = DCI.DAG; 1219 1220 // These setcc operations always fold. 
1221 switch (Cond) { 1222 default: break; 1223 case ISD::SETFALSE: 1224 case ISD::SETFALSE2: return DAG.getConstant(0, VT); 1225 case ISD::SETTRUE: 1226 case ISD::SETTRUE2: return DAG.getConstant(1, VT); 1227 } 1228 1229 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val)) { 1230 const APInt &C1 = N1C->getAPIntValue(); 1231 if (isa<ConstantSDNode>(N0.Val)) { 1232 return DAG.FoldSetCC(VT, N0, N1, Cond); 1233 } else { 1234 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 1235 // equality comparison, then we're just comparing whether X itself is 1236 // zero. 1237 if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) && 1238 N0.getOperand(0).getOpcode() == ISD::CTLZ && 1239 N0.getOperand(1).getOpcode() == ISD::Constant) { 1240 unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue(); 1241 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1242 ShAmt == Log2_32(N0.getValueType().getSizeInBits())) { 1243 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 1244 // (srl (ctlz x), 5) == 0 -> X != 0 1245 // (srl (ctlz x), 5) != 1 -> X != 0 1246 Cond = ISD::SETNE; 1247 } else { 1248 // (srl (ctlz x), 5) != 0 -> X == 0 1249 // (srl (ctlz x), 5) == 1 -> X == 0 1250 Cond = ISD::SETEQ; 1251 } 1252 SDOperand Zero = DAG.getConstant(0, N0.getValueType()); 1253 return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0), 1254 Zero, Cond); 1255 } 1256 } 1257 1258 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 1259 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 1260 unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits(); 1261 1262 // If the comparison constant has bits in the upper part, the 1263 // zero-extended value could never match. 1264 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 1265 C1.getBitWidth() - InSize))) { 1266 switch (Cond) { 1267 case ISD::SETUGT: 1268 case ISD::SETUGE: 1269 case ISD::SETEQ: return DAG.getConstant(0, VT); 1270 case ISD::SETULT: 1271 case ISD::SETULE: 1272 case ISD::SETNE: return DAG.getConstant(1, VT); 1273 case ISD::SETGT: 1274 case ISD::SETGE: 1275 // True if the sign bit of C1 is set. 1276 return DAG.getConstant(C1.isNegative(), VT); 1277 case ISD::SETLT: 1278 case ISD::SETLE: 1279 // True if the sign bit of C1 isn't set. 1280 return DAG.getConstant(C1.isNonNegative(), VT); 1281 default: 1282 break; 1283 } 1284 } 1285 1286 // Otherwise, we can perform the comparison with the low bits. 1287 switch (Cond) { 1288 case ISD::SETEQ: 1289 case ISD::SETNE: 1290 case ISD::SETUGT: 1291 case ISD::SETUGE: 1292 case ISD::SETULT: 1293 case ISD::SETULE: 1294 return DAG.getSetCC(VT, N0.getOperand(0), 1295 DAG.getConstant(APInt(C1).trunc(InSize), 1296 N0.getOperand(0).getValueType()), 1297 Cond); 1298 default: 1299 break; // todo, be more careful with signed comparisons 1300 } 1301 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 1302 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1303 MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 1304 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 1305 MVT ExtDstTy = N0.getValueType(); 1306 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 1307 1308 // If the extended part has any inconsistent bits, it cannot ever 1309 // compare equal. In other words, they have to be all ones or all 1310 // zeros. 
1311 APInt ExtBits = 1312 APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits); 1313 if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits) 1314 return DAG.getConstant(Cond == ISD::SETNE, VT); 1315 1316 SDOperand ZextOp; 1317 MVT Op0Ty = N0.getOperand(0).getValueType(); 1318 if (Op0Ty == ExtSrcTy) { 1319 ZextOp = N0.getOperand(0); 1320 } else { 1321 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 1322 ZextOp = DAG.getNode(ISD::AND, Op0Ty, N0.getOperand(0), 1323 DAG.getConstant(Imm, Op0Ty)); 1324 } 1325 if (!DCI.isCalledByLegalizer()) 1326 DCI.AddToWorklist(ZextOp.Val); 1327 // Otherwise, make this a use of a zext. 1328 return DAG.getSetCC(VT, ZextOp, 1329 DAG.getConstant(C1 & APInt::getLowBitsSet( 1330 ExtDstTyBits, 1331 ExtSrcTyBits), 1332 ExtDstTy), 1333 Cond); 1334 } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) && 1335 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1336 1337 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 1338 if (N0.getOpcode() == ISD::SETCC) { 1339 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getValue() != 1); 1340 if (TrueWhenTrue) 1341 return N0; 1342 1343 // Invert the condition. 1344 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 1345 CC = ISD::getSetCCInverse(CC, 1346 N0.getOperand(0).getValueType().isInteger()); 1347 return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC); 1348 } 1349 1350 if ((N0.getOpcode() == ISD::XOR || 1351 (N0.getOpcode() == ISD::AND && 1352 N0.getOperand(0).getOpcode() == ISD::XOR && 1353 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 1354 isa<ConstantSDNode>(N0.getOperand(1)) && 1355 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) { 1356 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 1357 // can only do this if the top bits are known zero. 1358 unsigned BitWidth = N0.getValueSizeInBits(); 1359 if (DAG.MaskedValueIsZero(N0, 1360 APInt::getHighBitsSet(BitWidth, 1361 BitWidth-1))) { 1362 // Okay, get the un-inverted input value. 1363 SDOperand Val; 1364 if (N0.getOpcode() == ISD::XOR) 1365 Val = N0.getOperand(0); 1366 else { 1367 assert(N0.getOpcode() == ISD::AND && 1368 N0.getOperand(0).getOpcode() == ISD::XOR); 1369 // ((X^1)&1)^1 -> X & 1 1370 Val = DAG.getNode(ISD::AND, N0.getValueType(), 1371 N0.getOperand(0).getOperand(0), 1372 N0.getOperand(1)); 1373 } 1374 return DAG.getSetCC(VT, Val, N1, 1375 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 1376 } 1377 } 1378 } 1379 1380 APInt MinVal, MaxVal; 1381 unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits(); 1382 if (ISD::isSignedIntSetCC(Cond)) { 1383 MinVal = APInt::getSignedMinValue(OperandBitSize); 1384 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 1385 } else { 1386 MinVal = APInt::getMinValue(OperandBitSize); 1387 MaxVal = APInt::getMaxValue(OperandBitSize); 1388 } 1389 1390 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 1391 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 1392 if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true 1393 // X >= C0 --> X > (C0-1) 1394 return DAG.getSetCC(VT, N0, DAG.getConstant(C1-1, N1.getValueType()), 1395 (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT); 1396 } 1397 1398 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 1399 if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true 1400 // X <= C0 --> X < (C0+1) 1401 return DAG.getSetCC(VT, N0, DAG.getConstant(C1+1, N1.getValueType()), 1402 (Cond == ISD::SETLE) ? 
ISD::SETLT : ISD::SETULT); 1403 } 1404 1405 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal) 1406 return DAG.getConstant(0, VT); // X < MIN --> false 1407 if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal) 1408 return DAG.getConstant(1, VT); // X >= MIN --> true 1409 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal) 1410 return DAG.getConstant(0, VT); // X > MAX --> false 1411 if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal) 1412 return DAG.getConstant(1, VT); // X <= MAX --> true 1413 1414 // Canonicalize setgt X, Min --> setne X, Min 1415 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal) 1416 return DAG.getSetCC(VT, N0, N1, ISD::SETNE); 1417 // Canonicalize setlt X, Max --> setne X, Max 1418 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal) 1419 return DAG.getSetCC(VT, N0, N1, ISD::SETNE); 1420 1421 // If we have setult X, 1, turn it into seteq X, 0 1422 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1) 1423 return DAG.getSetCC(VT, N0, DAG.getConstant(MinVal, N0.getValueType()), 1424 ISD::SETEQ); 1425 // If we have setugt X, Max-1, turn it into seteq X, Max 1426 else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1) 1427 return DAG.getSetCC(VT, N0, DAG.getConstant(MaxVal, N0.getValueType()), 1428 ISD::SETEQ); 1429 1430 // If we have "setcc X, C0", check to see if we can shrink the immediate 1431 // by changing cc. 1432 1433 // SETUGT X, SINTMAX -> SETLT X, 0 1434 if (Cond == ISD::SETUGT && OperandBitSize != 1 && 1435 C1 == (~0ULL >> (65-OperandBitSize))) 1436 return DAG.getSetCC(VT, N0, DAG.getConstant(0, N1.getValueType()), 1437 ISD::SETLT); 1438 1439 // FIXME: Implement the rest of these. 1440 1441 // Fold bit comparisons when we can. 1442 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1443 VT == N0.getValueType() && N0.getOpcode() == ISD::AND) 1444 if (ConstantSDNode *AndRHS = 1445 dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 1446 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 1447 // Perform the xform if the AND RHS is a single bit. 1448 if (isPowerOf2_64(AndRHS->getValue())) { 1449 return DAG.getNode(ISD::SRL, VT, N0, 1450 DAG.getConstant(Log2_64(AndRHS->getValue()), 1451 getShiftAmountTy())); 1452 } 1453 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getValue()) { 1454 // (X & 8) == 8 --> (X & 8) >> 3 1455 // Perform the xform if C1 is a single bit. 1456 if (C1.isPowerOf2()) { 1457 return DAG.getNode(ISD::SRL, VT, N0, 1458 DAG.getConstant(C1.logBase2(), getShiftAmountTy())); 1459 } 1460 } 1461 } 1462 } 1463 } else if (isa<ConstantSDNode>(N0.Val)) { 1464 // Ensure that the constant occurs on the RHS. 1465 return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond)); 1466 } 1467 1468 if (isa<ConstantFPSDNode>(N0.Val)) { 1469 // Constant fold or commute setcc. 1470 SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond); 1471 if (O.Val) return O; 1472 } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.Val)) { 1473 // If the RHS of an FP comparison is a constant, simplify it away in 1474 // some cases. 1475 if (CFP->getValueAPF().isNaN()) { 1476 // If an operand is known to be a nan, we can fold it. 1477 switch (ISD::getUnorderedFlavor(Cond)) { 1478 default: assert(0 && "Unknown flavor!"); 1479 case 0: // Known false. 1480 return DAG.getConstant(0, VT); 1481 case 1: // Known true. 1482 return DAG.getConstant(1, VT); 1483 case 2: // Undefined. 
1484 return DAG.getNode(ISD::UNDEF, VT); 1485 } 1486 } 1487 1488 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 1489 // constant if knowing that the operand is non-nan is enough. We prefer to 1490 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 1491 // materialize 0.0. 1492 if (Cond == ISD::SETO || Cond == ISD::SETUO) 1493 return DAG.getSetCC(VT, N0, N0, Cond); 1494 } 1495 1496 if (N0 == N1) { 1497 // We can always fold X == X for integer setcc's. 1498 if (N0.getValueType().isInteger()) 1499 return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT); 1500 unsigned UOF = ISD::getUnorderedFlavor(Cond); 1501 if (UOF == 2) // FP operators that are undefined on NaNs. 1502 return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT); 1503 if (UOF == unsigned(ISD::isTrueWhenEqual(Cond))) 1504 return DAG.getConstant(UOF, VT); 1505 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 1506 // if it is not already. 1507 ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO; 1508 if (NewCond != Cond) 1509 return DAG.getSetCC(VT, N0, N1, NewCond); 1510 } 1511 1512 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1513 N0.getValueType().isInteger()) { 1514 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 1515 N0.getOpcode() == ISD::XOR) { 1516 // Simplify (X+Y) == (X+Z) --> Y == Z 1517 if (N0.getOpcode() == N1.getOpcode()) { 1518 if (N0.getOperand(0) == N1.getOperand(0)) 1519 return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(1), Cond); 1520 if (N0.getOperand(1) == N1.getOperand(1)) 1521 return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(0), Cond); 1522 if (DAG.isCommutativeBinOp(N0.getOpcode())) { 1523 // If X op Y == Y op X, try other combinations. 1524 if (N0.getOperand(0) == N1.getOperand(1)) 1525 return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(0), Cond); 1526 if (N0.getOperand(1) == N1.getOperand(0)) 1527 return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(1), Cond); 1528 } 1529 } 1530 1531 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) { 1532 if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 1533 // Turn (X+C1) == C2 --> X == C2-C1 1534 if (N0.getOpcode() == ISD::ADD && N0.Val->hasOneUse()) { 1535 return DAG.getSetCC(VT, N0.getOperand(0), 1536 DAG.getConstant(RHSC->getValue()-LHSR->getValue(), 1537 N0.getValueType()), Cond); 1538 } 1539 1540 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 1541 if (N0.getOpcode() == ISD::XOR) 1542 // If we know that all of the inverted bits are zero, don't bother 1543 // performing the inversion. 
1544 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 1545 return 1546 DAG.getSetCC(VT, N0.getOperand(0), 1547 DAG.getConstant(LHSR->getAPIntValue() ^ 1548 RHSC->getAPIntValue(), 1549 N0.getValueType()), 1550 Cond); 1551 } 1552 1553 // Turn (C1-X) == C2 --> X == C1-C2 1554 if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 1555 if (N0.getOpcode() == ISD::SUB && N0.Val->hasOneUse()) { 1556 return 1557 DAG.getSetCC(VT, N0.getOperand(1), 1558 DAG.getConstant(SUBC->getAPIntValue() - 1559 RHSC->getAPIntValue(), 1560 N0.getValueType()), 1561 Cond); 1562 } 1563 } 1564 } 1565 1566 // Simplify (X+Z) == X --> Z == 0 1567 if (N0.getOperand(0) == N1) 1568 return DAG.getSetCC(VT, N0.getOperand(1), 1569 DAG.getConstant(0, N0.getValueType()), Cond); 1570 if (N0.getOperand(1) == N1) { 1571 if (DAG.isCommutativeBinOp(N0.getOpcode())) 1572 return DAG.getSetCC(VT, N0.getOperand(0), 1573 DAG.getConstant(0, N0.getValueType()), Cond); 1574 else if (N0.Val->hasOneUse()) { 1575 assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!"); 1576 // (Z-X) == X --> Z == X<<1 1577 SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), 1578 N1, 1579 DAG.getConstant(1, getShiftAmountTy())); 1580 if (!DCI.isCalledByLegalizer()) 1581 DCI.AddToWorklist(SH.Val); 1582 return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond); 1583 } 1584 } 1585 } 1586 1587 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 1588 N1.getOpcode() == ISD::XOR) { 1589 // Simplify X == (X+Z) --> Z == 0 1590 if (N1.getOperand(0) == N0) { 1591 return DAG.getSetCC(VT, N1.getOperand(1), 1592 DAG.getConstant(0, N1.getValueType()), Cond); 1593 } else if (N1.getOperand(1) == N0) { 1594 if (DAG.isCommutativeBinOp(N1.getOpcode())) { 1595 return DAG.getSetCC(VT, N1.getOperand(0), 1596 DAG.getConstant(0, N1.getValueType()), Cond); 1597 } else if (N1.Val->hasOneUse()) { 1598 assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!"); 1599 // X == (Z-X) --> X<<1 == Z 1600 SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0, 1601 DAG.getConstant(1, getShiftAmountTy())); 1602 if (!DCI.isCalledByLegalizer()) 1603 DCI.AddToWorklist(SH.Val); 1604 return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond); 1605 } 1606 } 1607 } 1608 } 1609 1610 // Fold away ALL boolean setcc's. 
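  // Note that for an i1 value the bit pattern 1 reads as -1 when interpreted
  // as signed, so 0 is the signed maximum and 1 the signed minimum; that is
  // why the signed comparisons below test for "X == 0 & Y == 1" and the like.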
1611 SDOperand Temp; 1612 if (N0.getValueType() == MVT::i1 && foldBooleans) { 1613 switch (Cond) { 1614 default: assert(0 && "Unknown integer setcc!"); 1615 case ISD::SETEQ: // X == Y -> (X^Y)^1 1616 Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1); 1617 N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1)); 1618 if (!DCI.isCalledByLegalizer()) 1619 DCI.AddToWorklist(Temp.Val); 1620 break; 1621 case ISD::SETNE: // X != Y --> (X^Y) 1622 N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1); 1623 break; 1624 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> X^1 & Y 1625 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> X^1 & Y 1626 Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1)); 1627 N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp); 1628 if (!DCI.isCalledByLegalizer()) 1629 DCI.AddToWorklist(Temp.Val); 1630 break; 1631 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> Y^1 & X 1632 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> Y^1 & X 1633 Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1)); 1634 N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp); 1635 if (!DCI.isCalledByLegalizer()) 1636 DCI.AddToWorklist(Temp.Val); 1637 break; 1638 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> X^1 | Y 1639 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> X^1 | Y 1640 Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1)); 1641 N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp); 1642 if (!DCI.isCalledByLegalizer()) 1643 DCI.AddToWorklist(Temp.Val); 1644 break; 1645 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> Y^1 | X 1646 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> Y^1 | X 1647 Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1)); 1648 N0 = DAG.getNode(ISD::OR, MVT::i1, N0, Temp); 1649 break; 1650 } 1651 if (VT != MVT::i1) { 1652 if (!DCI.isCalledByLegalizer()) 1653 DCI.AddToWorklist(N0.Val); 1654 // FIXME: If running after legalize, we probably can't do this. 1655 N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0); 1656 } 1657 return N0; 1658 } 1659 1660 // Could not fold it. 1661 return SDOperand(); 1662} 1663 1664/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 1665/// node is a GlobalAddress + offset. 1666bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA, 1667 int64_t &Offset) const { 1668 if (isa<GlobalAddressSDNode>(N)) { 1669 GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N); 1670 GA = GASD->getGlobal(); 1671 Offset += GASD->getOffset(); 1672 return true; 1673 } 1674 1675 if (N->getOpcode() == ISD::ADD) { 1676 SDOperand N1 = N->getOperand(0); 1677 SDOperand N2 = N->getOperand(1); 1678 if (isGAPlusOffset(N1.Val, GA, Offset)) { 1679 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 1680 if (V) { 1681 Offset += V->getSignExtended(); 1682 return true; 1683 } 1684 } else if (isGAPlusOffset(N2.Val, GA, Offset)) { 1685 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 1686 if (V) { 1687 Offset += V->getSignExtended(); 1688 return true; 1689 } 1690 } 1691 } 1692 return false; 1693} 1694 1695 1696/// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is 1697/// loading 'Bytes' bytes from a location that is 'Dist' units away from the 1698/// location that the 'Base' load is loading from. 
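/// For example, with Bytes == 4 and Dist == 1 this returns true when Base and
/// LD share the same chain and load 4-byte stack objects whose frame offsets
/// differ by exactly 4.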
1699bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base, 1700 unsigned Bytes, int Dist, 1701 const MachineFrameInfo *MFI) const { 1702 if (LD->getOperand(0).Val != Base->getOperand(0).Val) 1703 return false; 1704 MVT VT = LD->getValueType(0); 1705 if (VT.getSizeInBits() / 8 != Bytes) 1706 return false; 1707 1708 SDOperand Loc = LD->getOperand(1); 1709 SDOperand BaseLoc = Base->getOperand(1); 1710 if (Loc.getOpcode() == ISD::FrameIndex) { 1711 if (BaseLoc.getOpcode() != ISD::FrameIndex) 1712 return false; 1713 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 1714 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 1715 int FS = MFI->getObjectSize(FI); 1716 int BFS = MFI->getObjectSize(BFI); 1717 if (FS != BFS || FS != (int)Bytes) return false; 1718 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 1719 } 1720 1721 GlobalValue *GV1 = NULL; 1722 GlobalValue *GV2 = NULL; 1723 int64_t Offset1 = 0; 1724 int64_t Offset2 = 0; 1725 bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1); 1726 bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2); 1727 if (isGA1 && isGA2 && GV1 == GV2) 1728 return Offset1 == (Offset2 + Dist*Bytes); 1729 return false; 1730} 1731 1732 1733SDOperand TargetLowering:: 1734PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { 1735 // Default implementation: no optimization. 1736 return SDOperand(); 1737} 1738 1739//===----------------------------------------------------------------------===// 1740// Inline Assembler Implementation Methods 1741//===----------------------------------------------------------------------===// 1742 1743 1744TargetLowering::ConstraintType 1745TargetLowering::getConstraintType(const std::string &Constraint) const { 1746 // FIXME: lots more standard ones to handle. 1747 if (Constraint.size() == 1) { 1748 switch (Constraint[0]) { 1749 default: break; 1750 case 'r': return C_RegisterClass; 1751 case 'm': // memory 1752 case 'o': // offsetable 1753 case 'V': // not offsetable 1754 return C_Memory; 1755 case 'i': // Simple Integer or Relocatable Constant 1756 case 'n': // Simple Integer 1757 case 's': // Relocatable Constant 1758 case 'X': // Allow ANY value. 1759 case 'I': // Target registers. 1760 case 'J': 1761 case 'K': 1762 case 'L': 1763 case 'M': 1764 case 'N': 1765 case 'O': 1766 case 'P': 1767 return C_Other; 1768 } 1769 } 1770 1771 if (Constraint.size() > 1 && Constraint[0] == '{' && 1772 Constraint[Constraint.size()-1] == '}') 1773 return C_Register; 1774 return C_Unknown; 1775} 1776 1777/// LowerXConstraint - try to replace an X constraint, which matches anything, 1778/// with another that has more specific requirements based on the type of the 1779/// corresponding operand. 1780const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const{ 1781 if (ConstraintVT.isInteger()) 1782 return "r"; 1783 if (ConstraintVT.isFloatingPoint()) 1784 return "f"; // works for many targets 1785 return 0; 1786} 1787 1788/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 1789/// vector. If it is invalid, don't add anything to Ops. 1790void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op, 1791 char ConstraintLetter, 1792 std::vector<SDOperand> &Ops, 1793 SelectionDAG &DAG) const { 1794 switch (ConstraintLetter) { 1795 default: break; 1796 case 'X': // Allows any operand; labels (basic block) use this. 
1797    if (Op.getOpcode() == ISD::BasicBlock) {
1798      Ops.push_back(Op);
1799      return;
1800    }
1801    // fall through
1802  case 'i':    // Simple Integer or Relocatable Constant
1803  case 'n':    // Simple Integer
1804  case 's': {  // Relocatable Constant
1805    // These operands are interested in values of the form (GV+C), where C may
1806    // be folded in as an offset of GV, or it may be explicitly added. Also, it
1807    // is possible and fine if either GV or C are missing.
1808    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1809    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
1810
1811    // If we have "(add GV, C)", pull out GV/C
1812    if (Op.getOpcode() == ISD::ADD) {
1813      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
1814      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
1815      if (C == 0 || GA == 0) {
1816        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
1817        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
1818      }
1819      if (C == 0 || GA == 0)
1820        C = 0, GA = 0;
1821    }
1822
1823    // If we find a valid operand, map to the TargetXXX version so that the
1824    // value itself doesn't get selected.
1825    if (GA) {   // Either &GV or &GV+C
1826      if (ConstraintLetter != 'n') {
1827        int64_t Offs = GA->getOffset();
1828        if (C) Offs += C->getValue();
1829        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
1830                                                 Op.getValueType(), Offs));
1831        return;
1832      }
1833    }
1834    if (C) {   // just C, no GV.
1835      // Simple constants are not allowed for 's'.
1836      if (ConstraintLetter != 's') {
1837        Ops.push_back(DAG.getTargetConstant(C->getValue(), Op.getValueType()));
1838        return;
1839      }
1840    }
1841    break;
1842  }
1843  }
1844}
1845
1846std::vector<unsigned> TargetLowering::
1847getRegClassForInlineAsmConstraint(const std::string &Constraint,
1848                                  MVT VT) const {
1849  return std::vector<unsigned>();
1850}
1851
1852
1853std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
1854getRegForInlineAsmConstraint(const std::string &Constraint,
1855                             MVT VT) const {
1856  if (Constraint[0] != '{')
1857    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
1858  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
1859
1860  // Remove the braces from around the name.
1861  std::string RegName(Constraint.begin()+1, Constraint.end()-1);
1862
1863  // Figure out which register class contains this reg.
1864  const TargetRegisterInfo *RI = TM.getRegisterInfo();
1865  for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
1866       E = RI->regclass_end(); RCI != E; ++RCI) {
1867    const TargetRegisterClass *RC = *RCI;
1868
1869    // If none of the value types for this register class are valid, we
1870    // can't use it. For example, 64-bit reg classes on 32-bit targets.
1871    bool isLegal = false;
1872    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
1873         I != E; ++I) {
1874      if (isTypeLegal(*I)) {
1875        isLegal = true;
1876        break;
1877      }
1878    }
1879
1880    if (!isLegal) continue;
1881
1882    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
1883         I != E; ++I) {
1884      if (StringsEqualNoCase(RegName, RI->get(*I).AsmName))
1885        return std::make_pair(*I, RC);
1886    }
1887  }
1888
1889  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
1890}
1891
1892//===----------------------------------------------------------------------===//
1893// Constraint Selection.
1894
1895/// getConstraintGenerality - Return an integer indicating how general CT
1896/// is.
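/// Higher values are more general: C_Memory ranks above C_RegisterClass and
/// C_Register, with C_Other/C_Unknown the least general, matching the order
/// that ChooseConstraint relies on below.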
1897static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 1898 switch (CT) { 1899 default: assert(0 && "Unknown constraint type!"); 1900 case TargetLowering::C_Other: 1901 case TargetLowering::C_Unknown: 1902 return 0; 1903 case TargetLowering::C_Register: 1904 return 1; 1905 case TargetLowering::C_RegisterClass: 1906 return 2; 1907 case TargetLowering::C_Memory: 1908 return 3; 1909 } 1910} 1911 1912/// ChooseConstraint - If there are multiple different constraints that we 1913/// could pick for this operand (e.g. "imr") try to pick the 'best' one. 1914/// This is somewhat tricky: constraints fall into four classes: 1915/// Other -> immediates and magic values 1916/// Register -> one specific register 1917/// RegisterClass -> a group of regs 1918/// Memory -> memory 1919/// Ideally, we would pick the most specific constraint possible: if we have 1920/// something that fits into a register, we would pick it. The problem here 1921/// is that if we have something that could either be in a register or in 1922/// memory that use of the register could cause selection of *other* 1923/// operands to fail: they might only succeed if we pick memory. Because of 1924/// this the heuristic we use is: 1925/// 1926/// 1) If there is an 'other' constraint, and if the operand is valid for 1927/// that constraint, use it. This makes us take advantage of 'i' 1928/// constraints when available. 1929/// 2) Otherwise, pick the most general constraint present. This prefers 1930/// 'm' over 'r', for example. 1931/// 1932static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 1933 const TargetLowering &TLI, 1934 SDOperand Op, SelectionDAG *DAG) { 1935 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 1936 unsigned BestIdx = 0; 1937 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 1938 int BestGenerality = -1; 1939 1940 // Loop over the options, keeping track of the most general one. 1941 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 1942 TargetLowering::ConstraintType CType = 1943 TLI.getConstraintType(OpInfo.Codes[i]); 1944 1945 // If this is an 'other' constraint, see if the operand is valid for it. 1946 // For example, on X86 we might have an 'rI' constraint. If the operand 1947 // is an integer in the range [0..31] we want to use I (saving a load 1948 // of a register), otherwise we must use 'r'. 1949 if (CType == TargetLowering::C_Other && Op.Val) { 1950 assert(OpInfo.Codes[i].size() == 1 && 1951 "Unhandled multi-letter 'other' constraint"); 1952 std::vector<SDOperand> ResultOps; 1953 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], 1954 ResultOps, *DAG); 1955 if (!ResultOps.empty()) { 1956 BestType = CType; 1957 BestIdx = i; 1958 break; 1959 } 1960 } 1961 1962 // This constraint letter is more general than the previous one, use it. 1963 int Generality = getConstraintGenerality(CType); 1964 if (Generality > BestGenerality) { 1965 BestType = CType; 1966 BestIdx = i; 1967 BestGenerality = Generality; 1968 } 1969 } 1970 1971 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 1972 OpInfo.ConstraintType = BestType; 1973} 1974 1975/// ComputeConstraintToUse - Determines the constraint code and constraint 1976/// type to use for the specific AsmOperandInfo, setting 1977/// OpInfo.ConstraintCode and OpInfo.ConstraintType. 
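/// For example, given the multi-letter constraint "imr": a constant-integer
/// operand is taken by the 'i' alternative (rule 1 in ChooseConstraint above),
/// while a non-constant operand falls back to 'm', the most general of the
/// three alternatives (rule 2).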
1978void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 1979 SDOperand Op, 1980 SelectionDAG *DAG) const { 1981 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 1982 1983 // Single-letter constraints ('r') are very common. 1984 if (OpInfo.Codes.size() == 1) { 1985 OpInfo.ConstraintCode = OpInfo.Codes[0]; 1986 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 1987 } else { 1988 ChooseConstraint(OpInfo, *this, Op, DAG); 1989 } 1990 1991 // 'X' matches anything. 1992 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 1993 // Labels and constants are handled elsewhere ('X' is the only thing 1994 // that matches labels). 1995 if (isa<BasicBlock>(OpInfo.CallOperandVal) || 1996 isa<ConstantInt>(OpInfo.CallOperandVal)) 1997 return; 1998 1999 // Otherwise, try to resolve it to something we know about by looking at 2000 // the actual operand type. 2001 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 2002 OpInfo.ConstraintCode = Repl; 2003 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 2004 } 2005 } 2006} 2007 2008//===----------------------------------------------------------------------===// 2009// Loop Strength Reduction hooks 2010//===----------------------------------------------------------------------===// 2011 2012/// isLegalAddressingMode - Return true if the addressing mode represented 2013/// by AM is legal for this target, for a load/store of the specified type. 2014bool TargetLowering::isLegalAddressingMode(const AddrMode &AM, 2015 const Type *Ty) const { 2016 // The default implementation of this implements a conservative RISCy, r+r and 2017 // r+i addr mode. 2018 2019 // Allows a sign-extended 16-bit immediate field. 2020 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 2021 return false; 2022 2023 // No global is ever allowed as a base. 2024 if (AM.BaseGV) 2025 return false; 2026 2027 // Only support r+r, 2028 switch (AM.Scale) { 2029 case 0: // "r+i" or just "i", depending on HasBaseReg. 2030 break; 2031 case 1: 2032 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 2033 return false; 2034 // Otherwise we have r+r or r+i. 2035 break; 2036 case 2: 2037 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 2038 return false; 2039 // Allow 2*r as r+r. 2040 break; 2041 } 2042 2043 return true; 2044} 2045 2046// Magic for divide replacement 2047 2048struct ms { 2049 int64_t m; // magic number 2050 int64_t s; // shift amount 2051}; 2052 2053struct mu { 2054 uint64_t m; // magic number 2055 int64_t a; // add indicator 2056 int64_t s; // shift amount 2057}; 2058 2059/// magic - calculate the magic numbers required to codegen an integer sdiv as 2060/// a sequence of multiply and shifts. Requires that the divisor not be 0, 1, 2061/// or -1. 
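/// As a worked illustration (quoting the usual Hacker's Delight constants as
/// an assumption rather than recomputing them here): magic32(7) should produce
/// m = 0x92492493 (negative as an int32_t) and s = 2, so BuildSDIV below would
/// emit MULHS(x, m), add the numerator because d > 0 and m < 0, shift right
/// arithmetically by 2, and finally add in the extracted sign bit.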
2062static ms magic32(int32_t d) { 2063 int32_t p; 2064 uint32_t ad, anc, delta, q1, r1, q2, r2, t; 2065 const uint32_t two31 = 0x80000000U; 2066 struct ms mag; 2067 2068 ad = abs(d); 2069 t = two31 + ((uint32_t)d >> 31); 2070 anc = t - 1 - t%ad; // absolute value of nc 2071 p = 31; // initialize p 2072 q1 = two31/anc; // initialize q1 = 2p/abs(nc) 2073 r1 = two31 - q1*anc; // initialize r1 = rem(2p,abs(nc)) 2074 q2 = two31/ad; // initialize q2 = 2p/abs(d) 2075 r2 = two31 - q2*ad; // initialize r2 = rem(2p,abs(d)) 2076 do { 2077 p = p + 1; 2078 q1 = 2*q1; // update q1 = 2p/abs(nc) 2079 r1 = 2*r1; // update r1 = rem(2p/abs(nc)) 2080 if (r1 >= anc) { // must be unsigned comparison 2081 q1 = q1 + 1; 2082 r1 = r1 - anc; 2083 } 2084 q2 = 2*q2; // update q2 = 2p/abs(d) 2085 r2 = 2*r2; // update r2 = rem(2p/abs(d)) 2086 if (r2 >= ad) { // must be unsigned comparison 2087 q2 = q2 + 1; 2088 r2 = r2 - ad; 2089 } 2090 delta = ad - r2; 2091 } while (q1 < delta || (q1 == delta && r1 == 0)); 2092 2093 mag.m = (int32_t)(q2 + 1); // make sure to sign extend 2094 if (d < 0) mag.m = -mag.m; // resulting magic number 2095 mag.s = p - 32; // resulting shift 2096 return mag; 2097} 2098 2099/// magicu - calculate the magic numbers required to codegen an integer udiv as 2100/// a sequence of multiply, add and shifts. Requires that the divisor not be 0. 2101static mu magicu32(uint32_t d) { 2102 int32_t p; 2103 uint32_t nc, delta, q1, r1, q2, r2; 2104 struct mu magu; 2105 magu.a = 0; // initialize "add" indicator 2106 nc = - 1 - (-d)%d; 2107 p = 31; // initialize p 2108 q1 = 0x80000000/nc; // initialize q1 = 2p/nc 2109 r1 = 0x80000000 - q1*nc; // initialize r1 = rem(2p,nc) 2110 q2 = 0x7FFFFFFF/d; // initialize q2 = (2p-1)/d 2111 r2 = 0x7FFFFFFF - q2*d; // initialize r2 = rem((2p-1),d) 2112 do { 2113 p = p + 1; 2114 if (r1 >= nc - r1 ) { 2115 q1 = 2*q1 + 1; // update q1 2116 r1 = 2*r1 - nc; // update r1 2117 } 2118 else { 2119 q1 = 2*q1; // update q1 2120 r1 = 2*r1; // update r1 2121 } 2122 if (r2 + 1 >= d - r2) { 2123 if (q2 >= 0x7FFFFFFF) magu.a = 1; 2124 q2 = 2*q2 + 1; // update q2 2125 r2 = 2*r2 + 1 - d; // update r2 2126 } 2127 else { 2128 if (q2 >= 0x80000000) magu.a = 1; 2129 q2 = 2*q2; // update q2 2130 r2 = 2*r2 + 1; // update r2 2131 } 2132 delta = d - 1 - r2; 2133 } while (p < 64 && (q1 < delta || (q1 == delta && r1 == 0))); 2134 magu.m = q2 + 1; // resulting magic number 2135 magu.s = p - 32; // resulting shift 2136 return magu; 2137} 2138 2139/// magic - calculate the magic numbers required to codegen an integer sdiv as 2140/// a sequence of multiply and shifts. Requires that the divisor not be 0, 1, 2141/// or -1. 2142static ms magic64(int64_t d) { 2143 int64_t p; 2144 uint64_t ad, anc, delta, q1, r1, q2, r2, t; 2145 const uint64_t two63 = 9223372036854775808ULL; // 2^63 2146 struct ms mag; 2147 2148 ad = d >= 0 ? 
d : -d; 2149 t = two63 + ((uint64_t)d >> 63); 2150 anc = t - 1 - t%ad; // absolute value of nc 2151 p = 63; // initialize p 2152 q1 = two63/anc; // initialize q1 = 2p/abs(nc) 2153 r1 = two63 - q1*anc; // initialize r1 = rem(2p,abs(nc)) 2154 q2 = two63/ad; // initialize q2 = 2p/abs(d) 2155 r2 = two63 - q2*ad; // initialize r2 = rem(2p,abs(d)) 2156 do { 2157 p = p + 1; 2158 q1 = 2*q1; // update q1 = 2p/abs(nc) 2159 r1 = 2*r1; // update r1 = rem(2p/abs(nc)) 2160 if (r1 >= anc) { // must be unsigned comparison 2161 q1 = q1 + 1; 2162 r1 = r1 - anc; 2163 } 2164 q2 = 2*q2; // update q2 = 2p/abs(d) 2165 r2 = 2*r2; // update r2 = rem(2p/abs(d)) 2166 if (r2 >= ad) { // must be unsigned comparison 2167 q2 = q2 + 1; 2168 r2 = r2 - ad; 2169 } 2170 delta = ad - r2; 2171 } while (q1 < delta || (q1 == delta && r1 == 0)); 2172 2173 mag.m = q2 + 1; 2174 if (d < 0) mag.m = -mag.m; // resulting magic number 2175 mag.s = p - 64; // resulting shift 2176 return mag; 2177} 2178 2179/// magicu - calculate the magic numbers required to codegen an integer udiv as 2180/// a sequence of multiply, add and shifts. Requires that the divisor not be 0. 2181static mu magicu64(uint64_t d) 2182{ 2183 int64_t p; 2184 uint64_t nc, delta, q1, r1, q2, r2; 2185 struct mu magu; 2186 magu.a = 0; // initialize "add" indicator 2187 nc = - 1 - (-d)%d; 2188 p = 63; // initialize p 2189 q1 = 0x8000000000000000ull/nc; // initialize q1 = 2p/nc 2190 r1 = 0x8000000000000000ull - q1*nc; // initialize r1 = rem(2p,nc) 2191 q2 = 0x7FFFFFFFFFFFFFFFull/d; // initialize q2 = (2p-1)/d 2192 r2 = 0x7FFFFFFFFFFFFFFFull - q2*d; // initialize r2 = rem((2p-1),d) 2193 do { 2194 p = p + 1; 2195 if (r1 >= nc - r1 ) { 2196 q1 = 2*q1 + 1; // update q1 2197 r1 = 2*r1 - nc; // update r1 2198 } 2199 else { 2200 q1 = 2*q1; // update q1 2201 r1 = 2*r1; // update r1 2202 } 2203 if (r2 + 1 >= d - r2) { 2204 if (q2 >= 0x7FFFFFFFFFFFFFFFull) magu.a = 1; 2205 q2 = 2*q2 + 1; // update q2 2206 r2 = 2*r2 + 1 - d; // update r2 2207 } 2208 else { 2209 if (q2 >= 0x8000000000000000ull) magu.a = 1; 2210 q2 = 2*q2; // update q2 2211 r2 = 2*r2 + 1; // update r2 2212 } 2213 delta = d - 1 - r2; 2214 } while (p < 128 && (q1 < delta || (q1 == delta && r1 == 0))); 2215 magu.m = q2 + 1; // resulting magic number 2216 magu.s = p - 64; // resulting shift 2217 return magu; 2218} 2219 2220/// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant, 2221/// return a DAG expression to select that will generate the same value by 2222/// multiplying by a magic number. See: 2223/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 2224SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 2225 std::vector<SDNode*>* Created) const { 2226 MVT VT = N->getValueType(0); 2227 2228 // Check to see if we can do this. 2229 if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64)) 2230 return SDOperand(); // BuildSDIV only operates on i32 or i64 2231 2232 int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended(); 2233 ms magics = (VT == MVT::i32) ? 
magic32(d) : magic64(d);
2234
2235  // Multiply the numerator (operand 0) by the magic value
2236  SDOperand Q;
2237  if (isOperationLegal(ISD::MULHS, VT))
2238    Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
2239                    DAG.getConstant(magics.m, VT));
2240  else if (isOperationLegal(ISD::SMUL_LOHI, VT))
2241    Q = SDOperand(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
2242                              N->getOperand(0),
2243                              DAG.getConstant(magics.m, VT)).Val, 1);
2244  else
2245    return SDOperand();       // No mulhs or equivalent
2246  // If d > 0 and m < 0, add the numerator
2247  if (d > 0 && magics.m < 0) {
2248    Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
2249    if (Created)
2250      Created->push_back(Q.Val);
2251  }
2252  // If d < 0 and m > 0, subtract the numerator.
2253  if (d < 0 && magics.m > 0) {
2254    Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
2255    if (Created)
2256      Created->push_back(Q.Val);
2257  }
2258  // Shift right algebraic if shift value is nonzero
2259  if (magics.s > 0) {
2260    Q = DAG.getNode(ISD::SRA, VT, Q,
2261                    DAG.getConstant(magics.s, getShiftAmountTy()));
2262    if (Created)
2263      Created->push_back(Q.Val);
2264  }
2265  // Extract the sign bit and add it to the quotient
2266  SDOperand T =
2267    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
2268                                                 getShiftAmountTy()));
2269  if (Created)
2270    Created->push_back(T.Val);
2271  return DAG.getNode(ISD::ADD, VT, Q, T);
2272}
2273
2274/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant,
2275/// return a DAG expression to select that will generate the same value by
2276/// multiplying by a magic number. See:
2277/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
2278SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
2279                                    std::vector<SDNode*>* Created) const {
2280  MVT VT = N->getValueType(0);
2281
2282  // Check to see if we can do this.
2283  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
2284    return SDOperand();       // BuildUDIV only operates on i32 or i64
2285
2286  uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
2287  mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);
2288
2289  // Multiply the numerator (operand 0) by the magic value
2290  SDOperand Q;
2291  if (isOperationLegal(ISD::MULHU, VT))
2292    Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
2293                    DAG.getConstant(magics.m, VT));
2294  else if (isOperationLegal(ISD::UMUL_LOHI, VT))
2295    Q = SDOperand(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
2296                              N->getOperand(0),
2297                              DAG.getConstant(magics.m, VT)).Val, 1);
2298  else
2299    return SDOperand();       // No mulhu or equivalent
2300  if (Created)
2301    Created->push_back(Q.Val);
2302
2303  if (magics.a == 0) {
2304    return DAG.getNode(ISD::SRL, VT, Q,
2305                       DAG.getConstant(magics.s, getShiftAmountTy()));
2306  } else {
2307    SDOperand NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
2308    if (Created)
2309      Created->push_back(NPQ.Val);
2310    NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
2311                      DAG.getConstant(1, getShiftAmountTy()));
2312    if (Created)
2313      Created->push_back(NPQ.Val);
2314    NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
2315    if (Created)
2316      Created->push_back(NPQ.Val);
2317    return DAG.getNode(ISD::SRL, VT, NPQ,
2318                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
2319  }
2320}
2321
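// The snippet below is a scalar sketch, kept out of the build with #if 0, of
// the expansion BuildUDIV produces for an unsigned divide by 7. The constants
// assume magicu32(7) returns the usual Hacker's Delight values
// (m == 0x24924925, a == 1, s == 3); they are quoted as an assumption rather
// than recomputed from the routine above, and the helper name is purely
// illustrative.
#if 0
#include <stdint.h>

static uint32_t udiv7_via_magic(uint32_t x) {
  // q = MULHU(x, m): the high 32 bits of the 64-bit product.
  uint32_t q = (uint32_t)(((uint64_t)x * 0x24924925u) >> 32);
  // The add indicator is set, so apply the overflow-safe sub/shift/add fixup
  // that BuildUDIV emits before the final shift.
  uint32_t t = ((x - q) >> 1) + q;
  // When the add indicator is set the final shift amount is s - 1.
  return t >> 2;
}
#endif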