TargetLowering.cpp revision b9333ccdd5658c826f2c7bdd6a542343eed56871
1//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This implements the TargetLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "llvm/Target/TargetAsmInfo.h" 15#include "llvm/Target/TargetLowering.h" 16#include "llvm/Target/TargetSubtarget.h" 17#include "llvm/Target/TargetData.h" 18#include "llvm/Target/TargetMachine.h" 19#include "llvm/Target/TargetRegisterInfo.h" 20#include "llvm/GlobalVariable.h" 21#include "llvm/DerivedTypes.h" 22#include "llvm/CodeGen/MachineFrameInfo.h" 23#include "llvm/CodeGen/SelectionDAG.h" 24#include "llvm/ADT/StringExtras.h" 25#include "llvm/ADT/STLExtras.h" 26#include "llvm/Support/MathExtras.h" 27using namespace llvm; 28 29/// InitLibcallNames - Set default libcall names. 30/// 31static void InitLibcallNames(const char **Names) { 32 Names[RTLIB::SHL_I32] = "__ashlsi3"; 33 Names[RTLIB::SHL_I64] = "__ashldi3"; 34 Names[RTLIB::SHL_I128] = "__ashlti3"; 35 Names[RTLIB::SRL_I32] = "__lshrsi3"; 36 Names[RTLIB::SRL_I64] = "__lshrdi3"; 37 Names[RTLIB::SRL_I128] = "__lshrti3"; 38 Names[RTLIB::SRA_I32] = "__ashrsi3"; 39 Names[RTLIB::SRA_I64] = "__ashrdi3"; 40 Names[RTLIB::SRA_I128] = "__ashrti3"; 41 Names[RTLIB::MUL_I32] = "__mulsi3"; 42 Names[RTLIB::MUL_I64] = "__muldi3"; 43 Names[RTLIB::MUL_I128] = "__multi3"; 44 Names[RTLIB::SDIV_I32] = "__divsi3"; 45 Names[RTLIB::SDIV_I64] = "__divdi3"; 46 Names[RTLIB::SDIV_I128] = "__divti3"; 47 Names[RTLIB::UDIV_I32] = "__udivsi3"; 48 Names[RTLIB::UDIV_I64] = "__udivdi3"; 49 Names[RTLIB::UDIV_I128] = "__udivti3"; 50 Names[RTLIB::SREM_I32] = "__modsi3"; 51 Names[RTLIB::SREM_I64] = "__moddi3"; 52 Names[RTLIB::SREM_I128] = "__modti3"; 53 Names[RTLIB::UREM_I32] = "__umodsi3"; 54 Names[RTLIB::UREM_I64] = "__umoddi3"; 55 Names[RTLIB::UREM_I128] = "__umodti3"; 56 Names[RTLIB::NEG_I32] = "__negsi2"; 57 Names[RTLIB::NEG_I64] = "__negdi2"; 58 Names[RTLIB::ADD_F32] = "__addsf3"; 59 Names[RTLIB::ADD_F64] = "__adddf3"; 60 Names[RTLIB::ADD_F80] = "__addxf3"; 61 Names[RTLIB::ADD_PPCF128] = "__gcc_qadd"; 62 Names[RTLIB::SUB_F32] = "__subsf3"; 63 Names[RTLIB::SUB_F64] = "__subdf3"; 64 Names[RTLIB::SUB_F80] = "__subxf3"; 65 Names[RTLIB::SUB_PPCF128] = "__gcc_qsub"; 66 Names[RTLIB::MUL_F32] = "__mulsf3"; 67 Names[RTLIB::MUL_F64] = "__muldf3"; 68 Names[RTLIB::MUL_F80] = "__mulxf3"; 69 Names[RTLIB::MUL_PPCF128] = "__gcc_qmul"; 70 Names[RTLIB::DIV_F32] = "__divsf3"; 71 Names[RTLIB::DIV_F64] = "__divdf3"; 72 Names[RTLIB::DIV_F80] = "__divxf3"; 73 Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv"; 74 Names[RTLIB::REM_F32] = "fmodf"; 75 Names[RTLIB::REM_F64] = "fmod"; 76 Names[RTLIB::REM_F80] = "fmodl"; 77 Names[RTLIB::REM_PPCF128] = "fmodl"; 78 Names[RTLIB::POWI_F32] = "__powisf2"; 79 Names[RTLIB::POWI_F64] = "__powidf2"; 80 Names[RTLIB::POWI_F80] = "__powixf2"; 81 Names[RTLIB::POWI_PPCF128] = "__powitf2"; 82 Names[RTLIB::SQRT_F32] = "sqrtf"; 83 Names[RTLIB::SQRT_F64] = "sqrt"; 84 Names[RTLIB::SQRT_F80] = "sqrtl"; 85 Names[RTLIB::SQRT_PPCF128] = "sqrtl"; 86 Names[RTLIB::LOG_F32] = "logf"; 87 Names[RTLIB::LOG_F64] = "log"; 88 Names[RTLIB::LOG_F80] = "logl"; 89 Names[RTLIB::LOG_PPCF128] = "logl"; 90 Names[RTLIB::LOG2_F32] = "log2f"; 91 Names[RTLIB::LOG2_F64] = "log2"; 92 Names[RTLIB::LOG2_F80] = "log2l"; 93 
Names[RTLIB::LOG2_PPCF128] = "log2l"; 94 Names[RTLIB::LOG10_F32] = "log10f"; 95 Names[RTLIB::LOG10_F64] = "log10"; 96 Names[RTLIB::LOG10_F80] = "log10l"; 97 Names[RTLIB::LOG10_PPCF128] = "log10l"; 98 Names[RTLIB::EXP_F32] = "expf"; 99 Names[RTLIB::EXP_F64] = "exp"; 100 Names[RTLIB::EXP_F80] = "expl"; 101 Names[RTLIB::EXP_PPCF128] = "expl"; 102 Names[RTLIB::EXP2_F32] = "exp2f"; 103 Names[RTLIB::EXP2_F64] = "exp2"; 104 Names[RTLIB::EXP2_F80] = "exp2l"; 105 Names[RTLIB::EXP2_PPCF128] = "exp2l"; 106 Names[RTLIB::SIN_F32] = "sinf"; 107 Names[RTLIB::SIN_F64] = "sin"; 108 Names[RTLIB::SIN_F80] = "sinl"; 109 Names[RTLIB::SIN_PPCF128] = "sinl"; 110 Names[RTLIB::COS_F32] = "cosf"; 111 Names[RTLIB::COS_F64] = "cos"; 112 Names[RTLIB::COS_F80] = "cosl"; 113 Names[RTLIB::COS_PPCF128] = "cosl"; 114 Names[RTLIB::POW_F32] = "powf"; 115 Names[RTLIB::POW_F64] = "pow"; 116 Names[RTLIB::POW_F80] = "powl"; 117 Names[RTLIB::POW_PPCF128] = "powl"; 118 Names[RTLIB::CEIL_F32] = "ceilf"; 119 Names[RTLIB::CEIL_F64] = "ceil"; 120 Names[RTLIB::CEIL_F80] = "ceill"; 121 Names[RTLIB::CEIL_PPCF128] = "ceill"; 122 Names[RTLIB::TRUNC_F32] = "truncf"; 123 Names[RTLIB::TRUNC_F64] = "trunc"; 124 Names[RTLIB::TRUNC_F80] = "truncl"; 125 Names[RTLIB::TRUNC_PPCF128] = "truncl"; 126 Names[RTLIB::RINT_F32] = "rintf"; 127 Names[RTLIB::RINT_F64] = "rint"; 128 Names[RTLIB::RINT_F80] = "rintl"; 129 Names[RTLIB::RINT_PPCF128] = "rintl"; 130 Names[RTLIB::NEARBYINT_F32] = "nearbyintf"; 131 Names[RTLIB::NEARBYINT_F64] = "nearbyint"; 132 Names[RTLIB::NEARBYINT_F80] = "nearbyintl"; 133 Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl"; 134 Names[RTLIB::FLOOR_F32] = "floorf"; 135 Names[RTLIB::FLOOR_F64] = "floor"; 136 Names[RTLIB::FLOOR_F80] = "floorl"; 137 Names[RTLIB::FLOOR_PPCF128] = "floorl"; 138 Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2"; 139 Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2"; 140 Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2"; 141 Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2"; 142 Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2"; 143 Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2"; 144 Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi"; 145 Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi"; 146 Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti"; 147 Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi"; 148 Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi"; 149 Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti"; 150 Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi"; 151 Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi"; 152 Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti"; 153 Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi"; 154 Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi"; 155 Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti"; 156 Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi"; 157 Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi"; 158 Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti"; 159 Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi"; 160 Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi"; 161 Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti"; 162 Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi"; 163 Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi"; 164 Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti"; 165 Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi"; 166 Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi"; 167 Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti"; 168 Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf"; 169 Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf"; 170 Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf"; 171 Names[RTLIB::SINTTOFP_I32_PPCF128] = 
"__floatsitf"; 172 Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf"; 173 Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf"; 174 Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf"; 175 Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf"; 176 Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf"; 177 Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf"; 178 Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf"; 179 Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf"; 180 Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf"; 181 Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf"; 182 Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf"; 183 Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf"; 184 Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf"; 185 Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf"; 186 Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf"; 187 Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf"; 188 Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf"; 189 Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf"; 190 Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf"; 191 Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf"; 192 Names[RTLIB::OEQ_F32] = "__eqsf2"; 193 Names[RTLIB::OEQ_F64] = "__eqdf2"; 194 Names[RTLIB::UNE_F32] = "__nesf2"; 195 Names[RTLIB::UNE_F64] = "__nedf2"; 196 Names[RTLIB::OGE_F32] = "__gesf2"; 197 Names[RTLIB::OGE_F64] = "__gedf2"; 198 Names[RTLIB::OLT_F32] = "__ltsf2"; 199 Names[RTLIB::OLT_F64] = "__ltdf2"; 200 Names[RTLIB::OLE_F32] = "__lesf2"; 201 Names[RTLIB::OLE_F64] = "__ledf2"; 202 Names[RTLIB::OGT_F32] = "__gtsf2"; 203 Names[RTLIB::OGT_F64] = "__gtdf2"; 204 Names[RTLIB::UO_F32] = "__unordsf2"; 205 Names[RTLIB::UO_F64] = "__unorddf2"; 206 Names[RTLIB::O_F32] = "__unordsf2"; 207 Names[RTLIB::O_F64] = "__unorddf2"; 208} 209 210/// getFPEXT - Return the FPEXT_*_* value for the given types, or 211/// UNKNOWN_LIBCALL if there is none. 212RTLIB::Libcall RTLIB::getFPEXT(MVT OpVT, MVT RetVT) { 213 if (OpVT == MVT::f32) { 214 if (RetVT == MVT::f64) 215 return FPEXT_F32_F64; 216 } 217 return UNKNOWN_LIBCALL; 218} 219 220/// getFPROUND - Return the FPROUND_*_* value for the given types, or 221/// UNKNOWN_LIBCALL if there is none. 222RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) { 223 if (RetVT == MVT::f32) { 224 if (OpVT == MVT::f64) 225 return FPROUND_F64_F32; 226 if (OpVT == MVT::f80) 227 return FPROUND_F80_F32; 228 if (OpVT == MVT::ppcf128) 229 return FPROUND_PPCF128_F32; 230 } else if (RetVT == MVT::f64) { 231 if (OpVT == MVT::f80) 232 return FPROUND_F80_F64; 233 if (OpVT == MVT::ppcf128) 234 return FPROUND_PPCF128_F64; 235 } 236 return UNKNOWN_LIBCALL; 237} 238 239/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or 240/// UNKNOWN_LIBCALL if there is none. 
241RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) { 242 if (OpVT == MVT::f32) { 243 if (RetVT == MVT::i32) 244 return FPTOSINT_F32_I32; 245 if (RetVT == MVT::i64) 246 return FPTOSINT_F32_I64; 247 if (RetVT == MVT::i128) 248 return FPTOSINT_F32_I128; 249 } else if (OpVT == MVT::f64) { 250 if (RetVT == MVT::i32) 251 return FPTOSINT_F64_I32; 252 if (RetVT == MVT::i64) 253 return FPTOSINT_F64_I64; 254 if (RetVT == MVT::i128) 255 return FPTOSINT_F64_I128; 256 } else if (OpVT == MVT::f80) { 257 if (RetVT == MVT::i32) 258 return FPTOSINT_F80_I32; 259 if (RetVT == MVT::i64) 260 return FPTOSINT_F80_I64; 261 if (RetVT == MVT::i128) 262 return FPTOSINT_F80_I128; 263 } else if (OpVT == MVT::ppcf128) { 264 if (RetVT == MVT::i32) 265 return FPTOSINT_PPCF128_I32; 266 if (RetVT == MVT::i64) 267 return FPTOSINT_PPCF128_I64; 268 if (RetVT == MVT::i128) 269 return FPTOSINT_PPCF128_I128; 270 } 271 return UNKNOWN_LIBCALL; 272} 273 274/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or 275/// UNKNOWN_LIBCALL if there is none. 276RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) { 277 if (OpVT == MVT::f32) { 278 if (RetVT == MVT::i32) 279 return FPTOUINT_F32_I32; 280 if (RetVT == MVT::i64) 281 return FPTOUINT_F32_I64; 282 if (RetVT == MVT::i128) 283 return FPTOUINT_F32_I128; 284 } else if (OpVT == MVT::f64) { 285 if (RetVT == MVT::i32) 286 return FPTOUINT_F64_I32; 287 if (RetVT == MVT::i64) 288 return FPTOUINT_F64_I64; 289 if (RetVT == MVT::i128) 290 return FPTOUINT_F64_I128; 291 } else if (OpVT == MVT::f80) { 292 if (RetVT == MVT::i32) 293 return FPTOUINT_F80_I32; 294 if (RetVT == MVT::i64) 295 return FPTOUINT_F80_I64; 296 if (RetVT == MVT::i128) 297 return FPTOUINT_F80_I128; 298 } else if (OpVT == MVT::ppcf128) { 299 if (RetVT == MVT::i32) 300 return FPTOUINT_PPCF128_I32; 301 if (RetVT == MVT::i64) 302 return FPTOUINT_PPCF128_I64; 303 if (RetVT == MVT::i128) 304 return FPTOUINT_PPCF128_I128; 305 } 306 return UNKNOWN_LIBCALL; 307} 308 309/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or 310/// UNKNOWN_LIBCALL if there is none. 311RTLIB::Libcall RTLIB::getSINTTOFP(MVT OpVT, MVT RetVT) { 312 if (OpVT == MVT::i32) { 313 if (RetVT == MVT::f32) 314 return SINTTOFP_I32_F32; 315 else if (RetVT == MVT::f64) 316 return SINTTOFP_I32_F64; 317 else if (RetVT == MVT::f80) 318 return SINTTOFP_I32_F80; 319 else if (RetVT == MVT::ppcf128) 320 return SINTTOFP_I32_PPCF128; 321 } else if (OpVT == MVT::i64) { 322 if (RetVT == MVT::f32) 323 return SINTTOFP_I64_F32; 324 else if (RetVT == MVT::f64) 325 return SINTTOFP_I64_F64; 326 else if (RetVT == MVT::f80) 327 return SINTTOFP_I64_F80; 328 else if (RetVT == MVT::ppcf128) 329 return SINTTOFP_I64_PPCF128; 330 } else if (OpVT == MVT::i128) { 331 if (RetVT == MVT::f32) 332 return SINTTOFP_I128_F32; 333 else if (RetVT == MVT::f64) 334 return SINTTOFP_I128_F64; 335 else if (RetVT == MVT::f80) 336 return SINTTOFP_I128_F80; 337 else if (RetVT == MVT::ppcf128) 338 return SINTTOFP_I128_PPCF128; 339 } 340 return UNKNOWN_LIBCALL; 341} 342 343/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or 344/// UNKNOWN_LIBCALL if there is none. 
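/// For example, getUINTTOFP(MVT::i64, MVT::f32) returns UINTTOFP_I64_F32
/// ("__floatundisf" by default), while an unhandled pair such as
/// (MVT::i16, MVT::f32) falls through to UNKNOWN_LIBCALL.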
345RTLIB::Libcall RTLIB::getUINTTOFP(MVT OpVT, MVT RetVT) { 346 if (OpVT == MVT::i32) { 347 if (RetVT == MVT::f32) 348 return UINTTOFP_I32_F32; 349 else if (RetVT == MVT::f64) 350 return UINTTOFP_I32_F64; 351 else if (RetVT == MVT::f80) 352 return UINTTOFP_I32_F80; 353 else if (RetVT == MVT::ppcf128) 354 return UINTTOFP_I32_PPCF128; 355 } else if (OpVT == MVT::i64) { 356 if (RetVT == MVT::f32) 357 return UINTTOFP_I64_F32; 358 else if (RetVT == MVT::f64) 359 return UINTTOFP_I64_F64; 360 else if (RetVT == MVT::f80) 361 return UINTTOFP_I64_F80; 362 else if (RetVT == MVT::ppcf128) 363 return UINTTOFP_I64_PPCF128; 364 } else if (OpVT == MVT::i128) { 365 if (RetVT == MVT::f32) 366 return UINTTOFP_I128_F32; 367 else if (RetVT == MVT::f64) 368 return UINTTOFP_I128_F64; 369 else if (RetVT == MVT::f80) 370 return UINTTOFP_I128_F80; 371 else if (RetVT == MVT::ppcf128) 372 return UINTTOFP_I128_PPCF128; 373 } 374 return UNKNOWN_LIBCALL; 375} 376 377/// InitCmpLibcallCCs - Set default comparison libcall CC. 378/// 379static void InitCmpLibcallCCs(ISD::CondCode *CCs) { 380 memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL); 381 CCs[RTLIB::OEQ_F32] = ISD::SETEQ; 382 CCs[RTLIB::OEQ_F64] = ISD::SETEQ; 383 CCs[RTLIB::UNE_F32] = ISD::SETNE; 384 CCs[RTLIB::UNE_F64] = ISD::SETNE; 385 CCs[RTLIB::OGE_F32] = ISD::SETGE; 386 CCs[RTLIB::OGE_F64] = ISD::SETGE; 387 CCs[RTLIB::OLT_F32] = ISD::SETLT; 388 CCs[RTLIB::OLT_F64] = ISD::SETLT; 389 CCs[RTLIB::OLE_F32] = ISD::SETLE; 390 CCs[RTLIB::OLE_F64] = ISD::SETLE; 391 CCs[RTLIB::OGT_F32] = ISD::SETGT; 392 CCs[RTLIB::OGT_F64] = ISD::SETGT; 393 CCs[RTLIB::UO_F32] = ISD::SETNE; 394 CCs[RTLIB::UO_F64] = ISD::SETNE; 395 CCs[RTLIB::O_F32] = ISD::SETEQ; 396 CCs[RTLIB::O_F64] = ISD::SETEQ; 397} 398 399TargetLowering::TargetLowering(TargetMachine &tm) 400 : TM(tm), TD(TM.getTargetData()) { 401 assert(ISD::BUILTIN_OP_END <= OpActionsCapacity && 402 "Fixed size array in TargetLowering is not large enough!"); 403 // All operations default to being supported. 404 memset(OpActions, 0, sizeof(OpActions)); 405 memset(LoadXActions, 0, sizeof(LoadXActions)); 406 memset(TruncStoreActions, 0, sizeof(TruncStoreActions)); 407 memset(IndexedModeActions, 0, sizeof(IndexedModeActions)); 408 memset(ConvertActions, 0, sizeof(ConvertActions)); 409 410 // Set default actions for various operations. 411 for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) { 412 // Default all indexed load / store to expand. 413 for (unsigned IM = (unsigned)ISD::PRE_INC; 414 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) { 415 setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand); 416 setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand); 417 } 418 419 // These operations default to expand. 420 setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand); 421 } 422 423 // Most targets ignore the @llvm.prefetch intrinsic. 424 setOperationAction(ISD::PREFETCH, MVT::Other, Expand); 425 426 // ConstantFP nodes default to expand. Targets can either change this to 427 // Legal, in which case all fp constants are legal, or use addLegalFPImmediate 428 // to optimize expansions for certain constants. 429 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 430 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 431 setOperationAction(ISD::ConstantFP, MVT::f80, Expand); 432 433 // These library functions default to expand. 
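  // (Targets with native support can flip these back in their own
  // constructors, e.g. setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  // when an operation stays Expand, legalization typically falls back to the
  // libcalls registered in InitLibcallNames above.)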
434 setOperationAction(ISD::FLOG , MVT::f64, Expand); 435 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 436 setOperationAction(ISD::FLOG10,MVT::f64, Expand); 437 setOperationAction(ISD::FEXP , MVT::f64, Expand); 438 setOperationAction(ISD::FEXP2, MVT::f64, Expand); 439 setOperationAction(ISD::FLOG , MVT::f32, Expand); 440 setOperationAction(ISD::FLOG2, MVT::f32, Expand); 441 setOperationAction(ISD::FLOG10,MVT::f32, Expand); 442 setOperationAction(ISD::FEXP , MVT::f32, Expand); 443 setOperationAction(ISD::FEXP2, MVT::f32, Expand); 444 445 // Default ISD::TRAP to expand (which turns it into abort). 446 setOperationAction(ISD::TRAP, MVT::Other, Expand); 447 448 IsLittleEndian = TD->isLittleEndian(); 449 UsesGlobalOffsetTable = false; 450 ShiftAmountTy = PointerTy = getValueType(TD->getIntPtrType()); 451 ShiftAmtHandling = Undefined; 452 memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*)); 453 memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray)); 454 maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8; 455 allowUnalignedMemoryAccesses = false; 456 UseUnderscoreSetJmp = false; 457 UseUnderscoreLongJmp = false; 458 SelectIsExpensive = false; 459 IntDivIsCheap = false; 460 Pow2DivIsCheap = false; 461 StackPointerRegisterToSaveRestore = 0; 462 ExceptionPointerRegister = 0; 463 ExceptionSelectorRegister = 0; 464 SetCCResultContents = UndefinedSetCCResult; 465 SchedPreferenceInfo = SchedulingForLatency; 466 JumpBufSize = 0; 467 JumpBufAlignment = 0; 468 IfCvtBlockSizeLimit = 2; 469 IfCvtDupBlockSizeLimit = 0; 470 PrefLoopAlignment = 0; 471 472 InitLibcallNames(LibcallRoutineNames); 473 InitCmpLibcallCCs(CmpLibcallCCs); 474 475 // Tell Legalize whether the assembler supports DEBUG_LOC. 476 const TargetAsmInfo *TASM = TM.getTargetAsmInfo(); 477 if (!TASM || !TASM->hasDotLocAndDotFile()) 478 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 479} 480 481TargetLowering::~TargetLowering() {} 482 483/// computeRegisterProperties - Once all of the register classes are added, 484/// this allows us to compute derived properties we expose. 485void TargetLowering::computeRegisterProperties() { 486 assert(MVT::LAST_VALUETYPE <= 32 && 487 "Too many value types for ValueTypeActions to hold!"); 488 489 // Everything defaults to needing one register. 490 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) { 491 NumRegistersForVT[i] = 1; 492 RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i; 493 } 494 // ...except isVoid, which doesn't need any registers. 495 NumRegistersForVT[MVT::isVoid] = 0; 496 497 // Find the largest integer register class. 498 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE; 499 for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg) 500 assert(LargestIntReg != MVT::i1 && "No integer registers defined!"); 501 502 // Every integer value type larger than this largest register takes twice as 503 // many registers to represent as the previous ValueType. 504 for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) { 505 MVT EVT = (MVT::SimpleValueType)ExpandedReg; 506 if (!EVT.isInteger()) 507 break; 508 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1]; 509 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg; 510 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1); 511 ValueTypeActions.setTypeAction(EVT, Expand); 512 } 513 514 // Inspect all of the ValueType's smaller than the largest integer 515 // register to see which ones need promotion. 
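  // For example, on a hypothetical target whose smallest legal integer type
  // is i32, the loop below marks i1, i8 and i16 as Promote, with i32 recorded
  // as both their register type and their transform type.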
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
        (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, Promote);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
    TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, Expand);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, Expand);
  }

  // Decide how to handle f32. If the target does not have native support for
  // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
  if (!isTypeLegal(MVT::f32)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
      TransformToType[MVT::f32] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::f32, Promote);
    } else {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
      TransformToType[MVT::f32] = MVT::i32;
      ValueTypeActions.setTypeAction(MVT::f32, Expand);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (!isTypeLegal(VT)) {
      MVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] =
        getVectorTypeBreakdown(VT,
                               IntermediateVT, NumIntermediates,
                               RegisterVT);
      RegisterTypeForVT[i] = RegisterVT;
      TransformToType[i] = MVT::Other; // this isn't actually used
      ValueTypeActions.setTypeAction(VT, Expand);
    }
  }
}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}


MVT TargetLowering::getSetCCResultType(const SDValue &) const {
  return getValueType(TD->getIntPtrType());
}


/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
                                                MVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const {
  // Figure out the right, legal destination reg to copy into.
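  // Illustrative walk-through: for v8f32 on a target where v4f32 is the widest
  // legal vector type, the loop below halves NumElts from 8 to 4, so
  // NumVectorRegs becomes 2, IntermediateVT and RegisterVT are v4f32, and 2
  // registers are reported.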
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getTypeToTransformTo(NewVT);
  RegisterVT = DestVT;
  if (DestVT.bitsLT(NewVT)) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }

  return 1;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  return TD->getCallFrameTypeAlignment(Ty);
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if there
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
                                                        const APInt &Demanded) {
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch(Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (C->getAPIntValue().intersects(~Demanded)) {
        MVT VT = Op.getValueType();
        SDValue New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                  DAG.getConstant(Demanded &
                                                  C->getAPIntValue(),
                                                  VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}

/// SimplifyDemandedBits - Look at Op. At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream. If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New. Otherwise,
/// analyze the expression and return a mask of KnownOne and KnownZero bits for
/// the expression (used to simplify the caller). The KnownZero/One bits may
/// only be accurate for those bits in the DemandedMask.
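/// For example, if Op is (and X, 0xFF) and DemandedMask covers only the low
/// byte, the mask cannot affect any demanded bit, so Op is simplified to X.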
bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedMask,
                                          APInt &KnownZero,
                                          APInt &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  unsigned BitWidth = DemandedMask.getBitWidth();
  assert(Op.getValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");
  APInt NewMask = DemandedMask;

  // Don't know anything.
  KnownZero = KnownOne = APInt(BitWidth, 0);

  // Other users may use these bits.
  if (!Op.getNode()->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, Just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the NewMask to all bits.
    NewMask = APInt::getAllOnesValue(BitWidth);
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask;
    KnownZero = ~KnownOne & NewMask;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt LHSZero, LHSOne;
      TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
                                LHSZero, LHSOne, Depth+1);
      // If the LHS already has zeros where RHSC does, this and is dead.
      if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
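    // e.g. a mask constant of 0x1FF can shrink to 0xFF when only the low byte
    // is demanded.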
773 if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask)) 774 return true; 775 776 // Output known-1 bits are only known if set in both the LHS & RHS. 777 KnownOne &= KnownOne2; 778 // Output known-0 are known to be clear if zero in either the LHS | RHS. 779 KnownZero |= KnownZero2; 780 break; 781 case ISD::OR: 782 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 783 KnownOne, TLO, Depth+1)) 784 return true; 785 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 786 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask, 787 KnownZero2, KnownOne2, TLO, Depth+1)) 788 return true; 789 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 790 791 // If all of the demanded bits are known zero on one side, return the other. 792 // These bits cannot contribute to the result of the 'or'. 793 if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask)) 794 return TLO.CombineTo(Op, Op.getOperand(0)); 795 if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask)) 796 return TLO.CombineTo(Op, Op.getOperand(1)); 797 // If all of the potentially set bits on one side are known to be set on 798 // the other side, just use the 'other' side. 799 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 800 return TLO.CombineTo(Op, Op.getOperand(0)); 801 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 802 return TLO.CombineTo(Op, Op.getOperand(1)); 803 // If the RHS is a constant, see if we can simplify it. 804 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 805 return true; 806 807 // Output known-0 bits are only known if clear in both the LHS & RHS. 808 KnownZero &= KnownZero2; 809 // Output known-1 are known to be set if set in either the LHS | RHS. 810 KnownOne |= KnownOne2; 811 break; 812 case ISD::XOR: 813 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 814 KnownOne, TLO, Depth+1)) 815 return true; 816 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 817 if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2, 818 KnownOne2, TLO, Depth+1)) 819 return true; 820 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 821 822 // If all of the demanded bits are known zero on one side, return the other. 823 // These bits cannot contribute to the result of the 'xor'. 824 if ((KnownZero & NewMask) == NewMask) 825 return TLO.CombineTo(Op, Op.getOperand(0)); 826 if ((KnownZero2 & NewMask) == NewMask) 827 return TLO.CombineTo(Op, Op.getOperand(1)); 828 829 // If all of the unknown bits are known to be zero on one side or the other 830 // (but not both) turn this into an *inclusive* or. 831 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 832 if ((NewMask & ~KnownZero & ~KnownZero2) == 0) 833 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(), 834 Op.getOperand(0), 835 Op.getOperand(1))); 836 837 // Output known-0 bits are known if clear or set in both the LHS & RHS. 838 KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 839 // Output known-1 are known to be set if set in only one of the LHS, RHS. 840 KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 841 842 // If all of the demanded bits on one side are known, and all of the set 843 // bits on that side are also known to be set on the other side, turn this 844 // into an AND, as we know the bits will be cleared. 845 // e.g. 
(X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 846 if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known 847 if ((KnownOne & KnownOne2) == KnownOne) { 848 MVT VT = Op.getValueType(); 849 SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); 850 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0), 851 ANDC)); 852 } 853 } 854 855 // If the RHS is a constant, see if we can simplify it. 856 // for XOR, we prefer to force bits to 1 if they will make a -1. 857 // if we can't force bits, try to shrink constant 858 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 859 APInt Expanded = C->getAPIntValue() | (~NewMask); 860 // if we can expand it to have all bits set, do it 861 if (Expanded.isAllOnesValue()) { 862 if (Expanded != C->getAPIntValue()) { 863 MVT VT = Op.getValueType(); 864 SDValue New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), 865 TLO.DAG.getConstant(Expanded, VT)); 866 return TLO.CombineTo(Op, New); 867 } 868 // if it already has all the bits set, nothing to change 869 // but don't shrink either! 870 } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) { 871 return true; 872 } 873 } 874 875 KnownZero = KnownZeroOut; 876 KnownOne = KnownOneOut; 877 break; 878 case ISD::SELECT: 879 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero, 880 KnownOne, TLO, Depth+1)) 881 return true; 882 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2, 883 KnownOne2, TLO, Depth+1)) 884 return true; 885 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 886 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 887 888 // If the operands are constants, see if we can simplify them. 889 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 890 return true; 891 892 // Only known if known in both the LHS and RHS. 893 KnownOne &= KnownOne2; 894 KnownZero &= KnownZero2; 895 break; 896 case ISD::SELECT_CC: 897 if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero, 898 KnownOne, TLO, Depth+1)) 899 return true; 900 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2, 901 KnownOne2, TLO, Depth+1)) 902 return true; 903 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 904 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 905 906 // If the operands are constants, see if we can simplify them. 907 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 908 return true; 909 910 // Only known if known in both the LHS and RHS. 911 KnownOne &= KnownOne2; 912 KnownZero &= KnownZero2; 913 break; 914 case ISD::SHL: 915 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 916 unsigned ShAmt = SA->getZExtValue(); 917 SDValue InOp = Op.getOperand(0); 918 919 // If the shift count is an invalid immediate, don't do anything. 920 if (ShAmt >= BitWidth) 921 break; 922 923 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 924 // single shift. We can do this if the bottom bits (which are shifted 925 // out) are never demanded. 
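      // e.g. ((X >>u 4) << 6), when none of the low 6 bits of the result are
      // demanded, can be replaced by (X << 2).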
926 if (InOp.getOpcode() == ISD::SRL && 927 isa<ConstantSDNode>(InOp.getOperand(1))) { 928 if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) { 929 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 930 unsigned Opc = ISD::SHL; 931 int Diff = ShAmt-C1; 932 if (Diff < 0) { 933 Diff = -Diff; 934 Opc = ISD::SRL; 935 } 936 937 SDValue NewSA = 938 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 939 MVT VT = Op.getValueType(); 940 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, 941 InOp.getOperand(0), NewSA)); 942 } 943 } 944 945 if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt), 946 KnownZero, KnownOne, TLO, Depth+1)) 947 return true; 948 KnownZero <<= SA->getZExtValue(); 949 KnownOne <<= SA->getZExtValue(); 950 // low bits known zero. 951 KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue()); 952 } 953 break; 954 case ISD::SRL: 955 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 956 MVT VT = Op.getValueType(); 957 unsigned ShAmt = SA->getZExtValue(); 958 unsigned VTSize = VT.getSizeInBits(); 959 SDValue InOp = Op.getOperand(0); 960 961 // If the shift count is an invalid immediate, don't do anything. 962 if (ShAmt >= BitWidth) 963 break; 964 965 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 966 // single shift. We can do this if the top bits (which are shifted out) 967 // are never demanded. 968 if (InOp.getOpcode() == ISD::SHL && 969 isa<ConstantSDNode>(InOp.getOperand(1))) { 970 if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) { 971 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 972 unsigned Opc = ISD::SRL; 973 int Diff = ShAmt-C1; 974 if (Diff < 0) { 975 Diff = -Diff; 976 Opc = ISD::SHL; 977 } 978 979 SDValue NewSA = 980 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 981 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, 982 InOp.getOperand(0), NewSA)); 983 } 984 } 985 986 // Compute the new bits that are at the top now. 987 if (SimplifyDemandedBits(InOp, (NewMask << ShAmt), 988 KnownZero, KnownOne, TLO, Depth+1)) 989 return true; 990 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 991 KnownZero = KnownZero.lshr(ShAmt); 992 KnownOne = KnownOne.lshr(ShAmt); 993 994 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 995 KnownZero |= HighBits; // High bits known zero. 996 } 997 break; 998 case ISD::SRA: 999 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1000 MVT VT = Op.getValueType(); 1001 unsigned ShAmt = SA->getZExtValue(); 1002 1003 // If the shift count is an invalid immediate, don't do anything. 1004 if (ShAmt >= BitWidth) 1005 break; 1006 1007 APInt InDemandedMask = (NewMask << ShAmt); 1008 1009 // If any of the demanded bits are produced by the sign extension, we also 1010 // demand the input sign bit. 1011 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1012 if (HighBits.intersects(NewMask)) 1013 InDemandedMask |= APInt::getSignBit(VT.getSizeInBits()); 1014 1015 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, 1016 KnownZero, KnownOne, TLO, Depth+1)) 1017 return true; 1018 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1019 KnownZero = KnownZero.lshr(ShAmt); 1020 KnownOne = KnownOne.lshr(ShAmt); 1021 1022 // Handle the sign bit, adjusted to where it is now in the mask. 
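      // e.g. for a 32-bit value arithmetically shifted right by 24, the input
      // sign bit now sits at bit 7 of the result.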
1023 APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt); 1024 1025 // If the input sign bit is known to be zero, or if none of the top bits 1026 // are demanded, turn this into an unsigned shift right. 1027 if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) { 1028 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0), 1029 Op.getOperand(1))); 1030 } else if (KnownOne.intersects(SignBit)) { // New bits are known one. 1031 KnownOne |= HighBits; 1032 } 1033 } 1034 break; 1035 case ISD::SIGN_EXTEND_INREG: { 1036 MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1037 1038 // Sign extension. Compute the demanded bits in the result that are not 1039 // present in the input. 1040 APInt NewBits = APInt::getHighBitsSet(BitWidth, 1041 BitWidth - EVT.getSizeInBits()) & 1042 NewMask; 1043 1044 // If none of the extended bits are demanded, eliminate the sextinreg. 1045 if (NewBits == 0) 1046 return TLO.CombineTo(Op, Op.getOperand(0)); 1047 1048 APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits()); 1049 InSignBit.zext(BitWidth); 1050 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, 1051 EVT.getSizeInBits()) & 1052 NewMask; 1053 1054 // Since the sign extended bits are demanded, we know that the sign 1055 // bit is demanded. 1056 InputDemandedBits |= InSignBit; 1057 1058 if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits, 1059 KnownZero, KnownOne, TLO, Depth+1)) 1060 return true; 1061 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1062 1063 // If the sign bit of the input is known set or clear, then we know the 1064 // top bits of the result. 1065 1066 // If the input sign bit is known zero, convert this into a zero extension. 1067 if (KnownZero.intersects(InSignBit)) 1068 return TLO.CombineTo(Op, 1069 TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT)); 1070 1071 if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 1072 KnownOne |= NewBits; 1073 KnownZero &= ~NewBits; 1074 } else { // Input sign bit unknown 1075 KnownZero &= ~NewBits; 1076 KnownOne &= ~NewBits; 1077 } 1078 break; 1079 } 1080 case ISD::ZERO_EXTEND: { 1081 unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits(); 1082 APInt InMask = NewMask; 1083 InMask.trunc(OperandBitWidth); 1084 1085 // If none of the top bits are demanded, convert this into an any_extend. 1086 APInt NewBits = 1087 APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask; 1088 if (!NewBits.intersects(NewMask)) 1089 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, 1090 Op.getValueType(), 1091 Op.getOperand(0))); 1092 1093 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1094 KnownZero, KnownOne, TLO, Depth+1)) 1095 return true; 1096 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1097 KnownZero.zext(BitWidth); 1098 KnownOne.zext(BitWidth); 1099 KnownZero |= NewBits; 1100 break; 1101 } 1102 case ISD::SIGN_EXTEND: { 1103 MVT InVT = Op.getOperand(0).getValueType(); 1104 unsigned InBits = InVT.getSizeInBits(); 1105 APInt InMask = APInt::getLowBitsSet(BitWidth, InBits); 1106 APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits); 1107 APInt NewBits = ~InMask & NewMask; 1108 1109 // If none of the top bits are demanded, convert this into an any_extend. 1110 if (NewBits == 0) 1111 return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND,Op.getValueType(), 1112 Op.getOperand(0))); 1113 1114 // Since some of the sign extended bits are demanded, we know that the sign 1115 // bit is demanded. 
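    // e.g. when sign extending from i8 to i32, demanding any of bits 8-31 of
    // the result implicitly demands bit 7 (the sign bit) of the input.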
1116 APInt InDemandedBits = InMask & NewMask; 1117 InDemandedBits |= InSignBit; 1118 InDemandedBits.trunc(InBits); 1119 1120 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero, 1121 KnownOne, TLO, Depth+1)) 1122 return true; 1123 KnownZero.zext(BitWidth); 1124 KnownOne.zext(BitWidth); 1125 1126 // If the sign bit is known zero, convert this to a zero extend. 1127 if (KnownZero.intersects(InSignBit)) 1128 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, 1129 Op.getValueType(), 1130 Op.getOperand(0))); 1131 1132 // If the sign bit is known one, the top bits match. 1133 if (KnownOne.intersects(InSignBit)) { 1134 KnownOne |= NewBits; 1135 KnownZero &= ~NewBits; 1136 } else { // Otherwise, top bits aren't known. 1137 KnownOne &= ~NewBits; 1138 KnownZero &= ~NewBits; 1139 } 1140 break; 1141 } 1142 case ISD::ANY_EXTEND: { 1143 unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits(); 1144 APInt InMask = NewMask; 1145 InMask.trunc(OperandBitWidth); 1146 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1147 KnownZero, KnownOne, TLO, Depth+1)) 1148 return true; 1149 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1150 KnownZero.zext(BitWidth); 1151 KnownOne.zext(BitWidth); 1152 break; 1153 } 1154 case ISD::TRUNCATE: { 1155 // Simplify the input, using demanded bit information, and compute the known 1156 // zero/one bits live out. 1157 APInt TruncMask = NewMask; 1158 TruncMask.zext(Op.getOperand(0).getValueSizeInBits()); 1159 if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, 1160 KnownZero, KnownOne, TLO, Depth+1)) 1161 return true; 1162 KnownZero.trunc(BitWidth); 1163 KnownOne.trunc(BitWidth); 1164 1165 // If the input is only used by this truncate, see if we can shrink it based 1166 // on the known demanded bits. 1167 if (Op.getOperand(0).getNode()->hasOneUse()) { 1168 SDValue In = Op.getOperand(0); 1169 unsigned InBitWidth = In.getValueSizeInBits(); 1170 switch (In.getOpcode()) { 1171 default: break; 1172 case ISD::SRL: 1173 // Shrink SRL by a constant if none of the high bits shifted in are 1174 // demanded. 1175 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){ 1176 APInt HighBits = APInt::getHighBitsSet(InBitWidth, 1177 InBitWidth - BitWidth); 1178 HighBits = HighBits.lshr(ShAmt->getZExtValue()); 1179 HighBits.trunc(BitWidth); 1180 1181 if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) { 1182 // None of the shifted in bits are needed. Add a truncate of the 1183 // shift input, then shift it. 1184 SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, 1185 Op.getValueType(), 1186 In.getOperand(0)); 1187 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL,Op.getValueType(), 1188 NewTrunc, In.getOperand(1))); 1189 } 1190 } 1191 break; 1192 } 1193 } 1194 1195 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1196 break; 1197 } 1198 case ISD::AssertZext: { 1199 MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1200 APInt InMask = APInt::getLowBitsSet(BitWidth, 1201 VT.getSizeInBits()); 1202 if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask, 1203 KnownZero, KnownOne, TLO, Depth+1)) 1204 return true; 1205 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1206 KnownZero |= ~InMask & NewMask; 1207 break; 1208 } 1209 case ISD::BIT_CONVERT: 1210#if 0 1211 // If this is an FP->Int bitcast and if the sign bit is the only thing that 1212 // is demanded, turn this into a FGETSIGN. 
1213 if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) && 1214 MVT::isFloatingPoint(Op.getOperand(0).getValueType()) && 1215 !MVT::isVector(Op.getOperand(0).getValueType())) { 1216 // Only do this xform if FGETSIGN is valid or if before legalize. 1217 if (!TLO.AfterLegalize || 1218 isOperationLegal(ISD::FGETSIGN, Op.getValueType())) { 1219 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 1220 // place. We expect the SHL to be eliminated by other optimizations. 1221 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), 1222 Op.getOperand(0)); 1223 unsigned ShVal = Op.getValueType().getSizeInBits()-1; 1224 SDValue ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy()); 1225 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(), 1226 Sign, ShAmt)); 1227 } 1228 } 1229#endif 1230 break; 1231 default: 1232 // Just use ComputeMaskedBits to compute output bits. 1233 TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth); 1234 break; 1235 } 1236 1237 // If we know the value of all of the demanded bits, return this as a 1238 // constant. 1239 if ((NewMask & (KnownZero|KnownOne)) == NewMask) 1240 return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType())); 1241 1242 return false; 1243} 1244 1245/// computeMaskedBitsForTargetNode - Determine which of the bits specified 1246/// in Mask are known to be either zero or one and return them in the 1247/// KnownZero/KnownOne bitsets. 1248void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 1249 const APInt &Mask, 1250 APInt &KnownZero, 1251 APInt &KnownOne, 1252 const SelectionDAG &DAG, 1253 unsigned Depth) const { 1254 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1255 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1256 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1257 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1258 "Should use MaskedValueIsZero if you don't know whether Op" 1259 " is a target node!"); 1260 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 1261} 1262 1263/// ComputeNumSignBitsForTargetNode - This method can be implemented by 1264/// targets that want to expose additional information about sign bits to the 1265/// DAG Combiner. 1266unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 1267 unsigned Depth) const { 1268 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1269 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1270 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1271 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1272 "Should use ComputeNumSignBits if you don't know whether Op" 1273 " is a target node!"); 1274 return 1; 1275} 1276 1277 1278/// SimplifySetCC - Try to simplify a setcc built with the specified operands 1279/// and cc. If it is unable to simplify it, return a null SDValue. 1280SDValue 1281TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1, 1282 ISD::CondCode Cond, bool foldBooleans, 1283 DAGCombinerInfo &DCI) const { 1284 SelectionDAG &DAG = DCI.DAG; 1285 1286 // These setcc operations always fold. 
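  // (SETFALSE/SETTRUE and their "2" variants ignore the operands entirely and
  // fold directly to the constants 0 and 1.)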
1287 switch (Cond) { 1288 default: break; 1289 case ISD::SETFALSE: 1290 case ISD::SETFALSE2: return DAG.getConstant(0, VT); 1291 case ISD::SETTRUE: 1292 case ISD::SETTRUE2: return DAG.getConstant(1, VT); 1293 } 1294 1295 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 1296 const APInt &C1 = N1C->getAPIntValue(); 1297 if (isa<ConstantSDNode>(N0.getNode())) { 1298 return DAG.FoldSetCC(VT, N0, N1, Cond); 1299 } else { 1300 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 1301 // equality comparison, then we're just comparing whether X itself is 1302 // zero. 1303 if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) && 1304 N0.getOperand(0).getOpcode() == ISD::CTLZ && 1305 N0.getOperand(1).getOpcode() == ISD::Constant) { 1306 unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue(); 1307 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1308 ShAmt == Log2_32(N0.getValueType().getSizeInBits())) { 1309 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 1310 // (srl (ctlz x), 5) == 0 -> X != 0 1311 // (srl (ctlz x), 5) != 1 -> X != 0 1312 Cond = ISD::SETNE; 1313 } else { 1314 // (srl (ctlz x), 5) != 0 -> X == 0 1315 // (srl (ctlz x), 5) == 1 -> X == 0 1316 Cond = ISD::SETEQ; 1317 } 1318 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 1319 return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0), 1320 Zero, Cond); 1321 } 1322 } 1323 1324 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 1325 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 1326 unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits(); 1327 1328 // If the comparison constant has bits in the upper part, the 1329 // zero-extended value could never match. 1330 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 1331 C1.getBitWidth() - InSize))) { 1332 switch (Cond) { 1333 case ISD::SETUGT: 1334 case ISD::SETUGE: 1335 case ISD::SETEQ: return DAG.getConstant(0, VT); 1336 case ISD::SETULT: 1337 case ISD::SETULE: 1338 case ISD::SETNE: return DAG.getConstant(1, VT); 1339 case ISD::SETGT: 1340 case ISD::SETGE: 1341 // True if the sign bit of C1 is set. 1342 return DAG.getConstant(C1.isNegative(), VT); 1343 case ISD::SETLT: 1344 case ISD::SETLE: 1345 // True if the sign bit of C1 isn't set. 1346 return DAG.getConstant(C1.isNonNegative(), VT); 1347 default: 1348 break; 1349 } 1350 } 1351 1352 // Otherwise, we can perform the comparison with the low bits. 1353 switch (Cond) { 1354 case ISD::SETEQ: 1355 case ISD::SETNE: 1356 case ISD::SETUGT: 1357 case ISD::SETUGE: 1358 case ISD::SETULT: 1359 case ISD::SETULE: 1360 return DAG.getSetCC(VT, N0.getOperand(0), 1361 DAG.getConstant(APInt(C1).trunc(InSize), 1362 N0.getOperand(0).getValueType()), 1363 Cond); 1364 default: 1365 break; // todo, be more careful with signed comparisons 1366 } 1367 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 1368 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1369 MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 1370 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 1371 MVT ExtDstTy = N0.getValueType(); 1372 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 1373 1374 // If the extended part has any inconsistent bits, it cannot ever 1375 // compare equal. In other words, they have to be all ones or all 1376 // zeros. 
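      // e.g. comparing (sext_inreg X, i8) against the i32 constant 0x00000180
      // can never yield equality, since bits 31..8 of the constant are neither
      // all zero nor all one.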
1377 APInt ExtBits = 1378 APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits); 1379 if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits) 1380 return DAG.getConstant(Cond == ISD::SETNE, VT); 1381 1382 SDValue ZextOp; 1383 MVT Op0Ty = N0.getOperand(0).getValueType(); 1384 if (Op0Ty == ExtSrcTy) { 1385 ZextOp = N0.getOperand(0); 1386 } else { 1387 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 1388 ZextOp = DAG.getNode(ISD::AND, Op0Ty, N0.getOperand(0), 1389 DAG.getConstant(Imm, Op0Ty)); 1390 } 1391 if (!DCI.isCalledByLegalizer()) 1392 DCI.AddToWorklist(ZextOp.getNode()); 1393 // Otherwise, make this a use of a zext. 1394 return DAG.getSetCC(VT, ZextOp, 1395 DAG.getConstant(C1 & APInt::getLowBitsSet( 1396 ExtDstTyBits, 1397 ExtSrcTyBits), 1398 ExtDstTy), 1399 Cond); 1400 } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) && 1401 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1402 1403 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 1404 if (N0.getOpcode() == ISD::SETCC) { 1405 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getZExtValue() != 1); 1406 if (TrueWhenTrue) 1407 return N0; 1408 1409 // Invert the condition. 1410 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 1411 CC = ISD::getSetCCInverse(CC, 1412 N0.getOperand(0).getValueType().isInteger()); 1413 return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC); 1414 } 1415 1416 if ((N0.getOpcode() == ISD::XOR || 1417 (N0.getOpcode() == ISD::AND && 1418 N0.getOperand(0).getOpcode() == ISD::XOR && 1419 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 1420 isa<ConstantSDNode>(N0.getOperand(1)) && 1421 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) { 1422 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 1423 // can only do this if the top bits are known zero. 1424 unsigned BitWidth = N0.getValueSizeInBits(); 1425 if (DAG.MaskedValueIsZero(N0, 1426 APInt::getHighBitsSet(BitWidth, 1427 BitWidth-1))) { 1428 // Okay, get the un-inverted input value. 1429 SDValue Val; 1430 if (N0.getOpcode() == ISD::XOR) 1431 Val = N0.getOperand(0); 1432 else { 1433 assert(N0.getOpcode() == ISD::AND && 1434 N0.getOperand(0).getOpcode() == ISD::XOR); 1435 // ((X^1)&1)^1 -> X & 1 1436 Val = DAG.getNode(ISD::AND, N0.getValueType(), 1437 N0.getOperand(0).getOperand(0), 1438 N0.getOperand(1)); 1439 } 1440 return DAG.getSetCC(VT, Val, N1, 1441 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 1442 } 1443 } 1444 } 1445 1446 APInt MinVal, MaxVal; 1447 unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits(); 1448 if (ISD::isSignedIntSetCC(Cond)) { 1449 MinVal = APInt::getSignedMinValue(OperandBitSize); 1450 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 1451 } else { 1452 MinVal = APInt::getMinValue(OperandBitSize); 1453 MaxVal = APInt::getMaxValue(OperandBitSize); 1454 } 1455 1456 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 1457 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 1458 if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true 1459 // X >= C0 --> X > (C0-1) 1460 return DAG.getSetCC(VT, N0, DAG.getConstant(C1-1, N1.getValueType()), 1461 (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT); 1462 } 1463 1464 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 1465 if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true 1466 // X <= C0 --> X < (C0+1) 1467 return DAG.getSetCC(VT, N0, DAG.getConstant(C1+1, N1.getValueType()), 1468 (Cond == ISD::SETLE) ? 
                                                   ISD::SETLT : ISD::SETULT);
      }

      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
        return DAG.getConstant(0, VT);      // X < MIN --> false
      if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
        return DAG.getConstant(1, VT);      // X >= MIN --> true
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
        return DAG.getConstant(0, VT);      // X > MAX --> false
      if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
        return DAG.getConstant(1, VT);      // X <= MAX --> true

      // Canonicalize setgt X, Min --> setne X, Min
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
        return DAG.getSetCC(VT, N0, N1, ISD::SETNE);
      // Canonicalize setlt X, Max --> setne X, Max
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
        return DAG.getSetCC(VT, N0, N1, ISD::SETNE);

      // If we have setult X, 1, turn it into seteq X, 0
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(MinVal, N0.getValueType()),
                            ISD::SETEQ);
      // If we have setugt X, Max-1, turn it into seteq X, Max
      else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(MaxVal, N0.getValueType()),
                            ISD::SETEQ);

      // If we have "setcc X, C0", check to see if we can shrink the immediate
      // by changing cc.

      // SETUGT X, SINTMAX  -> SETLT X, 0
      if (Cond == ISD::SETUGT && OperandBitSize != 1 &&
          C1 == (~0ULL >> (65-OperandBitSize)))
        return DAG.getSetCC(VT, N0, DAG.getConstant(0, N1.getValueType()),
                            ISD::SETLT);

      // FIXME: Implement the rest of these.

      // Fold bit comparisons when we can.
      if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
        if (ConstantSDNode *AndRHS =
                    dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0  -->  (X & 8) >> 3
            // Perform the xform if the AND RHS is a single bit.
            if (isPowerOf2_64(AndRHS->getZExtValue())) {
              return DAG.getNode(ISD::SRL, VT, N0,
                             DAG.getConstant(Log2_64(AndRHS->getZExtValue()),
                                             getShiftAmountTy()));
            }
          } else if (Cond == ISD::SETEQ && C1 == AndRHS->getZExtValue()) {
            // (X & 8) == 8  -->  (X & 8) >> 3
            // Perform the xform if C1 is a single bit.
            if (C1.isPowerOf2()) {
              return DAG.getNode(ISD::SRL, VT, N0,
                      DAG.getConstant(C1.logBase2(), getShiftAmountTy()));
            }
          }
        }
    }
  } else if (isa<ConstantSDNode>(N0.getNode())) {
    // Ensure that the constant occurs on the RHS.
    return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
  }

  if (isa<ConstantFPSDNode>(N0.getNode())) {
    // Constant fold or commute setcc.
    SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond);
    if (O.getNode()) return O;
  } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    // If the RHS of an FP comparison is a constant, simplify it away in
    // some cases.
    if (CFP->getValueAPF().isNaN()) {
      // If an operand is known to be a nan, we can fold it.
      switch (ISD::getUnorderedFlavor(Cond)) {
      default: assert(0 && "Unknown flavor!");
      case 0:  // Known false.
        return DAG.getConstant(0, VT);
      case 1:  // Known true.
        return DAG.getConstant(1, VT);
      case 2:  // Undefined.
        return DAG.getNode(ISD::UNDEF, VT);
      }
    }

    // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
    // constant if knowing that the operand is non-nan is enough.  We prefer to
    // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
    // materialize 0.0.
    if (Cond == ISD::SETO || Cond == ISD::SETUO)
      return DAG.getSetCC(VT, N0, N0, Cond);
  }

  if (N0 == N1) {
    // We can always fold X == X for integer setcc's.
    if (N0.getValueType().isInteger())
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2)   // FP operators that are undefined on NaNs.
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(UOF, VT);
    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond)
      return DAG.getSetCC(VT, N0, N1, NewCond);
  }

  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      N0.getValueType().isInteger()) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(0), Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(1), Cond);
        }
      }

      if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(0),
                                DAG.getConstant(RHSC->getAPIntValue()-
                                                LHSR->getAPIntValue(),
                                                N0.getValueType()), Cond);
          }

          // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
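            // For example (illustrative): if X is known to have only its low
            // 8 bits set (X & ~0xFF == 0), then (X ^ 0xFF) == 0x0F can be
            // rewritten as X == 0xF0, since the xor only flips bits covered
            // by C1.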
            if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
              return
                DAG.getSetCC(VT, N0.getOperand(0),
                             DAG.getConstant(LHSR->getAPIntValue() ^
                                               RHSC->getAPIntValue(),
                                             N0.getValueType()),
                             Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
            return
              DAG.getSetCC(VT, N0.getOperand(1),
                           DAG.getConstant(SUBC->getAPIntValue() -
                                             RHSC->getAPIntValue(),
                                           N0.getValueType()),
                           Cond);
          }
        }
      }

      // Simplify (X+Z) == X -->  Z == 0
      if (N0.getOperand(0) == N1)
        return DAG.getSetCC(VT, N0.getOperand(1),
                            DAG.getConstant(0, N0.getValueType()), Cond);
      if (N0.getOperand(1) == N1) {
        if (DAG.isCommutativeBinOp(N0.getOpcode()))
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(0, N0.getValueType()), Cond);
        else if (N0.getNode()->hasOneUse()) {
          assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
          // (Z-X) == X  --> Z == X<<1
          SDValue SH = DAG.getNode(ISD::SHL, N1.getValueType(),
                                   N1,
                                   DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.getNode());
          return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond);
        }
      }
    }

    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify  X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0) {
        return DAG.getSetCC(VT, N1.getOperand(1),
                            DAG.getConstant(0, N1.getValueType()), Cond);
      } else if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode())) {
          return DAG.getSetCC(VT, N1.getOperand(0),
                              DAG.getConstant(0, N1.getValueType()), Cond);
        } else if (N1.getNode()->hasOneUse()) {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          // X == (Z-X)  --> X<<1 == Z
          SDValue SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
                                   DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.getNode());
          return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond);
        }
      }
    }
  }

  // Fold away ALL boolean setcc's.
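  // In the folds below an i1 operand is treated as a single bit, so "X == 1"
  // means the bit is set.  For the signed orderings, note that the i1 bit
  // pattern 1 represents the value -1; that is why X >s Y corresponds to
  // X == 0 and Y == 1.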
  SDValue Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: assert(0 && "Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -> (X^Y)^1
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1));
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y  -->  X == 0 & Y == 1  -->  X^1 & Y
    case ISD::SETULT: // X <u Y  -->  X == 0 & Y == 1  -->  X^1 & Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETLT:  // X <s Y  --> X == 1 & Y == 0  -->  Y^1 & X
    case ISD::SETUGT: // X >u Y  --> X == 1 & Y == 0  -->  Y^1 & X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  X^1 | Y
    case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  X^1 | Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  Y^1 | X
    case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  Y^1 | X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.getNode());
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDValue();
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA,
                                    int64_t &Offset) const {
  if (isa<GlobalAddressSDNode>(N)) {
    GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
    GA = GASD->getGlobal();
    Offset += GASD->getOffset();
    return true;
  }

  if (N->getOpcode() == ISD::ADD) {
    SDValue N1 = N->getOperand(0);
    SDValue N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSExtValue();
        return true;
      }
    } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSExtValue();
        return true;
      }
    }
  }
  return false;
}


/// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is
/// loading 'Bytes' bytes from a location that is 'Dist' units away from the
/// location that the 'Base' load is loading from.
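/// For example, with Bytes == 4 and Dist == 1 this returns true when LD loads
/// the 4-byte word immediately following the word loaded by 'Base'.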
bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base,
                                       unsigned Bytes, int Dist,
                                       const MachineFrameInfo *MFI) const {
  if (LD->getOperand(0).getNode() != Base->getOperand(0).getNode())
    return false;
  MVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  GlobalValue *GV1 = NULL;
  GlobalValue *GV2 = NULL;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}


SDValue TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//


TargetLowering::ConstraintType
TargetLowering::getConstraintType(const std::string &Constraint) const {
  // FIXME: lots more standard ones to handle.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'm':    // memory
    case 'o':    // offsetable
    case 'V':    // not offsetable
      return C_Memory;
    case 'i':    // Simple Integer or Relocatable Constant
    case 'n':    // Simple Integer
    case 's':    // Relocatable Constant
    case 'X':    // Allow ANY value.
    case 'I':    // Target registers.
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Other;
    }
  }

  if (Constraint.size() > 1 && Constraint[0] == '{' &&
      Constraint[Constraint.size()-1] == '}')
    return C_Register;
  return C_Unknown;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const {
  if (ConstraintVT.isInteger())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "f";      // works for many targets
  return 0;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                  char ConstraintLetter,
                                                  bool hasMemory,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const {
  switch (ConstraintLetter) {
  default: break;
  case 'X':     // Allows any operand; labels (basic block) use this.
    if (Op.getOpcode() == ISD::BasicBlock) {
      Ops.push_back(Op);
      return;
    }
    // fall through
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's': {  // Relocatable Constant
    // These operands are interested in values of the form (GV+C), where C may
    // be folded in as an offset of GV, or it may be explicitly added.  Also, it
    // is possible and fine if either GV or C are missing.
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);

    // If we have "(add GV, C)", pull out GV/C
    if (Op.getOpcode() == ISD::ADD) {
      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C == 0 || GA == 0) {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
      }
      if (C == 0 || GA == 0)
        C = 0, GA = 0;
    }

    // If we find a valid operand, map to the TargetXXX version so that the
    // value itself doesn't get selected.
    if (GA) {   // Either &GV or &GV+C
      if (ConstraintLetter != 'n') {
        int64_t Offs = GA->getOffset();
        if (C) Offs += C->getZExtValue();
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
                                                 Op.getValueType(), Offs));
        return;
      }
    }
    if (C) {   // just C, no GV.
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's') {
        Ops.push_back(DAG.getTargetConstant(C->getAPIntValue(),
                                            Op.getValueType()));
        return;
      }
    }
    break;
  }
  }
}

std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
  return std::vector<unsigned>();
}


std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const TargetRegisterInfo *RI = TM.getRegisterInfo();
  for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).AsmName))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}

//===----------------------------------------------------------------------===//
// Constraint Selection.

/// getConstraintGenerality - Return an integer indicating how general CT
/// is.
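/// Higher values are more general: a single specific register is less general
/// than a register class, which is in turn less general than memory.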
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  default: assert(0 && "Unknown constraint type!");
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
    return 3;
  }
}

/// ChooseConstraint - If there are multiple different constraints that we
/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
///    Other         -> immediates and magic values
///    Register      -> one specific register
///    RegisterClass -> a group of regs
///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it.  The problem here
/// is that if we have something that could either be in a register or in
/// memory, choosing the register could cause selection of *other* operands
/// to fail: they might only succeed if we pick memory.  Because of this the
/// heuristic we use is:
///
///  1) If there is an 'other' constraint, and if the operand is valid for
///     that constraint, use it.  This makes us take advantage of 'i'
///     constraints when available.
///  2) Otherwise, pick the most general constraint present.  This prefers
///     'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             bool hasMemory, const TargetLowering &TLI,
                             SDValue Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
      TLI.getConstraintType(OpInfo.Codes[i]);

    // If this is an 'other' constraint, see if the operand is valid for it.
    // For example, on X86 we might have an 'rI' constraint.  If the operand
    // is an integer in the range [0..31] we want to use I (saving a load
    // of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other && Op.getNode()) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      std::vector<SDValue> ResultOps;
      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory,
                                       ResultOps, *DAG);
      if (!ResultOps.empty()) {
        BestType = CType;
        BestIdx = i;
        break;
      }
    }

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}

/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
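/// For instance, given the multi-alternative code "imr", a constant call
/// operand resolves to 'i', while a non-constant operand resolves to the most
/// general remaining alternative, 'm'.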
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                            SDValue Op,
                                            bool hasMemory,
                                            SelectionDAG *DAG) const {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
  } else {
    ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels).
    if (isa<BasicBlock>(OpInfo.CallOperandVal) ||
        isa<ConstantInt>(OpInfo.CallOperandVal))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  // The default implementation accepts only a conservative RISC-like r+r and
  // r+i addressing mode.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r, r+i, and 2*r.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  }

  return true;
}

// Magic for divide replacement

struct ms {
  int64_t m;  // magic number
  int64_t s;  // shift amount
};

struct mu {
  uint64_t m; // magic number
  int64_t a;  // add indicator
  int64_t s;  // shift amount
};

/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
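/// For reference, magic32(7) computes m = 0x92492493 (negative as an int32_t)
/// and s = 2, the well-known multiplier/shift pair for signed division by 7.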
static ms magic32(int32_t d) {
  int32_t p;
  uint32_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint32_t two31 = 0x80000000U;
  struct ms mag;

  ad = abs(d);
  t = two31 + ((uint32_t)d >> 31);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 31;               // initialize p
  q1 = two31/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two31 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two31/ad;        // initialize q2 = 2p/abs(d)
  r2 = two31 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;          // update q1 = 2p/abs(nc)
    r1 = 2*r1;          // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {    // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;          // update q2 = 2p/abs(d)
    r2 = 2*r2;          // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {     // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = (int32_t)(q2 + 1); // make sure to sign extend
  if (d < 0) mag.m = -mag.m; // resulting magic number
  mag.s = p - 32;            // resulting shift
  return mag;
}

/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu32(uint32_t d) {
  int32_t p;
  uint32_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = -1 - (-d)%d;
  p = 31;                   // initialize p
  q1 = 0x80000000/nc;       // initialize q1 = 2p/nc
  r1 = 0x80000000 - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFF/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFF - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;  // update q1
      r1 = 2*r1 - nc; // update r1
    }
    else {
      q1 = 2*q1; // update q1
      r1 = 2*r1; // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFF) magu.a = 1;
      q2 = 2*q2 + 1;     // update q2
      r2 = 2*r2 + 1 - d; // update r2
    }
    else {
      if (q2 >= 0x80000000) magu.a = 1;
      q2 = 2*q2;     // update q2
      r2 = 2*r2 + 1; // update r2
    }
    delta = d - 1 - r2;
  } while (p < 64 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1; // resulting magic number
  magu.s = p - 32; // resulting shift
  return magu;
}

/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic64(int64_t d) {
  int64_t p;
  uint64_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint64_t two63 = 9223372036854775808ULL; // 2^63
  struct ms mag;

  ad = d >= 0 ?
               d : -d;
  t = two63 + ((uint64_t)d >> 63);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 63;               // initialize p
  q1 = two63/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two63 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two63/ad;        // initialize q2 = 2p/abs(d)
  r2 = two63 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;          // update q1 = 2p/abs(nc)
    r1 = 2*r1;          // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {    // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;          // update q2 = 2p/abs(d)
    r2 = 2*r2;          // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {     // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = q2 + 1;
  if (d < 0) mag.m = -mag.m; // resulting magic number
  mag.s = p - 64;            // resulting shift
  return mag;
}

/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu64(uint64_t d)
{
  int64_t p;
  uint64_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = -1 - (-d)%d;
  p = 63;                   // initialize p
  q1 = 0x8000000000000000ull/nc;       // initialize q1 = 2p/nc
  r1 = 0x8000000000000000ull - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFFFFFFFFFFull/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFFFFFFFFFFull - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;  // update q1
      r1 = 2*r1 - nc; // update r1
    }
    else {
      q1 = 2*q1; // update q1
      r1 = 2*r1; // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFFFFFFFFFFull) magu.a = 1;
      q2 = 2*q2 + 1;     // update q2
      r2 = 2*r2 + 1 - d; // update r2
    }
    else {
      if (q2 >= 0x8000000000000000ull) magu.a = 1;
      q2 = 2*q2;     // update q2
      r2 = 2*r2 + 1; // update r2
    }
    delta = d - 1 - r2;
  } while (p < 128 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1; // resulting magic number
  magu.s = p - 64; // resulting shift
  return magu;
}

/// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                  std::vector<SDNode*>* Created) const {
  MVT VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDValue();       // BuildSDIV only operates on i32 or i64

  int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
  ms magics = (VT == MVT::i32) ?
                                 magic32(d) : magic64(d);

  // Multiply the numerator (operand 0) by the magic value
  SDValue Q;
  if (isOperationLegal(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegal(ISD::SMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
                            N->getOperand(0),
                            DAG.getConstant(magics.m, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhs or equivalent
  // If d > 0 and m < 0, add the numerator
  if (d > 0 && magics.m < 0) {
    Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (d < 0 && magics.m > 0) {
    Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // Shift right algebraic if shift value is nonzero
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, VT, Q,
                    DAG.getConstant(magics.s, getShiftAmountTy()));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // Extract the sign bit and add it to the quotient
  SDValue T =
    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
                                                 getShiftAmountTy()));
  if (Created)
    Created->push_back(T.getNode());
  return DAG.getNode(ISD::ADD, VT, Q, T);
}

/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                  std::vector<SDNode*>* Created) const {
  MVT VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDValue();       // BuildUDIV only operates on i32 or i64

  uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);

  // Multiply the numerator (operand 0) by the magic value
  SDValue Q;
  if (isOperationLegal(ISD::MULHU, VT))
    Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegal(ISD::UMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
                            N->getOperand(0),
                            DAG.getConstant(magics.m, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhu or equivalent
  if (Created)
    Created->push_back(Q.getNode());

  if (magics.a == 0) {
    return DAG.getNode(ISD::SRL, VT, Q,
                       DAG.getConstant(magics.s, getShiftAmountTy()));
  } else {
    SDValue NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
    if (Created)
      Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
                      DAG.getConstant(1, getShiftAmountTy()));
    if (Created)
      Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
    if (Created)
      Created->push_back(NPQ.getNode());
    return DAG.getNode(ISD::SRL, VT, NPQ,
                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
  }
}
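// The sequence BuildUDIV emits when magics.a != 0 can be sanity-checked with
// plain integer arithmetic.  The sketch below is illustrative only; it is not
// referenced by the lowering code and the helper name is made up.  It mirrors
// the MULHU/SUB/SRL/ADD/SRL chain for the unsigned divide-by-7 case, where
// magicu32(7) is expected to yield m = 0x24924925, a = 1, s = 3.
#if 0
static bool checkUDivByMagic7(uint32_t x) {
  const uint64_t m = 0x24924925ULL;             // assumed magicu32(7).m
  uint32_t q   = (uint32_t)((x * m) >> 32);     // MULHU x, m
  uint32_t npq = x - q;                         // SUB   x, q
  npq >>= 1;                                    // SRL   npq, 1
  npq += q;                                     // ADD   npq, q
  npq >>= 2;                                    // SRL   npq, s-1  (s == 3)
  return npq == x / 7;                          // should hold for all x
}
#endif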