TargetLowering.cpp revision 15c94d08ab2be2e3d00de4edbfc7adde6545a7db
//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names) {
  Names[RTLIB::SHL_I16] = "__ashli16";
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SHL_I128] = "__ashlti3";
  Names[RTLIB::SRL_I16] = "__lshri16";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRL_I128] = "__lshrti3";
  Names[RTLIB::SRA_I16] = "__ashri16";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::SRA_I128] = "__ashrti3";
  Names[RTLIB::MUL_I16] = "__muli16";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::MUL_I128] = "__multi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::SDIV_I128] = "__divti3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::UDIV_I128] = "__udivti3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::SREM_I128] = "__modti3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::UREM_I128] = "__umodti3";
  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_F80] = "__addxf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_F80] = "__subxf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_F80] = "__mulxf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_F80] = "__divxf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_F80] = "fmodl";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::LOG_F32] = "logf";
  Names[RTLIB::LOG_F64] = "log";
  Names[RTLIB::LOG_F80] = "logl";
  Names[RTLIB::LOG_PPCF128] = "logl";
  Names[RTLIB::LOG2_F32] = "log2f";
  Names[RTLIB::LOG2_F64] = "log2";
  Names[RTLIB::LOG2_F80] = "log2l";
  Names[RTLIB::LOG2_PPCF128] = "log2l";
  Names[RTLIB::LOG10_F32] = "log10f";
  Names[RTLIB::LOG10_F64] = "log10";
  Names[RTLIB::LOG10_F80] = "log10l";
  Names[RTLIB::LOG10_PPCF128] = "log10l";
  Names[RTLIB::EXP_F32] = "expf";
  Names[RTLIB::EXP_F64] = "exp";
  Names[RTLIB::EXP_F80] = "expl";
  Names[RTLIB::EXP_PPCF128] = "expl";
  Names[RTLIB::EXP2_F32] = "exp2f";
  Names[RTLIB::EXP2_F64] = "exp2";
  Names[RTLIB::EXP2_F80] = "exp2l";
  Names[RTLIB::EXP2_PPCF128] = "exp2l";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::SIN_F80] = "sinl";
  Names[RTLIB::SIN_PPCF128] = "sinl";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::COS_F80] = "cosl";
  Names[RTLIB::COS_PPCF128] = "cosl";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::CEIL_F32] = "ceilf";
  Names[RTLIB::CEIL_F64] = "ceil";
  Names[RTLIB::CEIL_F80] = "ceill";
  Names[RTLIB::CEIL_PPCF128] = "ceill";
  Names[RTLIB::TRUNC_F32] = "truncf";
  Names[RTLIB::TRUNC_F64] = "trunc";
  Names[RTLIB::TRUNC_F80] = "truncl";
  Names[RTLIB::TRUNC_PPCF128] = "truncl";
  Names[RTLIB::RINT_F32] = "rintf";
  Names[RTLIB::RINT_F64] = "rint";
  Names[RTLIB::RINT_F80] = "rintl";
  Names[RTLIB::RINT_PPCF128] = "rintl";
  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
  Names[RTLIB::FLOOR_F32] = "floorf";
  Names[RTLIB::FLOOR_F64] = "floor";
  Names[RTLIB::FLOOR_F80] = "floorl";
  Names[RTLIB::FLOOR_PPCF128] = "floorl";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
  Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
  Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
"__floatsisf"; 173 Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf"; 174 Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf"; 175 Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf"; 176 Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf"; 177 Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf"; 178 Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf"; 179 Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf"; 180 Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf"; 181 Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf"; 182 Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf"; 183 Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf"; 184 Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf"; 185 Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf"; 186 Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf"; 187 Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf"; 188 Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf"; 189 Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf"; 190 Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf"; 191 Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf"; 192 Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf"; 193 Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf"; 194 Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf"; 195 Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf"; 196 Names[RTLIB::OEQ_F32] = "__eqsf2"; 197 Names[RTLIB::OEQ_F64] = "__eqdf2"; 198 Names[RTLIB::UNE_F32] = "__nesf2"; 199 Names[RTLIB::UNE_F64] = "__nedf2"; 200 Names[RTLIB::OGE_F32] = "__gesf2"; 201 Names[RTLIB::OGE_F64] = "__gedf2"; 202 Names[RTLIB::OLT_F32] = "__ltsf2"; 203 Names[RTLIB::OLT_F64] = "__ltdf2"; 204 Names[RTLIB::OLE_F32] = "__lesf2"; 205 Names[RTLIB::OLE_F64] = "__ledf2"; 206 Names[RTLIB::OGT_F32] = "__gtsf2"; 207 Names[RTLIB::OGT_F64] = "__gtdf2"; 208 Names[RTLIB::UO_F32] = "__unordsf2"; 209 Names[RTLIB::UO_F64] = "__unorddf2"; 210 Names[RTLIB::O_F32] = "__unordsf2"; 211 Names[RTLIB::O_F64] = "__unorddf2"; 212} 213 214/// getFPEXT - Return the FPEXT_*_* value for the given types, or 215/// UNKNOWN_LIBCALL if there is none. 216RTLIB::Libcall RTLIB::getFPEXT(MVT OpVT, MVT RetVT) { 217 if (OpVT == MVT::f32) { 218 if (RetVT == MVT::f64) 219 return FPEXT_F32_F64; 220 } 221 return UNKNOWN_LIBCALL; 222} 223 224/// getFPROUND - Return the FPROUND_*_* value for the given types, or 225/// UNKNOWN_LIBCALL if there is none. 226RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) { 227 if (RetVT == MVT::f32) { 228 if (OpVT == MVT::f64) 229 return FPROUND_F64_F32; 230 if (OpVT == MVT::f80) 231 return FPROUND_F80_F32; 232 if (OpVT == MVT::ppcf128) 233 return FPROUND_PPCF128_F32; 234 } else if (RetVT == MVT::f64) { 235 if (OpVT == MVT::f80) 236 return FPROUND_F80_F64; 237 if (OpVT == MVT::ppcf128) 238 return FPROUND_PPCF128_F64; 239 } 240 return UNKNOWN_LIBCALL; 241} 242 243/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or 244/// UNKNOWN_LIBCALL if there is none. 
/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(MVT OpVT, MVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
}
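// Illustrative note (not in the original source): when a floating-point
// comparison is softened, the libcall's integer result is compared against
// zero using the condition code recorded above.  A sketch for an OEQ_F32
// setcc:
//
//   int tmp = __eqsf2(a, b);      // 0 when a == b (ordered)
//   result  = (tmp SETEQ 0);      // CCs[RTLIB::OEQ_F32] == ISD::SETEQ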
TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(ConvertActions, 0, sizeof(ConvertActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));

  // Set default actions for various operations.
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
    }

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use
  // addLegalFPImmediate to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);

  // These library functions default to expand.
  setOperationAction(ISD::FLOG , MVT::f64, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);
  setOperationAction(ISD::FEXP , MVT::f64, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);
  setOperationAction(ISD::FLOG , MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP , MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f32, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  IsLittleEndian = TD->isLittleEndian();
  UsesGlobalOffsetTable = false;
  ShiftAmountTy = PointerTy = getValueType(TD->getIntPtrType());
  ShiftAmtHandling = Undefined;
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  BooleanContents = UndefinedBooleanContent;
  SchedPreferenceInfo = SchedulingForLatency;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  IfCvtBlockSizeLimit = 2;
  IfCvtDupBlockSizeLimit = 0;
  PrefLoopAlignment = 0;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);

  // Tell Legalize whether the assembler supports DEBUG_LOC.
  const TargetAsmInfo *TASM = TM.getTargetAsmInfo();
  if (!TASM || !TASM->hasDotLocAndDotFile())
    setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
}

TargetLowering::~TargetLowering() {}
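// Illustrative note (not in the original source): a target customizes these
// defaults in its own TargetLowering subclass constructor.  A hedged sketch
// for a hypothetical target:
//
//   MyTargetLowering::MyTargetLowering(TargetMachine &TM)
//     : TargetLowering(TM) {
//     addRegisterClass(MVT::i32, MyTarget::GPRRegisterClass); // hypothetical
//     setOperationAction(ISD::FSIN, MVT::f32, Expand);        // no native fsin
//     computeRegisterProperties();
//   }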
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= 32 &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
    MVT EVT = (MVT::SimpleValueType)ExpandedReg;
    if (!EVT.isInteger())
      break;
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction(EVT, Expand);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
        (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, Promote);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
    TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, Expand);
  }

  // Decide how to handle f64.  If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, Expand);
  }

  // Decide how to handle f32.  If the target does not have native support for
  // f32, promote it to f64 if it is legal.  Otherwise, expand it to i32.
  if (!isTypeLegal(MVT::f32)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
      TransformToType[MVT::f32] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::f32, Promote);
    } else {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
      TransformToType[MVT::f32] = MVT::i32;
      ValueTypeActions.setTypeAction(MVT::f32, Expand);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (!isTypeLegal(VT)) {
      MVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] =
        getVectorTypeBreakdown(VT,
                               IntermediateVT, NumIntermediates,
                               RegisterVT);
      RegisterTypeForVT[i] = RegisterVT;

      // Determine if there is a legal wider type.
      bool IsLegalWiderType = false;
      MVT EltVT = VT.getVectorElementType();
      unsigned NElts = VT.getVectorNumElements();
      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType)nVT;
        if (isTypeLegal(SVT) && SVT.getVectorElementType() == EltVT &&
            SVT.getVectorNumElements() > NElts) {
          TransformToType[i] = SVT;
          ValueTypeActions.setTypeAction(VT, Promote);
          IsLegalWiderType = true;
          break;
        }
      }
      if (!IsLegalWiderType) {
        MVT NVT = VT.getPow2VectorType();
        if (NVT == VT) {
          // Type is already a power of 2.  The default action is to split.
          TransformToType[i] = MVT::Other;
          ValueTypeActions.setTypeAction(VT, Expand);
        } else {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, Promote);
        }
      }
    }
  }
}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}


MVT TargetLowering::getSetCCResultType(MVT VT) const {
  return getValueType(TD->getIntPtrType());
}
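// Illustrative note (not in the original source): on a target whose widest
// legal integer type is i32, the loops above record i64 as Expand with
// NumRegistersForVT[MVT::i64] == 2 and RegisterTypeForVT[MVT::i64] == i32,
// while i16, i8 and i1 are marked Promote up to the next larger legal
// integer type if they have no register class of their own.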
/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
                                                MVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getTypeToTransformTo(NewVT);
  RegisterVT = DestVT;
  if (DestVT.bitsLT(NewVT)) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }

  return 1;
}
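// Illustrative note (not in the original source): for a non-power-of-2 type
// such as MVT::v7i8, the code above sets NumVectorRegs = 7 and NumElts = 1,
// i.e. the vector is scalarized into seven i8 intermediate values, each
// carried in whatever register type getTypeToTransformTo(MVT::i8) reports.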
/// getWidenVectorType: given a vector type, returns the type to widen to
/// (e.g., v7i8 to v8i8).  If the vector type is legal, it returns itself.
/// If there is no vector type that we want to widen to, returns MVT::Other.
/// When and where to widen is target dependent based on the cost of
/// scalarizing vs using the wider vector type.
MVT TargetLowering::getWidenVectorType(MVT VT) const {
  assert(VT.isVector());
  if (isTypeLegal(VT))
    return VT;

  // Default is not to widen until moved to LegalizeTypes.
  return MVT::Other;
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  return TD->getCallFrameTypeAlignment(Ty);
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  return Table;
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // Assume that everything is safe in static mode.
  if (getTargetMachine().getRelocationModel() == Reloc::Static)
    return true;

  // In dynamic-no-pic mode, assume that known defined values are safe.
  if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
      GA &&
      !GA->getGlobal()->isDeclaration() &&
      !GA->getGlobal()->mayBeOverridden())
    return true;

  // Otherwise assume nothing is safe.
  return false;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
                                                        const APInt &Demanded) {
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (C->getAPIntValue().intersects(~Demanded)) {
        MVT VT = Op.getValueType();
        SDValue New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                  DAG.getConstant(Demanded &
                                                  C->getAPIntValue(),
                                                  VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}
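// Illustrative note (not in the original source): if Op is (X | 0xFF) and
// only the low four bits are demanded, the constant intersects ~Demanded,
// so the node is rebuilt as (X | 0xF), using Demanded & C == 0xF.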
/// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream.  If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New.
/// Otherwise, analyze the expression and return a mask of KnownOne and
/// KnownZero bits for the expression (used to simplify the caller).  The
/// KnownZero/One bits may only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedMask,
                                          APInt &KnownZero,
                                          APInt &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  unsigned BitWidth = DemandedMask.getBitWidth();
  assert(Op.getValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");
  APInt NewMask = DemandedMask;

  // Don't know anything.
  KnownZero = KnownOne = APInt(BitWidth, 0);

  // Other users may use these bits.
  if (!Op.getNode()->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the NewMask to all bits.
    NewMask = APInt::getAllOnesValue(BitWidth);
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask;
    KnownZero = ~KnownOne & NewMask;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS.  Below, we use knowledge about the RHS to
    // simplify the LHS; here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt LHSZero, LHSOne;
      TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
                                LHSZero, LHSOne, Depth+1);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'or'.
    if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'xor'.
    if ((KnownZero & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((KnownZero2 & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(1));

    // If all of the unknown bits are known to be zero on one side or the
    // other (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
      if ((KnownOne & KnownOne2) == KnownOne) {
        MVT VT = Op.getValueType();
        SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
                                                 ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // For XOR, we prefer to force bits to 1 if they will make a -1.
    // If we can't force bits, try to shrink the constant.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt Expanded = C->getAPIntValue() | (~NewMask);
      // If we can expand it to have all bits set, do it.
      if (Expanded.isAllOnesValue()) {
        if (Expanded != C->getAPIntValue()) {
          MVT VT = Op.getValueType();
          SDValue New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                        TLO.DAG.getConstant(Expanded, VT));
          return TLO.CombineTo(Op, New);
        }
        // If it already has all the bits set, there is nothing to change,
        // but don't shrink either!
      } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
        return true;
      }
    }

    KnownZero = KnownZeroOut;
    KnownOne = KnownOneOut;
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift.  We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (InOp.getOpcode() == ISD::SRL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
          unsigned Opc = ISD::SHL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SRL;
          }

          SDValue NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          MVT VT = Op.getValueType();
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      KnownZero <<= SA->getZExtValue();
      KnownOne  <<= SA->getZExtValue();
      // Low bits known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue());
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT VT = Op.getValueType();
      unsigned ShAmt = SA->getZExtValue();
      unsigned VTSize = VT.getSizeInBits();
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift.  We can do this if the top bits (which are shifted out)
      // are never demanded.
      if (InOp.getOpcode() == ISD::SHL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
          unsigned Opc = ISD::SRL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SHL;
          }

          SDValue NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(InOp, (NewMask << ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT VT = Op.getValueType();
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      APInt InDemandedMask = (NewMask << ShAmt);

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      if (HighBits.intersects(NewMask))
        InDemandedMask |= APInt::getSignBit(VT.getSizeInBits());

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bit, adjusted to where it is now in the mask.
      APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
                                                 Op.getOperand(1)));
      } else if (KnownOne.intersects(SignBit)) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth,
                                          BitWidth - EVT.getSizeInBits()) &
                    NewMask;

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (NewBits == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits());
    InSignBit.zext(BitWidth);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth,
                                                   EVT.getSizeInBits()) &
                              NewMask;

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT));

    if (KnownOne.intersects(InSignBit)) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                 // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
    APInt InMask = NewMask;
    InMask.trunc(OperandBitWidth);

    // If none of the top bits are demanded, convert this into an any_extend.
    APInt NewBits =
      APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
    if (!NewBits.intersects(NewMask))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    MVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getSizeInBits();
    APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
    APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
    APInt NewBits   = ~InMask & NewMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the
    // sign bit is demanded.
    APInt InDemandedBits = InMask & NewMask;
    InDemandedBits |= InSignBit;
    InDemandedBits.trunc(InBits);

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne.intersects(InSignBit)) {
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {   // Otherwise, top bits aren't known.
      KnownOne  &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
    APInt InMask = NewMask;
    InMask.trunc(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the
    // known zero/one bits live out.
    APInt TruncMask = NewMask;
    TruncMask.zext(Op.getOperand(0).getValueSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    KnownZero.trunc(BitWidth);
    KnownOne.trunc(BitWidth);

    // If the input is only used by this truncate, see if we can shrink it
    // based on the known demanded bits.
    if (Op.getOperand(0).getNode()->hasOneUse()) {
      SDValue In = Op.getOperand(0);
      unsigned InBitWidth = In.getValueSizeInBits();
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){
          APInt HighBits = APInt::getHighBitsSet(InBitWidth,
                                                 InBitWidth - BitWidth);
          HighBits = HighBits.lshr(ShAmt->getZExtValue());
          HighBits.trunc(BitWidth);

          if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
            // None of the shifted in bits are needed.  Add a truncate of the
            // shift input, then shift it.
            SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
                                               Op.getValueType(),
                                               In.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, Op.getValueType(),
                                                     NewTrunc, In.getOperand(1)));
          }
        }
        break;
      }
    }

    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth,
                                        VT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= ~InMask & NewMask;
    break;
  }
  case ISD::BIT_CONVERT:
#if 0
    // If this is an FP->Int bitcast and if the sign bit is the only thing that
    // is demanded, turn this into a FGETSIGN.
    if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) &&
        MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
        !MVT::isVector(Op.getOperand(0).getValueType())) {
      // Only do this xform if FGETSIGN is valid or if before legalize.
      if (!TLO.AfterLegalize ||
          isOperationLegal(ISD::FGETSIGN, Op.getValueType())) {
        // Make a FGETSIGN + SHL to move the sign bit into the appropriate
        // place.  We expect the SHL to be eliminated by other optimizations.
        SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
                                       Op.getOperand(0));
        unsigned ShVal = Op.getValueType().getSizeInBits()-1;
        SDValue ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(),
                                                 Sign, ShAmt));
      }
    }
#endif
    break;
  default:
    // Just use ComputeMaskedBits to compute output bits.
    TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((NewMask & (KnownZero|KnownOne)) == NewMask)
    return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));

  return false;
}
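// Illustrative note (not in the original source): a DAG combine typically
// drives this method through a TargetLoweringOpt, roughly along these lines
// (a sketch, assuming the TargetLoweringOpt constructor of this era):
//
//   TargetLowering::TargetLoweringOpt TLO(DAG, AfterLegalize);
//   APInt KnownZero, KnownOne;
//   if (TLI.SimplifyDemandedBits(Op, DemandedMask, KnownZero, KnownOne, TLO))
//     /* replace TLO.Old with TLO.New in the DAG */;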
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                    const APInt &Mask,
                                                    APInt &KnownZero,
                                                    APInt &KnownOne,
                                                    const SelectionDAG &DAG,
                                                    unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
}

/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}
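// Illustrative note (not in the original source): a target with custom
// (post-ISD::BUILTIN_OP_END) node types overrides the two hooks above so the
// DAG combiner can reason about them, e.g. reporting that a hypothetical
// MYISD::SEXT_W node always produces at least 33 sign bits in a 64-bit
// register.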
/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc.  If it is unable to simplify it, return a null SDValue.
SDValue
TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
                              ISD::CondCode Cond, bool foldBooleans,
                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return DAG.getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return DAG.getConstant(1, VT);
  }

  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
    const APInt &C1 = N1C->getAPIntValue();
    if (isa<ConstantSDNode>(N0.getNode())) {
      return DAG.FoldSetCC(VT, N0, N1, Cond);
    } else {
      // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
      // equality comparison, then we're just comparing whether X itself is
      // zero.
      if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
          N0.getOperand(0).getOpcode() == ISD::CTLZ &&
          N0.getOperand(1).getOpcode() == ISD::Constant) {
        unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
        if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
            ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
          if ((C1 == 0) == (Cond == ISD::SETEQ)) {
            // (srl (ctlz x), 5) == 0  -> X != 0
            // (srl (ctlz x), 5) != 1  -> X != 0
            Cond = ISD::SETNE;
          } else {
            // (srl (ctlz x), 5) != 0  -> X == 0
            // (srl (ctlz x), 5) == 1  -> X == 0
            Cond = ISD::SETEQ;
          }
          SDValue Zero = DAG.getConstant(0, N0.getValueType());
          return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0),
                              Zero, Cond);
        }
      }

      // If the LHS is '(and load, const)', the RHS is 0, the test is for
      // equality or unsigned, and all 1 bits of the const are in the same
      // partial word, see if we can shorten the load.
      if (DCI.isBeforeLegalize() &&
          N0.getOpcode() == ISD::AND && C1 == 0 &&
          isa<LoadSDNode>(N0.getOperand(0)) &&
          N0.getOperand(0).getNode()->hasOneUse() &&
          isa<ConstantSDNode>(N0.getOperand(1))) {
        LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
        uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
        uint64_t bestMask = 0;
        unsigned bestWidth = 0, bestOffset = 0;
        if (!Lod->isVolatile() && Lod->isUnindexed()) {
          unsigned origWidth = N0.getValueType().getSizeInBits();
          // We can narrow (e.g.) 16-bit extending loads on a 32-bit target to
          // 8 bits, but have to be careful...
          if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
            origWidth = Lod->getMemoryVT().getSizeInBits();
          for (unsigned width = origWidth / 2; width >= 8; width /= 2) {
            uint64_t newMask = (1ULL << width) - 1;
            for (unsigned offset = 0; offset < origWidth/width; offset++) {
              if ((newMask & Mask) == Mask) {
                if (!TD->isLittleEndian())
                  bestOffset = (origWidth/width - offset - 1) * (width/8);
                else
                  bestOffset = (uint64_t)offset * (width/8);
                bestMask = Mask >> (offset * (width/8) * 8);
                bestWidth = width;
                break;
              }
              newMask = newMask << width;
            }
          }
        }
        if (bestWidth) {
          MVT newVT = MVT::getIntegerVT(bestWidth);
          if (newVT.isRound()) {
            MVT PtrType = Lod->getOperand(1).getValueType();
            SDValue Ptr = Lod->getBasePtr();
            if (bestOffset != 0)
              Ptr = DAG.getNode(ISD::ADD, PtrType, Lod->getBasePtr(),
                                DAG.getConstant(bestOffset, PtrType));
            unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
            SDValue NewLoad = DAG.getLoad(newVT, Lod->getChain(), Ptr,
                                          Lod->getSrcValue(),
                                          Lod->getSrcValueOffset() + bestOffset,
                                          false, NewAlign);
            return DAG.getSetCC(VT, DAG.getNode(ISD::AND, newVT, NewLoad,
                                          DAG.getConstant(bestMask, newVT)),
                                DAG.getConstant(0LL, newVT), Cond);
          }
        }
      }
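      // Illustrative note (not in the original source): on a little-endian
      // target, ((i32 load p) & 0xFF00) == 0 fits an 8-bit window at byte
      // offset 1, so the code above rebuilds it as ((i8 load p+1) & 0xFF) == 0.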
      // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
      if (N0.getOpcode() == ISD::ZERO_EXTEND) {
        unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();

        // If the comparison constant has bits in the upper part, the
        // zero-extended value could never match.
        if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
                                                C1.getBitWidth() - InSize))) {
          switch (Cond) {
          case ISD::SETUGT:
          case ISD::SETUGE:
          case ISD::SETEQ: return DAG.getConstant(0, VT);
          case ISD::SETULT:
          case ISD::SETULE:
          case ISD::SETNE: return DAG.getConstant(1, VT);
          case ISD::SETGT:
          case ISD::SETGE:
            // True if the sign bit of C1 is set.
            return DAG.getConstant(C1.isNegative(), VT);
          case ISD::SETLT:
          case ISD::SETLE:
            // True if the sign bit of C1 isn't set.
            return DAG.getConstant(C1.isNonNegative(), VT);
          default:
            break;
          }
        }

        // Otherwise, we can perform the comparison with the low bits.
        switch (Cond) {
        case ISD::SETEQ:
        case ISD::SETNE:
        case ISD::SETUGT:
        case ISD::SETUGE:
        case ISD::SETULT:
        case ISD::SETULE:
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(APInt(C1).trunc(InSize),
                                              N0.getOperand(0).getValueType()),
                              Cond);
        default:
          break;   // TODO: be more careful with signed comparisons.
        }
      } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
        MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
        unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
        MVT ExtDstTy = N0.getValueType();
        unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();

        // If the extended part has any inconsistent bits, it cannot ever
        // compare equal.  In other words, they have to be all ones or all
        // zeros.
        APInt ExtBits =
          APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits);
        if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
          return DAG.getConstant(Cond == ISD::SETNE, VT);

        SDValue ZextOp;
        MVT Op0Ty = N0.getOperand(0).getValueType();
        if (Op0Ty == ExtSrcTy) {
          ZextOp = N0.getOperand(0);
        } else {
          APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
          ZextOp = DAG.getNode(ISD::AND, Op0Ty, N0.getOperand(0),
                               DAG.getConstant(Imm, Op0Ty));
        }
        if (!DCI.isCalledByLegalizer())
          DCI.AddToWorklist(ZextOp.getNode());
        // Otherwise, make this a use of a zext.
        return DAG.getSetCC(VT, ZextOp,
                            DAG.getConstant(C1 & APInt::getLowBitsSet(
                                                            ExtDstTyBits,
                                                            ExtSrcTyBits),
                                            ExtDstTy),
                            Cond);
      } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {

        // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
        if (N0.getOpcode() == ISD::SETCC) {
          bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getZExtValue() != 1);
          if (TrueWhenTrue)
            return N0;

          // Invert the condition.
          ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
          CC = ISD::getSetCCInverse(CC,
                                   N0.getOperand(0).getValueType().isInteger());
          return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC);
        }

        if ((N0.getOpcode() == ISD::XOR ||
             (N0.getOpcode() == ISD::AND &&
              N0.getOperand(0).getOpcode() == ISD::XOR &&
              N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
            isa<ConstantSDNode>(N0.getOperand(1)) &&
            cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
          // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
          // can only do this if the top bits are known zero.
          unsigned BitWidth = N0.getValueSizeInBits();
          if (DAG.MaskedValueIsZero(N0,
                                    APInt::getHighBitsSet(BitWidth,
                                                          BitWidth-1))) {
            // Okay, get the un-inverted input value.
            SDValue Val;
            if (N0.getOpcode() == ISD::XOR)
              Val = N0.getOperand(0);
            else {
              assert(N0.getOpcode() == ISD::AND &&
                     N0.getOperand(0).getOpcode() == ISD::XOR);
              // ((X^1)&1)^1 -> X & 1
              Val = DAG.getNode(ISD::AND, N0.getValueType(),
                                N0.getOperand(0).getOperand(0),
                                N0.getOperand(1));
            }
      }
    }
  }

  APInt MinVal, MaxVal;
  unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
  if (ISD::isSignedIntSetCC(Cond)) {
    MinVal = APInt::getSignedMinValue(OperandBitSize);
    MaxVal = APInt::getSignedMaxValue(OperandBitSize);
  } else {
    MinVal = APInt::getMinValue(OperandBitSize);
    MaxVal = APInt::getMaxValue(OperandBitSize);
  }

  // Canonicalize GE/LE comparisons to use GT/LT comparisons.
  if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
    if (C1 == MinVal) return DAG.getConstant(1, VT);   // X >= MIN --> true
    // X >= C0 --> X > (C0-1)
    return DAG.getSetCC(VT, N0, DAG.getConstant(C1-1, N1.getValueType()),
                        (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
  }

  if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
    if (C1 == MaxVal) return DAG.getConstant(1, VT);   // X <= MAX --> true
    // X <= C0 --> X < (C0+1)
    return DAG.getSetCC(VT, N0, DAG.getConstant(C1+1, N1.getValueType()),
                        (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
  }

  if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
    return DAG.getConstant(0, VT);      // X < MIN --> false
  if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
    return DAG.getConstant(1, VT);      // X >= MIN --> true
  if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
    return DAG.getConstant(0, VT);      // X > MAX --> false
  if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
    return DAG.getConstant(1, VT);      // X <= MAX --> true

  // Canonicalize setgt X, Min --> setne X, Min
  if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
    return DAG.getSetCC(VT, N0, N1, ISD::SETNE);
  // Canonicalize setlt X, Max --> setne X, Max
  if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
    return DAG.getSetCC(VT, N0, N1, ISD::SETNE);

  // If we have setult X, 1, turn it into seteq X, 0
  if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
    return DAG.getSetCC(VT, N0, DAG.getConstant(MinVal, N0.getValueType()),
                        ISD::SETEQ);
  // If we have setugt X, Max-1, turn it into seteq X, Max
  else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
    return DAG.getSetCC(VT, N0, DAG.getConstant(MaxVal, N0.getValueType()),
                        ISD::SETEQ);

  // If we have "setcc X, C0", check to see if we can shrink the immediate
  // by changing cc.

  // SETUGT X, SINTMAX  -> SETLT X, 0
  if (Cond == ISD::SETUGT &&
      C1 == APInt::getSignedMaxValue(OperandBitSize))
    return DAG.getSetCC(VT, N0, DAG.getConstant(0, N1.getValueType()),
                        ISD::SETLT);

  // SETULT X, SINTMIN  -> SETGT X, -1
  if (Cond == ISD::SETULT &&
      C1 == APInt::getSignedMinValue(OperandBitSize)) {
    SDValue ConstMinusOne =
      DAG.getConstant(APInt::getAllOnesValue(OperandBitSize),
                      N1.getValueType());
    return DAG.getSetCC(VT, N0, ConstMinusOne, ISD::SETGT);
  }

  // Fold bit comparisons when we can.
  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
    if (ConstantSDNode *AndRHS =
                dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
      if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0  -->  (X & 8) >> 3
        // Perform the xform if the AND RHS is a single bit.
        if (isPowerOf2_64(AndRHS->getZExtValue())) {
          return DAG.getNode(ISD::SRL, VT, N0,
                             DAG.getConstant(Log2_64(AndRHS->getZExtValue()),
                                             getShiftAmountTy()));
        }
      } else if (Cond == ISD::SETEQ && C1 == AndRHS->getZExtValue()) {
        // (X & 8) == 8  -->  (X & 8) >> 3
        // Perform the xform if C1 is a single bit.
        if (C1.isPowerOf2()) {
          return DAG.getNode(ISD::SRL, VT, N0,
                             DAG.getConstant(C1.logBase2(),
                                             getShiftAmountTy()));
        }
      }
    }
  } else if (isa<ConstantSDNode>(N0.getNode())) {
    // Ensure that the constant occurs on the RHS.
    return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
  }

  if (isa<ConstantFPSDNode>(N0.getNode())) {
    // Constant fold or commute setcc.
    SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond);
    if (O.getNode()) return O;
  } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    // If the RHS of an FP comparison is a constant, simplify it away in
    // some cases.
    if (CFP->getValueAPF().isNaN()) {
      // If an operand is known to be a nan, we can fold it.
      switch (ISD::getUnorderedFlavor(Cond)) {
      default: assert(0 && "Unknown flavor!");
      case 0:  // Known false.
        return DAG.getConstant(0, VT);
      case 1:  // Known true.
        return DAG.getConstant(1, VT);
      case 2:  // Undefined.
        return DAG.getNode(ISD::UNDEF, VT);
      }
    }

    // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
    // constant if knowing that the operand is non-nan is enough.  We prefer to
    // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
    // materialize 0.0.
    if (Cond == ISD::SETO || Cond == ISD::SETUO)
      return DAG.getSetCC(VT, N0, N0, Cond);
  }

  if (N0 == N1) {
    // We can always fold X == X for integer setcc's.
    if (N0.getValueType().isInteger())
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2)   // FP operators that are undefined on NaNs.
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(UOF, VT);
    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond)
      return DAG.getSetCC(VT, N0, N1, NewCond);
  }

  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      N0.getValueType().isInteger()) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(0), Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(1), Cond);
        }
      }

      if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(0),
                                DAG.getConstant(RHSC->getAPIntValue()-
                                                LHSR->getAPIntValue(),
                                                N0.getValueType()), Cond);
          }

          // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
              return
                DAG.getSetCC(VT, N0.getOperand(0),
                             DAG.getConstant(LHSR->getAPIntValue() ^
                                               RHSC->getAPIntValue(),
                                             N0.getValueType()),
                             Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) {
            return
              DAG.getSetCC(VT, N0.getOperand(1),
                           DAG.getConstant(SUBC->getAPIntValue() -
                                             RHSC->getAPIntValue(),
                                           N0.getValueType()),
                           Cond);
          }
        }
      }

      // Simplify (X+Z) == X -->  Z == 0
      if (N0.getOperand(0) == N1)
        return DAG.getSetCC(VT, N0.getOperand(1),
                            DAG.getConstant(0, N0.getValueType()), Cond);
      if (N0.getOperand(1) == N1) {
        if (DAG.isCommutativeBinOp(N0.getOpcode()))
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(0, N0.getValueType()), Cond);
        else if (N0.getNode()->hasOneUse()) {
          assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
          // (Z-X) == X  --> Z == X<<1
          SDValue SH = DAG.getNode(ISD::SHL, N1.getValueType(),
                                   N1,
                                   DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.getNode());
          return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond);
        }
      }
    }

    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify  X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0) {
        return DAG.getSetCC(VT, N1.getOperand(1),
                            DAG.getConstant(0, N1.getValueType()), Cond);
      } else if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode())) {
          return DAG.getSetCC(VT, N1.getOperand(0),
                              DAG.getConstant(0, N1.getValueType()), Cond);
        } else if (N1.getNode()->hasOneUse()) {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          // X == (Z-X)  --> X<<1 == Z
          SDValue SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
                                   DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.getNode());
          return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond);
        }
      }
    }
  }

  // Fold away ALL boolean setcc's.
  SDValue Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: assert(0 && "Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -> (X^Y)^1
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1));
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y  -->  X == 0 & Y == 1  -->  X^1 & Y
    case ISD::SETULT: // X <u Y  -->  X == 0 & Y == 1  -->  X^1 & Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETLT:  // X <s Y  --> X == 1 & Y == 0  -->  Y^1 & X
    case ISD::SETUGT: // X >u Y  --> X == 1 & Y == 0  -->  Y^1 & X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  X^1 | Y
    case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  X^1 | Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.getNode());
      break;
    case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  Y^1 | X
    case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  Y^1 | X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.getNode());
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDValue();
}
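
// A self-contained sketch of the bestWidth/bestOffset search that the
// narrow-load fold near the top of SimplifySetCC performs: find the
// narrowest power-of-two field width (>= 8 bits) whose naturally aligned
// subfield still covers every set bit of the mask.  Plain-integer rendition
// for the little-endian case only, assuming origWidth is a power of two no
// larger than 64; the names below are illustrative and are not used by the
// lowering code itself.
#include <cstdint>

namespace {
struct NarrowLoadHit {
  unsigned Width;    // narrowed load width, in bits
  unsigned Offset;   // little-endian byte offset of the narrowed load
  uint64_t Mask;     // the mask re-expressed in the narrowed width
};

inline bool findNarrowLoad(uint64_t Mask, unsigned OrigWidth,
                           NarrowLoadHit &Hit) {
  bool Found = false;
  for (unsigned Width = OrigWidth / 2; Width >= 8; Width /= 2) {
    uint64_t Field = (1ULL << Width) - 1;     // aligned subfield at offset 0
    for (unsigned Off = 0; Off != OrigWidth / Width; ++Off) {
      if ((Field & Mask) == Mask) {           // every set bit inside field?
        Hit.Width  = Width;
        Hit.Offset = Off * (Width / 8);
        Hit.Mask   = Mask >> (Off * Width);
        Found = true;                         // a narrower width may still win
        break;
      }
      Field <<= Width;                        // slide to the next subfield
    }
  }
  return Found;
}
// e.g. Mask 0xFF00 in a 32-bit load yields {Width=8, Offset=1, Mask=0xFF}:
// the i32 (and (load p), 0xFF00) == 0 test becomes an i8 load of p+1.
} // end anonymous namespace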
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA,
                                    int64_t &Offset) const {
  if (isa<GlobalAddressSDNode>(N)) {
    GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
    GA = GASD->getGlobal();
    Offset += GASD->getOffset();
    return true;
  }

  if (N->getOpcode() == ISD::ADD) {
    SDValue N1 = N->getOperand(0);
    SDValue N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.getNode(), GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSExtValue();
        return true;
      }
    } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSExtValue();
        return true;
      }
    }
  }
  return false;
}


/// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is
/// loading 'Bytes' bytes from a location that is 'Dist' units away from the
/// location that the 'Base' load is loading from.
bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base,
                                       unsigned Bytes, int Dist,
                                       const MachineFrameInfo *MFI) const {
  if (LD->getOperand(0).getNode() != Base->getOperand(0).getNode())
    return false;
  MVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  GlobalValue *GV1 = NULL;
  GlobalValue *GV2 = NULL;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}
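
// Once both addresses are rewritten as (base, offset) pairs -- frame slots
// or GlobalValue+offset -- the check above reduces to a single comparison.
// A sketch on a hypothetical ToyAddr, purely to illustrate the
// Offset1 == Offset2 + Dist*Bytes test.
#include <cstdint>

namespace {
struct ToyAddr {
  const void *Base;  // frame-slot or global-symbol identity
  int64_t Off;       // byte offset from that base
};

inline bool toyConsecutive(ToyAddr A, ToyAddr B, unsigned Bytes, int Dist) {
  return A.Base == B.Base && A.Off == B.Off + (int64_t)Dist * Bytes;
}
// e.g. a 4-byte load at GV+4 is consecutive (Dist = 1) with a 4-byte base
// load at GV+0.
} // end anonymous namespace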
SDValue TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//


TargetLowering::ConstraintType
TargetLowering::getConstraintType(const std::string &Constraint) const {
  // FIXME: lots more standard ones to handle.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'm':    // memory
    case 'o':    // offsetable
    case 'V':    // not offsetable
      return C_Memory;
    case 'i':    // Simple Integer or Relocatable Constant
    case 'n':    // Simple Integer
    case 's':    // Relocatable Constant
    case 'X':    // Allow ANY value.
    case 'I':    // Target registers.
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Other;
    }
  }

  if (Constraint.size() > 1 && Constraint[0] == '{' &&
      Constraint[Constraint.size()-1] == '}')
    return C_Register;
  return C_Unknown;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const {
  if (ConstraintVT.isInteger())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "f";      // works for many targets
  return 0;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                  char ConstraintLetter,
                                                  bool hasMemory,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const {
  switch (ConstraintLetter) {
  default: break;
  case 'X':     // Allows any operand; labels (basic block) use this.
    if (Op.getOpcode() == ISD::BasicBlock) {
      Ops.push_back(Op);
      return;
    }
    // fall through
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's': {  // Relocatable Constant
    // These operands are interested in values of the form (GV+C), where C may
    // be folded in as an offset of GV, or it may be explicitly added.  Also, it
    // is possible and fine if either GV or C are missing.
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);

    // If we have "(add GV, C)", pull out GV/C
    if (Op.getOpcode() == ISD::ADD) {
      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C == 0 || GA == 0) {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
      }
      if (C == 0 || GA == 0)
        C = 0, GA = 0;
    }

    // If we find a valid operand, map to the TargetXXX version so that the
    // value itself doesn't get selected.
    if (GA) {   // Either &GV or &GV+C
      if (ConstraintLetter != 'n') {
        int64_t Offs = GA->getOffset();
        if (C) Offs += C->getZExtValue();
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
                                                 Op.getValueType(), Offs));
        return;
      }
    }
    if (C) {   // just C, no GV.
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's') {
        Ops.push_back(DAG.getTargetConstant(C->getAPIntValue(),
                                            Op.getValueType()));
        return;
      }
    }
    break;
  }
  }
}
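
// For reference, how these letters surface in user code: in GCC-style inline
// asm, 'n' demands a plain integer constant, 's' a symbolic (relocatable)
// address, and 'i' accepts either form, including GV+C.  A hedged sketch --
// some_global is a hypothetical symbol, and whether a given target accepts a
// particular operand shape ultimately depends on the backend.
extern int some_global;

inline void constraintLetterSamples() {
  asm volatile("" : : "n"(42));            // 'n': simple integer only
  asm volatile("" : : "s"(&some_global));  // 's': relocatable, no constants
  asm volatile("" : : "i"(&some_global));  // 'i': &GV (+ constant) or integer
}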
std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
  return std::vector<unsigned>();
}


std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const TargetRegisterInfo *RI = TM.getRegisterInfo();
  for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).AsmName))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}
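
// The braced-name matching above, reduced to a standalone helper: strip the
// "{...}" and compare case-insensitively against a register's assembler
// name.  A sketch using only the standard library; the real code instead
// walks every register class via TargetRegisterInfo.
#include <string>
#include <cctype>

namespace {
inline bool matchBracedReg(const std::string &Constraint,
                           const std::string &AsmName) {
  if (Constraint.size() < 3 || Constraint[0] != '{' ||
      Constraint[Constraint.size()-1] != '}')
    return false;
  std::string Name(Constraint.begin()+1, Constraint.end()-1);
  if (Name.size() != AsmName.size())
    return false;
  for (std::string::size_type i = 0, e = Name.size(); i != e; ++i)
    if (std::tolower((unsigned char)Name[i]) !=
        std::tolower((unsigned char)AsmName[i]))
      return false;
  return true;
}
// e.g. matchBracedReg("{EAX}", "eax") is true; matchBracedReg("r", "eax")
// is false, since only brace-enclosed constraints name a specific register.
} // end anonymous namespace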
//===----------------------------------------------------------------------===//
//  Constraint Selection.

/// isMatchingInputConstraint - Return true if this is an input operand that is
/// a matching constraint like "4".
bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
  assert(!ConstraintCode.empty() && "No known constraint!");
  return isdigit(ConstraintCode[0]);
}

/// getMatchedOperand - If this is an input matching constraint, this method
/// returns the output operand it matches.
unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
  assert(!ConstraintCode.empty() && "No known constraint!");
  return atoi(ConstraintCode.c_str());
}


/// getConstraintGenerality - Return an integer indicating how general CT
/// is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  default: assert(0 && "Unknown constraint type!");
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
    return 3;
  }
}

/// ChooseConstraint - If there are multiple different constraints that we
/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
///    Other         -> immediates and magic values
///    Register      -> one specific register
///    RegisterClass -> a group of regs
///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it.  The problem here
/// is that if we have something that could either be in a register or in
/// memory, that use of the register could cause selection of *other*
/// operands to fail: they might only succeed if we pick memory.  Because of
/// this the heuristic we use is:
///
///  1) If there is an 'other' constraint, and if the operand is valid for
///     that constraint, use it.  This makes us take advantage of 'i'
///     constraints when available.
///  2) Otherwise, pick the most general constraint present.  This prefers
///     'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             bool hasMemory, const TargetLowering &TLI,
                             SDValue Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
      TLI.getConstraintType(OpInfo.Codes[i]);

    // If this is an 'other' constraint, see if the operand is valid for it.
    // For example, on X86 we might have an 'rI' constraint.  If the operand
    // is an integer in the range [0..31] we want to use I (saving a load
    // of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other && Op.getNode()) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      std::vector<SDValue> ResultOps;
      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory,
                                       ResultOps, *DAG);
      if (!ResultOps.empty()) {
        BestType = CType;
        BestIdx = i;
        break;
      }
    }

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}
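
// The heuristic above, replayed on plain strings: an 'other' letter that can
// take the operand wins outright, otherwise the most general letter wins.
// Hypothetical helpers for illustration only; C_Register (braced names) is
// omitted, since multi-letter codes never reach the single-letter assert in
// ChooseConstraint.
#include <string>
#include <vector>

namespace {
inline unsigned toyGenerality(char C) {
  switch (C) {
  case 'm': return 3;  // memory: most general
  case 'r': return 2;  // register class
  default:  return 0;  // 'other': immediates and magic values
  }
}

inline std::string toyChoose(const std::vector<std::string> &Codes,
                             bool OperandFitsImmediate) {
  unsigned BestIdx = 0;
  int BestGenerality = -1;
  for (unsigned i = 0, e = Codes.size(); i != e; ++i) {
    char C = Codes[i][0];
    // Stand-in for "LowerAsmOperandForConstraint produced something".
    if (toyGenerality(C) == 0 && OperandFitsImmediate)
      return Codes[i];
    int Generality = (int)toyGenerality(C);
    if (Generality > BestGenerality) {
      BestGenerality = Generality;
      BestIdx = i;
    }
  }
  return Codes[BestIdx];
}
// e.g. {"i","m","r"} with a constant operand picks "i" (rule 1); the same
// codes with a non-constant operand pick "m", the most general (rule 2).
} // end anonymous namespace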
/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                            SDValue Op,
                                            bool hasMemory,
                                            SelectionDAG *DAG) const {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
  } else {
    ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels).
    if (isa<BasicBlock>(OpInfo.CallOperandVal) ||
        isa<ConstantInt>(OpInfo.CallOperandVal))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  // The default implementation of this implements a conservative RISCy
  // r+r and r+i addr mode.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  }

  return true;
}
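
// Concrete instances of the default rule, replayed on a hypothetical mirror
// of AddrMode so the accepted shapes are easy to see.  The switch
// deliberately mirrors the original above, which has no default case, so
// scales above 2 fall through to "legal".
#include <cstdint>

namespace {
struct ToyAddrMode {
  bool HasGV;        // stands in for AM.BaseGV != 0
  int64_t BaseOffs;
  bool HasBaseReg;
  int Scale;
};

inline bool toyLegalAddrMode(const ToyAddrMode &AM) {
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;                       // offset must fit 16 signed bits
  if (AM.HasGV)
    return false;                       // no global bases
  switch (AM.Scale) {
  case 0: break;                                                 // "r+i" / "i"
  case 1: if (AM.HasBaseReg && AM.BaseOffs) return false; break; // no r+r+i
  case 2: if (AM.HasBaseReg || AM.BaseOffs) return false; break; // 2*r only
  }
  return true;
}
// e.g. {false, 8, true, 0}  (r+i)   -> true
//      {false, 8, true, 1}  (r+r+i) -> false
//      {false, 0, false, 2} (2*r)   -> true
} // end anonymous namespace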
struct mu {
  APInt m;     // magic number
  bool a;      // add indicator
  unsigned s;  // shift amount
};

/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu(const APInt& d) {
  unsigned p;
  APInt nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  APInt allOnes = APInt::getAllOnesValue(d.getBitWidth());
  APInt signedMin = APInt::getSignedMinValue(d.getBitWidth());
  APInt signedMax = APInt::getSignedMaxValue(d.getBitWidth());

  nc = allOnes - (-d).urem(d);
  p = d.getBitWidth() - 1;  // initialize p
  q1 = signedMin.udiv(nc);  // initialize q1 = 2p/nc
  r1 = signedMin - q1*nc;   // initialize r1 = rem(2p,nc)
  q2 = signedMax.udiv(d);   // initialize q2 = (2p-1)/d
  r2 = signedMax - q2*d;    // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1.uge(nc - r1)) {
      q1 = q1 + q1 + 1;  // update q1
      r1 = r1 + r1 - nc; // update r1
    }
    else {
      q1 = q1+q1; // update q1
      r1 = r1+r1; // update r1
    }
    if ((r2 + 1).uge(d - r2)) {
      if (q2.uge(signedMax)) magu.a = 1;
      q2 = q2+q2 + 1;     // update q2
      r2 = r2+r2 + 1 - d; // update r2
    }
    else {
      if (q2.uge(signedMin)) magu.a = 1;
      q2 = q2+q2;     // update q2
      r2 = r2+r2 + 1; // update r2
    }
    delta = d - 1 - r2;
  } while (p < d.getBitWidth()*2 &&
           (q1.ult(delta) || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;               // resulting magic number
  magu.s = p - d.getBitWidth();  // resulting shift
  return magu;
}
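
// A scalar replay of magicu()'s result for d = 7 at 32 bits, where it
// produces m = 0x24924925, a = 1, s = 3 (the classic Hacker's Delight
// values): udiv-by-7 becomes one high multiply plus the "add indicator"
// fixup that BuildUDIV emits below.  udiv7/udiv7Check are illustrative
// helpers, not part of the lowering code.
#include <cstdint>
#include <cassert>

namespace {
inline uint32_t udiv7(uint32_t N) {
  uint32_t Q = (uint32_t)(((uint64_t)N * 0x24924925u) >> 32);  // MULHU
  uint32_t T = (N - Q) >> 1;                                   // a == 1 fixup
  return (T + Q) >> 2;                                         // s - 1 == 2
}

inline void udiv7Check() {
  for (uint32_t N = 0; N != 100000; ++N)
    assert(udiv7(N) == N / 7);
}
} // end anonymous namespace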
// Magic for divide replacement
struct ms {
  APInt m;     // magic number
  unsigned s;  // shift amount
};

/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic(const APInt& d) {
  unsigned p;
  APInt ad, anc, delta, q1, r1, q2, r2, t;
  APInt allOnes = APInt::getAllOnesValue(d.getBitWidth());
  APInt signedMin = APInt::getSignedMinValue(d.getBitWidth());
  APInt signedMax = APInt::getSignedMaxValue(d.getBitWidth());
  struct ms mag;

  ad = d.abs();
  t = signedMin + (d.lshr(d.getBitWidth() - 1));
  anc = t - 1 - t.urem(ad);  // absolute value of nc
  p = d.getBitWidth() - 1;   // initialize p
  q1 = signedMin.udiv(anc);  // initialize q1 = 2p/abs(nc)
  r1 = signedMin - q1*anc;   // initialize r1 = rem(2p,abs(nc))
  q2 = signedMin.udiv(ad);   // initialize q2 = 2p/abs(d)
  r2 = signedMin - q2*ad;    // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = q1<<1;           // update q1 = 2p/abs(nc)
    r1 = r1<<1;           // update r1 = rem(2p/abs(nc))
    if (r1.uge(anc)) {    // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = q2<<1;           // update q2 = 2p/abs(d)
    r2 = r2<<1;           // update r2 = rem(2p/abs(d))
    if (r2.uge(ad)) {     // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1.ule(delta) || (q1 == delta && r1 == 0));

  mag.m = q2 + 1;
  if (d.isNegative()) mag.m = -mag.m;  // resulting magic number
  mag.s = p - d.getBitWidth();         // resulting shift
  return mag;
}
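
// The signed counterpart for d = 7 at 32 bits: magic() yields m = 0x92492493
// (negative) and s = 2, so the d > 0, m < 0 case of BuildSDIV below adds the
// numerator back before shifting, and the final logical shift adds the sign
// bit to round the quotient toward zero.  An illustrative helper that assumes
// the usual arithmetic behavior of >> on negative values.
#include <cstdint>
#include <cassert>

namespace {
inline int32_t sdiv7(int32_t N) {
  const int64_t M = -1840700269;                  // (int32_t)0x92492493
  int32_t Q = (int32_t)(((int64_t)N * M) >> 32);  // MULHS
  Q += N;                                         // d > 0, m < 0: add numerator
  Q >>= 2;                                        // SRA by s
  return Q + (int32_t)((uint32_t)Q >> 31);        // add sign bit (SRL by 31)
}

inline void sdiv7Check() {
  for (int32_t N = -10000; N != 10000; ++N)
    assert(sdiv7(N) == N / 7);
}
} // end anonymous namespace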
/// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                  std::vector<SDNode*>* Created) const {
  MVT VT = N->getValueType(0);

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  APInt d = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
  ms magics = magic(d);

  // Multiply the numerator (operand 0) by the magic value
  // FIXME: We should support doing a MUL in a wider type
  SDValue Q;
  if (isOperationLegal(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegal(ISD::SMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
                            N->getOperand(0),
                            DAG.getConstant(magics.m, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhs or equivalent
  // If d > 0 and m < 0, add the numerator
  if (d.isStrictlyPositive() && magics.m.isNegative()) {
    Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (d.isNegative() && magics.m.isStrictlyPositive()) {
    Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // Shift right algebraic if shift value is nonzero
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, VT, Q,
                    DAG.getConstant(magics.s, getShiftAmountTy()));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // Extract the sign bit and add it to the quotient
  SDValue T =
    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
                                                 getShiftAmountTy()));
  if (Created)
    Created->push_back(T.getNode());
  return DAG.getNode(ISD::ADD, VT, Q, T);
}

/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                  std::vector<SDNode*>* Created) const {
  MVT VT = N->getValueType(0);

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  // FIXME: We should use a narrower constant when the upper
  // bits are known to be zero.
  ConstantSDNode *N1C = cast<ConstantSDNode>(N->getOperand(1));
  mu magics = magicu(N1C->getAPIntValue());

  // Multiply the numerator (operand 0) by the magic value
  // FIXME: We should support doing a MUL in a wider type
  SDValue Q;
  if (isOperationLegal(ISD::MULHU, VT))
    Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegal(ISD::UMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
                            N->getOperand(0),
                            DAG.getConstant(magics.m, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhu or equivalent
  if (Created)
    Created->push_back(Q.getNode());

  if (magics.a == 0) {
    assert(magics.s < N1C->getAPIntValue().getBitWidth() &&
           "We shouldn't generate an undefined shift!");
    return DAG.getNode(ISD::SRL, VT, Q,
                       DAG.getConstant(magics.s, getShiftAmountTy()));
  } else {
    SDValue NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
    if (Created)
      Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
                      DAG.getConstant(1, getShiftAmountTy()));
    if (Created)
      Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
    if (Created)
      Created->push_back(NPQ.getNode());
    return DAG.getNode(ISD::SRL, VT, NPQ,
                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
  }
}
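
// Both shapes BuildUDIV can emit, in scalar form for the 32-bit case: a
// plain high multiply plus shift when magics.a == 0, and the
// subtract/halve/add fixup when the magic constant overflowed
// (magics.a == 1).  expandUDiv is an illustrative stand-in for the DAG the
// function builds, not part of the lowering code.
#include <cstdint>

namespace {
inline uint32_t expandUDiv(uint32_t N, uint32_t M, bool A, unsigned S) {
  uint32_t Q = (uint32_t)(((uint64_t)N * M) >> 32);  // MULHU
  if (!A)
    return Q >> S;                                   // SRL Q, s
  uint32_t NPQ = (N - Q) >> 1;                       // SUB, then SRL by 1
  return (NPQ + Q) >> (S - 1);                       // ADD, then SRL by s-1
}
// e.g. expandUDiv(N, 0xAAAAAAABu, false, 1) == N / 3
//      expandUDiv(N, 0x24924925u, true,  3) == N / 7
} // end anonymous namespace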