TargetLowering.cpp revision 813090cf891325c715b9f6fb1546e6ce67fa8c8b
1//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This implements the TargetLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "llvm/Target/TargetAsmInfo.h" 15#include "llvm/Target/TargetLowering.h" 16#include "llvm/Target/TargetSubtarget.h" 17#include "llvm/Target/TargetData.h" 18#include "llvm/Target/TargetMachine.h" 19#include "llvm/Target/TargetRegisterInfo.h" 20#include "llvm/GlobalVariable.h" 21#include "llvm/DerivedTypes.h" 22#include "llvm/CodeGen/MachineFrameInfo.h" 23#include "llvm/CodeGen/SelectionDAG.h" 24#include "llvm/ADT/StringExtras.h" 25#include "llvm/ADT/STLExtras.h" 26#include "llvm/Support/MathExtras.h" 27using namespace llvm; 28 29namespace llvm { 30TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc) { 31 bool isLocal = GV->hasLocalLinkage(); 32 bool isDeclaration = GV->isDeclaration(); 33 // FIXME: what should we do for protected and internal visibility? 34 // For variables, is internal different from hidden? 35 bool isHidden = GV->hasHiddenVisibility(); 36 37 if (reloc == Reloc::PIC_) { 38 if (isLocal || isHidden) 39 return TLSModel::LocalDynamic; 40 else 41 return TLSModel::GeneralDynamic; 42 } else { 43 if (!isDeclaration || isHidden) 44 return TLSModel::LocalExec; 45 else 46 return TLSModel::InitialExec; 47 } 48} 49} 50 51/// InitLibcallNames - Set default libcall names. 52/// 53static void InitLibcallNames(const char **Names) { 54 Names[RTLIB::SHL_I16] = "__ashlhi3"; 55 Names[RTLIB::SHL_I32] = "__ashlsi3"; 56 Names[RTLIB::SHL_I64] = "__ashldi3"; 57 Names[RTLIB::SHL_I128] = "__ashlti3"; 58 Names[RTLIB::SRL_I16] = "__lshrhi3"; 59 Names[RTLIB::SRL_I32] = "__lshrsi3"; 60 Names[RTLIB::SRL_I64] = "__lshrdi3"; 61 Names[RTLIB::SRL_I128] = "__lshrti3"; 62 Names[RTLIB::SRA_I16] = "__ashrhi3"; 63 Names[RTLIB::SRA_I32] = "__ashrsi3"; 64 Names[RTLIB::SRA_I64] = "__ashrdi3"; 65 Names[RTLIB::SRA_I128] = "__ashrti3"; 66 Names[RTLIB::MUL_I16] = "__mulhi3"; 67 Names[RTLIB::MUL_I32] = "__mulsi3"; 68 Names[RTLIB::MUL_I64] = "__muldi3"; 69 Names[RTLIB::MUL_I128] = "__multi3"; 70 Names[RTLIB::SDIV_I16] = "__divhi3"; 71 Names[RTLIB::SDIV_I32] = "__divsi3"; 72 Names[RTLIB::SDIV_I64] = "__divdi3"; 73 Names[RTLIB::SDIV_I128] = "__divti3"; 74 Names[RTLIB::UDIV_I32] = "__udivhi3"; 75 Names[RTLIB::UDIV_I32] = "__udivsi3"; 76 Names[RTLIB::UDIV_I64] = "__udivdi3"; 77 Names[RTLIB::UDIV_I128] = "__udivti3"; 78 Names[RTLIB::SREM_I16] = "__modhi3"; 79 Names[RTLIB::SREM_I32] = "__modsi3"; 80 Names[RTLIB::SREM_I64] = "__moddi3"; 81 Names[RTLIB::SREM_I128] = "__modti3"; 82 Names[RTLIB::UREM_I16] = "__umodsi3"; 83 Names[RTLIB::UREM_I32] = "__umodsi3"; 84 Names[RTLIB::UREM_I64] = "__umoddi3"; 85 Names[RTLIB::UREM_I128] = "__umodti3"; 86 Names[RTLIB::NEG_I32] = "__negsi2"; 87 Names[RTLIB::NEG_I64] = "__negdi2"; 88 Names[RTLIB::ADD_F32] = "__addsf3"; 89 Names[RTLIB::ADD_F64] = "__adddf3"; 90 Names[RTLIB::ADD_F80] = "__addxf3"; 91 Names[RTLIB::ADD_PPCF128] = "__gcc_qadd"; 92 Names[RTLIB::SUB_F32] = "__subsf3"; 93 Names[RTLIB::SUB_F64] = "__subdf3"; 94 Names[RTLIB::SUB_F80] = "__subxf3"; 95 Names[RTLIB::SUB_PPCF128] = "__gcc_qsub"; 96 Names[RTLIB::MUL_F32] = "__mulsf3"; 97 Names[RTLIB::MUL_F64] = "__muldf3"; 98 Names[RTLIB::MUL_F80] = "__mulxf3"; 99 Names[RTLIB::MUL_PPCF128] = "__gcc_qmul"; 100 Names[RTLIB::DIV_F32] = "__divsf3"; 101 Names[RTLIB::DIV_F64] = "__divdf3"; 102 Names[RTLIB::DIV_F80] = "__divxf3"; 103 Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv"; 104 Names[RTLIB::REM_F32] = "fmodf"; 105 Names[RTLIB::REM_F64] = "fmod"; 106 Names[RTLIB::REM_F80] = "fmodl"; 107 Names[RTLIB::REM_PPCF128] = "fmodl"; 108 Names[RTLIB::POWI_F32] = "__powisf2"; 109 Names[RTLIB::POWI_F64] = "__powidf2"; 110 Names[RTLIB::POWI_F80] = "__powixf2"; 111 Names[RTLIB::POWI_PPCF128] = "__powitf2"; 112 Names[RTLIB::SQRT_F32] = "sqrtf"; 113 Names[RTLIB::SQRT_F64] = "sqrt"; 114 Names[RTLIB::SQRT_F80] = "sqrtl"; 115 Names[RTLIB::SQRT_PPCF128] = "sqrtl"; 116 Names[RTLIB::LOG_F32] = "logf"; 117 Names[RTLIB::LOG_F64] = "log"; 118 Names[RTLIB::LOG_F80] = "logl"; 119 Names[RTLIB::LOG_PPCF128] = "logl"; 120 Names[RTLIB::LOG2_F32] = "log2f"; 121 Names[RTLIB::LOG2_F64] = "log2"; 122 Names[RTLIB::LOG2_F80] = "log2l"; 123 Names[RTLIB::LOG2_PPCF128] = "log2l"; 124 Names[RTLIB::LOG10_F32] = "log10f"; 125 Names[RTLIB::LOG10_F64] = "log10"; 126 Names[RTLIB::LOG10_F80] = "log10l"; 127 Names[RTLIB::LOG10_PPCF128] = "log10l"; 128 Names[RTLIB::EXP_F32] = "expf"; 129 Names[RTLIB::EXP_F64] = "exp"; 130 Names[RTLIB::EXP_F80] = "expl"; 131 Names[RTLIB::EXP_PPCF128] = "expl"; 132 Names[RTLIB::EXP2_F32] = "exp2f"; 133 Names[RTLIB::EXP2_F64] = "exp2"; 134 Names[RTLIB::EXP2_F80] = "exp2l"; 135 Names[RTLIB::EXP2_PPCF128] = "exp2l"; 136 Names[RTLIB::SIN_F32] = "sinf"; 137 Names[RTLIB::SIN_F64] = "sin"; 138 Names[RTLIB::SIN_F80] = "sinl"; 139 Names[RTLIB::SIN_PPCF128] = "sinl"; 140 Names[RTLIB::COS_F32] = "cosf"; 141 Names[RTLIB::COS_F64] = "cos"; 142 Names[RTLIB::COS_F80] = "cosl"; 143 Names[RTLIB::COS_PPCF128] = "cosl"; 144 Names[RTLIB::POW_F32] = "powf"; 145 Names[RTLIB::POW_F64] = "pow"; 146 Names[RTLIB::POW_F80] = "powl"; 147 Names[RTLIB::POW_PPCF128] = "powl"; 148 Names[RTLIB::CEIL_F32] = "ceilf"; 149 Names[RTLIB::CEIL_F64] = "ceil"; 150 Names[RTLIB::CEIL_F80] = "ceill"; 151 Names[RTLIB::CEIL_PPCF128] = "ceill"; 152 Names[RTLIB::TRUNC_F32] = "truncf"; 153 Names[RTLIB::TRUNC_F64] = "trunc"; 154 Names[RTLIB::TRUNC_F80] = "truncl"; 155 Names[RTLIB::TRUNC_PPCF128] = "truncl"; 156 Names[RTLIB::RINT_F32] = "rintf"; 157 Names[RTLIB::RINT_F64] = "rint"; 158 Names[RTLIB::RINT_F80] = "rintl"; 159 Names[RTLIB::RINT_PPCF128] = "rintl"; 160 Names[RTLIB::NEARBYINT_F32] = "nearbyintf"; 161 Names[RTLIB::NEARBYINT_F64] = "nearbyint"; 162 Names[RTLIB::NEARBYINT_F80] = "nearbyintl"; 163 Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl"; 164 Names[RTLIB::FLOOR_F32] = "floorf"; 165 Names[RTLIB::FLOOR_F64] = "floor"; 166 Names[RTLIB::FLOOR_F80] = "floorl"; 167 Names[RTLIB::FLOOR_PPCF128] = "floorl"; 168 Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2"; 169 Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2"; 170 Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2"; 171 Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2"; 172 Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2"; 173 Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2"; 174 Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi"; 175 Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi"; 176 Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti"; 177 Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi"; 178 Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi"; 179 Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti"; 180 Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi"; 181 Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi"; 182 Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti"; 183 Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi"; 184 Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi"; 185 Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti"; 186 Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi"; 187 Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi"; 188 Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti"; 189 Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi"; 190 Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi"; 191 Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti"; 192 Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi"; 193 Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi"; 194 Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti"; 195 Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi"; 196 Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi"; 197 Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti"; 198 Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf"; 199 Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf"; 200 Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf"; 201 Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf"; 202 Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf"; 203 Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf"; 204 Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf"; 205 Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf"; 206 Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf"; 207 Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf"; 208 Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf"; 209 Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf"; 210 Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf"; 211 Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf"; 212 Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf"; 213 Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf"; 214 Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf"; 215 Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf"; 216 Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf"; 217 Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf"; 218 Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf"; 219 Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf"; 220 Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf"; 221 Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf"; 222 Names[RTLIB::OEQ_F32] = "__eqsf2"; 223 Names[RTLIB::OEQ_F64] = "__eqdf2"; 224 Names[RTLIB::UNE_F32] = "__nesf2"; 225 Names[RTLIB::UNE_F64] = "__nedf2"; 226 Names[RTLIB::OGE_F32] = "__gesf2"; 227 Names[RTLIB::OGE_F64] = "__gedf2"; 228 Names[RTLIB::OLT_F32] = "__ltsf2"; 229 Names[RTLIB::OLT_F64] = "__ltdf2"; 230 Names[RTLIB::OLE_F32] = "__lesf2"; 231 Names[RTLIB::OLE_F64] = "__ledf2"; 232 Names[RTLIB::OGT_F32] = "__gtsf2"; 233 Names[RTLIB::OGT_F64] = "__gtdf2"; 234 Names[RTLIB::UO_F32] = "__unordsf2"; 235 Names[RTLIB::UO_F64] = "__unorddf2"; 236 Names[RTLIB::O_F32] = "__unordsf2"; 237 Names[RTLIB::O_F64] = "__unorddf2"; 238} 239 240/// getFPEXT - Return the FPEXT_*_* value for the given types, or 241/// UNKNOWN_LIBCALL if there is none. 242RTLIB::Libcall RTLIB::getFPEXT(MVT OpVT, MVT RetVT) { 243 if (OpVT == MVT::f32) { 244 if (RetVT == MVT::f64) 245 return FPEXT_F32_F64; 246 } 247 return UNKNOWN_LIBCALL; 248} 249 250/// getFPROUND - Return the FPROUND_*_* value for the given types, or 251/// UNKNOWN_LIBCALL if there is none. 252RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) { 253 if (RetVT == MVT::f32) { 254 if (OpVT == MVT::f64) 255 return FPROUND_F64_F32; 256 if (OpVT == MVT::f80) 257 return FPROUND_F80_F32; 258 if (OpVT == MVT::ppcf128) 259 return FPROUND_PPCF128_F32; 260 } else if (RetVT == MVT::f64) { 261 if (OpVT == MVT::f80) 262 return FPROUND_F80_F64; 263 if (OpVT == MVT::ppcf128) 264 return FPROUND_PPCF128_F64; 265 } 266 return UNKNOWN_LIBCALL; 267} 268 269/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or 270/// UNKNOWN_LIBCALL if there is none. 271RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) { 272 if (OpVT == MVT::f32) { 273 if (RetVT == MVT::i32) 274 return FPTOSINT_F32_I32; 275 if (RetVT == MVT::i64) 276 return FPTOSINT_F32_I64; 277 if (RetVT == MVT::i128) 278 return FPTOSINT_F32_I128; 279 } else if (OpVT == MVT::f64) { 280 if (RetVT == MVT::i32) 281 return FPTOSINT_F64_I32; 282 if (RetVT == MVT::i64) 283 return FPTOSINT_F64_I64; 284 if (RetVT == MVT::i128) 285 return FPTOSINT_F64_I128; 286 } else if (OpVT == MVT::f80) { 287 if (RetVT == MVT::i32) 288 return FPTOSINT_F80_I32; 289 if (RetVT == MVT::i64) 290 return FPTOSINT_F80_I64; 291 if (RetVT == MVT::i128) 292 return FPTOSINT_F80_I128; 293 } else if (OpVT == MVT::ppcf128) { 294 if (RetVT == MVT::i32) 295 return FPTOSINT_PPCF128_I32; 296 if (RetVT == MVT::i64) 297 return FPTOSINT_PPCF128_I64; 298 if (RetVT == MVT::i128) 299 return FPTOSINT_PPCF128_I128; 300 } 301 return UNKNOWN_LIBCALL; 302} 303 304/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or 305/// UNKNOWN_LIBCALL if there is none. 306RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) { 307 if (OpVT == MVT::f32) { 308 if (RetVT == MVT::i32) 309 return FPTOUINT_F32_I32; 310 if (RetVT == MVT::i64) 311 return FPTOUINT_F32_I64; 312 if (RetVT == MVT::i128) 313 return FPTOUINT_F32_I128; 314 } else if (OpVT == MVT::f64) { 315 if (RetVT == MVT::i32) 316 return FPTOUINT_F64_I32; 317 if (RetVT == MVT::i64) 318 return FPTOUINT_F64_I64; 319 if (RetVT == MVT::i128) 320 return FPTOUINT_F64_I128; 321 } else if (OpVT == MVT::f80) { 322 if (RetVT == MVT::i32) 323 return FPTOUINT_F80_I32; 324 if (RetVT == MVT::i64) 325 return FPTOUINT_F80_I64; 326 if (RetVT == MVT::i128) 327 return FPTOUINT_F80_I128; 328 } else if (OpVT == MVT::ppcf128) { 329 if (RetVT == MVT::i32) 330 return FPTOUINT_PPCF128_I32; 331 if (RetVT == MVT::i64) 332 return FPTOUINT_PPCF128_I64; 333 if (RetVT == MVT::i128) 334 return FPTOUINT_PPCF128_I128; 335 } 336 return UNKNOWN_LIBCALL; 337} 338 339/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or 340/// UNKNOWN_LIBCALL if there is none. 341RTLIB::Libcall RTLIB::getSINTTOFP(MVT OpVT, MVT RetVT) { 342 if (OpVT == MVT::i32) { 343 if (RetVT == MVT::f32) 344 return SINTTOFP_I32_F32; 345 else if (RetVT == MVT::f64) 346 return SINTTOFP_I32_F64; 347 else if (RetVT == MVT::f80) 348 return SINTTOFP_I32_F80; 349 else if (RetVT == MVT::ppcf128) 350 return SINTTOFP_I32_PPCF128; 351 } else if (OpVT == MVT::i64) { 352 if (RetVT == MVT::f32) 353 return SINTTOFP_I64_F32; 354 else if (RetVT == MVT::f64) 355 return SINTTOFP_I64_F64; 356 else if (RetVT == MVT::f80) 357 return SINTTOFP_I64_F80; 358 else if (RetVT == MVT::ppcf128) 359 return SINTTOFP_I64_PPCF128; 360 } else if (OpVT == MVT::i128) { 361 if (RetVT == MVT::f32) 362 return SINTTOFP_I128_F32; 363 else if (RetVT == MVT::f64) 364 return SINTTOFP_I128_F64; 365 else if (RetVT == MVT::f80) 366 return SINTTOFP_I128_F80; 367 else if (RetVT == MVT::ppcf128) 368 return SINTTOFP_I128_PPCF128; 369 } 370 return UNKNOWN_LIBCALL; 371} 372 373/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or 374/// UNKNOWN_LIBCALL if there is none. 375RTLIB::Libcall RTLIB::getUINTTOFP(MVT OpVT, MVT RetVT) { 376 if (OpVT == MVT::i32) { 377 if (RetVT == MVT::f32) 378 return UINTTOFP_I32_F32; 379 else if (RetVT == MVT::f64) 380 return UINTTOFP_I32_F64; 381 else if (RetVT == MVT::f80) 382 return UINTTOFP_I32_F80; 383 else if (RetVT == MVT::ppcf128) 384 return UINTTOFP_I32_PPCF128; 385 } else if (OpVT == MVT::i64) { 386 if (RetVT == MVT::f32) 387 return UINTTOFP_I64_F32; 388 else if (RetVT == MVT::f64) 389 return UINTTOFP_I64_F64; 390 else if (RetVT == MVT::f80) 391 return UINTTOFP_I64_F80; 392 else if (RetVT == MVT::ppcf128) 393 return UINTTOFP_I64_PPCF128; 394 } else if (OpVT == MVT::i128) { 395 if (RetVT == MVT::f32) 396 return UINTTOFP_I128_F32; 397 else if (RetVT == MVT::f64) 398 return UINTTOFP_I128_F64; 399 else if (RetVT == MVT::f80) 400 return UINTTOFP_I128_F80; 401 else if (RetVT == MVT::ppcf128) 402 return UINTTOFP_I128_PPCF128; 403 } 404 return UNKNOWN_LIBCALL; 405} 406 407/// InitCmpLibcallCCs - Set default comparison libcall CC. 408/// 409static void InitCmpLibcallCCs(ISD::CondCode *CCs) { 410 memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL); 411 CCs[RTLIB::OEQ_F32] = ISD::SETEQ; 412 CCs[RTLIB::OEQ_F64] = ISD::SETEQ; 413 CCs[RTLIB::UNE_F32] = ISD::SETNE; 414 CCs[RTLIB::UNE_F64] = ISD::SETNE; 415 CCs[RTLIB::OGE_F32] = ISD::SETGE; 416 CCs[RTLIB::OGE_F64] = ISD::SETGE; 417 CCs[RTLIB::OLT_F32] = ISD::SETLT; 418 CCs[RTLIB::OLT_F64] = ISD::SETLT; 419 CCs[RTLIB::OLE_F32] = ISD::SETLE; 420 CCs[RTLIB::OLE_F64] = ISD::SETLE; 421 CCs[RTLIB::OGT_F32] = ISD::SETGT; 422 CCs[RTLIB::OGT_F64] = ISD::SETGT; 423 CCs[RTLIB::UO_F32] = ISD::SETNE; 424 CCs[RTLIB::UO_F64] = ISD::SETNE; 425 CCs[RTLIB::O_F32] = ISD::SETEQ; 426 CCs[RTLIB::O_F64] = ISD::SETEQ; 427} 428 429TargetLowering::TargetLowering(TargetMachine &tm) 430 : TM(tm), TD(TM.getTargetData()) { 431 // All operations default to being supported. 432 memset(OpActions, 0, sizeof(OpActions)); 433 memset(LoadExtActions, 0, sizeof(LoadExtActions)); 434 memset(TruncStoreActions, 0, sizeof(TruncStoreActions)); 435 memset(IndexedModeActions, 0, sizeof(IndexedModeActions)); 436 memset(ConvertActions, 0, sizeof(ConvertActions)); 437 memset(CondCodeActions, 0, sizeof(CondCodeActions)); 438 439 // Set default actions for various operations. 440 for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) { 441 // Default all indexed load / store to expand. 442 for (unsigned IM = (unsigned)ISD::PRE_INC; 443 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) { 444 setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand); 445 setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand); 446 } 447 448 // These operations default to expand. 449 setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand); 450 setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand); 451 } 452 453 // Most targets ignore the @llvm.prefetch intrinsic. 454 setOperationAction(ISD::PREFETCH, MVT::Other, Expand); 455 456 // ConstantFP nodes default to expand. Targets can either change this to 457 // Legal, in which case all fp constants are legal, or use addLegalFPImmediate 458 // to optimize expansions for certain constants. 459 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 460 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 461 setOperationAction(ISD::ConstantFP, MVT::f80, Expand); 462 463 // These library functions default to expand. 464 setOperationAction(ISD::FLOG , MVT::f64, Expand); 465 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 466 setOperationAction(ISD::FLOG10,MVT::f64, Expand); 467 setOperationAction(ISD::FEXP , MVT::f64, Expand); 468 setOperationAction(ISD::FEXP2, MVT::f64, Expand); 469 setOperationAction(ISD::FLOG , MVT::f32, Expand); 470 setOperationAction(ISD::FLOG2, MVT::f32, Expand); 471 setOperationAction(ISD::FLOG10,MVT::f32, Expand); 472 setOperationAction(ISD::FEXP , MVT::f32, Expand); 473 setOperationAction(ISD::FEXP2, MVT::f32, Expand); 474 475 // Default ISD::TRAP to expand (which turns it into abort). 476 setOperationAction(ISD::TRAP, MVT::Other, Expand); 477 478 IsLittleEndian = TD->isLittleEndian(); 479 UsesGlobalOffsetTable = false; 480 ShiftAmountTy = PointerTy = getValueType(TD->getIntPtrType()); 481 ShiftAmtHandling = Undefined; 482 memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*)); 483 memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray)); 484 maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8; 485 allowUnalignedMemoryAccesses = false; 486 UseUnderscoreSetJmp = false; 487 UseUnderscoreLongJmp = false; 488 SelectIsExpensive = false; 489 IntDivIsCheap = false; 490 Pow2DivIsCheap = false; 491 StackPointerRegisterToSaveRestore = 0; 492 ExceptionPointerRegister = 0; 493 ExceptionSelectorRegister = 0; 494 BooleanContents = UndefinedBooleanContent; 495 SchedPreferenceInfo = SchedulingForLatency; 496 JumpBufSize = 0; 497 JumpBufAlignment = 0; 498 IfCvtBlockSizeLimit = 2; 499 IfCvtDupBlockSizeLimit = 0; 500 PrefLoopAlignment = 0; 501 502 InitLibcallNames(LibcallRoutineNames); 503 InitCmpLibcallCCs(CmpLibcallCCs); 504 505 // Tell Legalize whether the assembler supports DEBUG_LOC. 506 const TargetAsmInfo *TASM = TM.getTargetAsmInfo(); 507 if (!TASM || !TASM->hasDotLocAndDotFile()) 508 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 509} 510 511TargetLowering::~TargetLowering() {} 512 513/// computeRegisterProperties - Once all of the register classes are added, 514/// this allows us to compute derived properties we expose. 515void TargetLowering::computeRegisterProperties() { 516 assert(MVT::LAST_VALUETYPE <= 32 && 517 "Too many value types for ValueTypeActions to hold!"); 518 519 // Everything defaults to needing one register. 520 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) { 521 NumRegistersForVT[i] = 1; 522 RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i; 523 } 524 // ...except isVoid, which doesn't need any registers. 525 NumRegistersForVT[MVT::isVoid] = 0; 526 527 // Find the largest integer register class. 528 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE; 529 for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg) 530 assert(LargestIntReg != MVT::i1 && "No integer registers defined!"); 531 532 // Every integer value type larger than this largest register takes twice as 533 // many registers to represent as the previous ValueType. 534 for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) { 535 MVT EVT = (MVT::SimpleValueType)ExpandedReg; 536 if (!EVT.isInteger()) 537 break; 538 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1]; 539 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg; 540 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1); 541 ValueTypeActions.setTypeAction(EVT, Expand); 542 } 543 544 // Inspect all of the ValueType's smaller than the largest integer 545 // register to see which ones need promotion. 546 unsigned LegalIntReg = LargestIntReg; 547 for (unsigned IntReg = LargestIntReg - 1; 548 IntReg >= (unsigned)MVT::i1; --IntReg) { 549 MVT IVT = (MVT::SimpleValueType)IntReg; 550 if (isTypeLegal(IVT)) { 551 LegalIntReg = IntReg; 552 } else { 553 RegisterTypeForVT[IntReg] = TransformToType[IntReg] = 554 (MVT::SimpleValueType)LegalIntReg; 555 ValueTypeActions.setTypeAction(IVT, Promote); 556 } 557 } 558 559 // ppcf128 type is really two f64's. 560 if (!isTypeLegal(MVT::ppcf128)) { 561 NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64]; 562 RegisterTypeForVT[MVT::ppcf128] = MVT::f64; 563 TransformToType[MVT::ppcf128] = MVT::f64; 564 ValueTypeActions.setTypeAction(MVT::ppcf128, Expand); 565 } 566 567 // Decide how to handle f64. If the target does not have native f64 support, 568 // expand it to i64 and we will be generating soft float library calls. 569 if (!isTypeLegal(MVT::f64)) { 570 NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64]; 571 RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64]; 572 TransformToType[MVT::f64] = MVT::i64; 573 ValueTypeActions.setTypeAction(MVT::f64, Expand); 574 } 575 576 // Decide how to handle f32. If the target does not have native support for 577 // f32, promote it to f64 if it is legal. Otherwise, expand it to i32. 578 if (!isTypeLegal(MVT::f32)) { 579 if (isTypeLegal(MVT::f64)) { 580 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64]; 581 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64]; 582 TransformToType[MVT::f32] = MVT::f64; 583 ValueTypeActions.setTypeAction(MVT::f32, Promote); 584 } else { 585 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32]; 586 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32]; 587 TransformToType[MVT::f32] = MVT::i32; 588 ValueTypeActions.setTypeAction(MVT::f32, Expand); 589 } 590 } 591 592 // Loop over all of the vector value types to see which need transformations. 593 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE; 594 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 595 MVT VT = (MVT::SimpleValueType)i; 596 if (!isTypeLegal(VT)) { 597 MVT IntermediateVT, RegisterVT; 598 unsigned NumIntermediates; 599 NumRegistersForVT[i] = 600 getVectorTypeBreakdown(VT, 601 IntermediateVT, NumIntermediates, 602 RegisterVT); 603 RegisterTypeForVT[i] = RegisterVT; 604 605 // Determine if there is a legal wider type. 606 bool IsLegalWiderType = false; 607 MVT EltVT = VT.getVectorElementType(); 608 unsigned NElts = VT.getVectorNumElements(); 609 for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) { 610 MVT SVT = (MVT::SimpleValueType)nVT; 611 if (isTypeLegal(SVT) && SVT.getVectorElementType() == EltVT && 612 SVT.getVectorNumElements() > NElts) { 613 TransformToType[i] = SVT; 614 ValueTypeActions.setTypeAction(VT, Promote); 615 IsLegalWiderType = true; 616 break; 617 } 618 } 619 if (!IsLegalWiderType) { 620 MVT NVT = VT.getPow2VectorType(); 621 if (NVT == VT) { 622 // Type is already a power of 2. The default action is to split. 623 TransformToType[i] = MVT::Other; 624 ValueTypeActions.setTypeAction(VT, Expand); 625 } else { 626 TransformToType[i] = NVT; 627 ValueTypeActions.setTypeAction(VT, Promote); 628 } 629 } 630 } 631 } 632} 633 634const char *TargetLowering::getTargetNodeName(unsigned Opcode) const { 635 return NULL; 636} 637 638 639MVT TargetLowering::getSetCCResultType(MVT VT) const { 640 return getValueType(TD->getIntPtrType()); 641} 642 643 644/// getVectorTypeBreakdown - Vector types are broken down into some number of 645/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 646/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. 647/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86. 648/// 649/// This method returns the number of registers needed, and the VT for each 650/// register. It also returns the VT and quantity of the intermediate values 651/// before they are promoted/expanded. 652/// 653unsigned TargetLowering::getVectorTypeBreakdown(MVT VT, 654 MVT &IntermediateVT, 655 unsigned &NumIntermediates, 656 MVT &RegisterVT) const { 657 // Figure out the right, legal destination reg to copy into. 658 unsigned NumElts = VT.getVectorNumElements(); 659 MVT EltTy = VT.getVectorElementType(); 660 661 unsigned NumVectorRegs = 1; 662 663 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we 664 // could break down into LHS/RHS like LegalizeDAG does. 665 if (!isPowerOf2_32(NumElts)) { 666 NumVectorRegs = NumElts; 667 NumElts = 1; 668 } 669 670 // Divide the input until we get to a supported size. This will always 671 // end with a scalar if the target doesn't support vectors. 672 while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) { 673 NumElts >>= 1; 674 NumVectorRegs <<= 1; 675 } 676 677 NumIntermediates = NumVectorRegs; 678 679 MVT NewVT = MVT::getVectorVT(EltTy, NumElts); 680 if (!isTypeLegal(NewVT)) 681 NewVT = EltTy; 682 IntermediateVT = NewVT; 683 684 MVT DestVT = getRegisterType(NewVT); 685 RegisterVT = DestVT; 686 if (DestVT.bitsLT(NewVT)) { 687 // Value is expanded, e.g. i64 -> i16. 688 return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits()); 689 } else { 690 // Otherwise, promotion or legal types use the same number of registers as 691 // the vector decimated to the appropriate level. 692 return NumVectorRegs; 693 } 694 695 return 1; 696} 697 698/// getWidenVectorType: given a vector type, returns the type to widen to 699/// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself. 700/// If there is no vector type that we want to widen to, returns MVT::Other 701/// When and where to widen is target dependent based on the cost of 702/// scalarizing vs using the wider vector type. 703MVT TargetLowering::getWidenVectorType(MVT VT) const { 704 assert(VT.isVector()); 705 if (isTypeLegal(VT)) 706 return VT; 707 708 // Default is not to widen until moved to LegalizeTypes 709 return MVT::Other; 710} 711 712/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 713/// function arguments in the caller parameter area. This is the actual 714/// alignment, not its logarithm. 715unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const { 716 return TD->getCallFrameTypeAlignment(Ty); 717} 718 719SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table, 720 SelectionDAG &DAG) const { 721 if (usesGlobalOffsetTable()) 722 return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy()); 723 return Table; 724} 725 726bool 727TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 728 // Assume that everything is safe in static mode. 729 if (getTargetMachine().getRelocationModel() == Reloc::Static) 730 return true; 731 732 // In dynamic-no-pic mode, assume that known defined values are safe. 733 if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC && 734 GA && 735 !GA->getGlobal()->isDeclaration() && 736 !GA->getGlobal()->isWeakForLinker()) 737 return true; 738 739 // Otherwise assume nothing is safe. 740 return false; 741} 742 743//===----------------------------------------------------------------------===// 744// Optimization Methods 745//===----------------------------------------------------------------------===// 746 747/// ShrinkDemandedConstant - Check to see if the specified operand of the 748/// specified instruction is a constant integer. If so, check to see if there 749/// are any bits set in the constant that are not demanded. If so, shrink the 750/// constant and return true. 751bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op, 752 const APInt &Demanded) { 753 DebugLoc dl = Op.getDebugLoc(); 754 755 // FIXME: ISD::SELECT, ISD::SELECT_CC 756 switch (Op.getOpcode()) { 757 default: break; 758 case ISD::XOR: 759 case ISD::AND: 760 case ISD::OR: { 761 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 762 if (!C) return false; 763 764 if (Op.getOpcode() == ISD::XOR && 765 (C->getAPIntValue() | (~Demanded)).isAllOnesValue()) 766 return false; 767 768 // if we can expand it to have all bits set, do it 769 if (C->getAPIntValue().intersects(~Demanded)) { 770 MVT VT = Op.getValueType(); 771 SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0), 772 DAG.getConstant(Demanded & 773 C->getAPIntValue(), 774 VT)); 775 return CombineTo(Op, New); 776 } 777 778 break; 779 } 780 } 781 782 return false; 783} 784 785/// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the 786/// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening 787/// cast, but it could be generalized for targets with other types of 788/// implicit widening casts. 789bool 790TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op, 791 unsigned BitWidth, 792 const APInt &Demanded, 793 DebugLoc dl) { 794 assert(Op.getNumOperands() == 2 && 795 "ShrinkDemandedOp only supports binary operators!"); 796 assert(Op.getNode()->getNumValues() == 1 && 797 "ShrinkDemandedOp only supports nodes with one result!"); 798 799 // Don't do this if the node has another user, which may require the 800 // full value. 801 if (!Op.getNode()->hasOneUse()) 802 return false; 803 804 // Search for the smallest integer type with free casts to and from 805 // Op's type. For expedience, just check power-of-2 integer types. 806 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 807 unsigned SmallVTBits = BitWidth - Demanded.countLeadingZeros(); 808 if (!isPowerOf2_32(SmallVTBits)) 809 SmallVTBits = NextPowerOf2(SmallVTBits); 810 for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) { 811 MVT SmallVT = MVT::getIntegerVT(SmallVTBits); 812 if (TLI.isTruncateFree(Op.getValueType(), SmallVT) && 813 TLI.isZExtFree(SmallVT, Op.getValueType())) { 814 // We found a type with free casts. 815 SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT, 816 DAG.getNode(ISD::TRUNCATE, dl, SmallVT, 817 Op.getNode()->getOperand(0)), 818 DAG.getNode(ISD::TRUNCATE, dl, SmallVT, 819 Op.getNode()->getOperand(1))); 820 SDValue Z = DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), X); 821 return CombineTo(Op, Z); 822 } 823 } 824 return false; 825} 826 827/// SimplifyDemandedBits - Look at Op. At this point, we know that only the 828/// DemandedMask bits of the result of Op are ever used downstream. If we can 829/// use this information to simplify Op, create a new simplified DAG node and 830/// return true, returning the original and new nodes in Old and New. Otherwise, 831/// analyze the expression and return a mask of KnownOne and KnownZero bits for 832/// the expression (used to simplify the caller). The KnownZero/One bits may 833/// only be accurate for those bits in the DemandedMask. 834bool TargetLowering::SimplifyDemandedBits(SDValue Op, 835 const APInt &DemandedMask, 836 APInt &KnownZero, 837 APInt &KnownOne, 838 TargetLoweringOpt &TLO, 839 unsigned Depth) const { 840 unsigned BitWidth = DemandedMask.getBitWidth(); 841 assert(Op.getValueSizeInBits() == BitWidth && 842 "Mask size mismatches value type size!"); 843 APInt NewMask = DemandedMask; 844 DebugLoc dl = Op.getDebugLoc(); 845 846 // Don't know anything. 847 KnownZero = KnownOne = APInt(BitWidth, 0); 848 849 // Other users may use these bits. 850 if (!Op.getNode()->hasOneUse()) { 851 if (Depth != 0) { 852 // If not at the root, Just compute the KnownZero/KnownOne bits to 853 // simplify things downstream. 854 TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth); 855 return false; 856 } 857 // If this is the root being simplified, allow it to have multiple uses, 858 // just set the NewMask to all bits. 859 NewMask = APInt::getAllOnesValue(BitWidth); 860 } else if (DemandedMask == 0) { 861 // Not demanding any bits from Op. 862 if (Op.getOpcode() != ISD::UNDEF) 863 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType())); 864 return false; 865 } else if (Depth == 6) { // Limit search depth. 866 return false; 867 } 868 869 APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut; 870 switch (Op.getOpcode()) { 871 case ISD::Constant: 872 // We know all of the bits for a constant! 873 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask; 874 KnownZero = ~KnownOne & NewMask; 875 return false; // Don't fall through, will infinitely loop. 876 case ISD::AND: 877 // If the RHS is a constant, check to see if the LHS would be zero without 878 // using the bits from the RHS. Below, we use knowledge about the RHS to 879 // simplify the LHS, here we're using information from the LHS to simplify 880 // the RHS. 881 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 882 APInt LHSZero, LHSOne; 883 TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask, 884 LHSZero, LHSOne, Depth+1); 885 // If the LHS already has zeros where RHSC does, this and is dead. 886 if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask)) 887 return TLO.CombineTo(Op, Op.getOperand(0)); 888 // If any of the set bits in the RHS are known zero on the LHS, shrink 889 // the constant. 890 if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask)) 891 return true; 892 } 893 894 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 895 KnownOne, TLO, Depth+1)) 896 return true; 897 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 898 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask, 899 KnownZero2, KnownOne2, TLO, Depth+1)) 900 return true; 901 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 902 903 // If all of the demanded bits are known one on one side, return the other. 904 // These bits cannot contribute to the result of the 'and'. 905 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 906 return TLO.CombineTo(Op, Op.getOperand(0)); 907 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 908 return TLO.CombineTo(Op, Op.getOperand(1)); 909 // If all of the demanded bits in the inputs are known zeros, return zero. 910 if ((NewMask & (KnownZero|KnownZero2)) == NewMask) 911 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType())); 912 // If the RHS is a constant, see if we can simplify it. 913 if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask)) 914 return true; 915 // If the operation can be done in a smaller type, do so. 916 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 917 return true; 918 919 // Output known-1 bits are only known if set in both the LHS & RHS. 920 KnownOne &= KnownOne2; 921 // Output known-0 are known to be clear if zero in either the LHS | RHS. 922 KnownZero |= KnownZero2; 923 break; 924 case ISD::OR: 925 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 926 KnownOne, TLO, Depth+1)) 927 return true; 928 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 929 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask, 930 KnownZero2, KnownOne2, TLO, Depth+1)) 931 return true; 932 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 933 934 // If all of the demanded bits are known zero on one side, return the other. 935 // These bits cannot contribute to the result of the 'or'. 936 if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask)) 937 return TLO.CombineTo(Op, Op.getOperand(0)); 938 if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask)) 939 return TLO.CombineTo(Op, Op.getOperand(1)); 940 // If all of the potentially set bits on one side are known to be set on 941 // the other side, just use the 'other' side. 942 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 943 return TLO.CombineTo(Op, Op.getOperand(0)); 944 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 945 return TLO.CombineTo(Op, Op.getOperand(1)); 946 // If the RHS is a constant, see if we can simplify it. 947 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 948 return true; 949 // If the operation can be done in a smaller type, do so. 950 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 951 return true; 952 953 // Output known-0 bits are only known if clear in both the LHS & RHS. 954 KnownZero &= KnownZero2; 955 // Output known-1 are known to be set if set in either the LHS | RHS. 956 KnownOne |= KnownOne2; 957 break; 958 case ISD::XOR: 959 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 960 KnownOne, TLO, Depth+1)) 961 return true; 962 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 963 if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2, 964 KnownOne2, TLO, Depth+1)) 965 return true; 966 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 967 968 // If all of the demanded bits are known zero on one side, return the other. 969 // These bits cannot contribute to the result of the 'xor'. 970 if ((KnownZero & NewMask) == NewMask) 971 return TLO.CombineTo(Op, Op.getOperand(0)); 972 if ((KnownZero2 & NewMask) == NewMask) 973 return TLO.CombineTo(Op, Op.getOperand(1)); 974 // If the operation can be done in a smaller type, do so. 975 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 976 return true; 977 978 // If all of the unknown bits are known to be zero on one side or the other 979 // (but not both) turn this into an *inclusive* or. 980 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 981 if ((NewMask & ~KnownZero & ~KnownZero2) == 0) 982 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(), 983 Op.getOperand(0), 984 Op.getOperand(1))); 985 986 // Output known-0 bits are known if clear or set in both the LHS & RHS. 987 KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 988 // Output known-1 are known to be set if set in only one of the LHS, RHS. 989 KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 990 991 // If all of the demanded bits on one side are known, and all of the set 992 // bits on that side are also known to be set on the other side, turn this 993 // into an AND, as we know the bits will be cleared. 994 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 995 if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known 996 if ((KnownOne & KnownOne2) == KnownOne) { 997 MVT VT = Op.getValueType(); 998 SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); 999 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, 1000 Op.getOperand(0), ANDC)); 1001 } 1002 } 1003 1004 // If the RHS is a constant, see if we can simplify it. 1005 // for XOR, we prefer to force bits to 1 if they will make a -1. 1006 // if we can't force bits, try to shrink constant 1007 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1008 APInt Expanded = C->getAPIntValue() | (~NewMask); 1009 // if we can expand it to have all bits set, do it 1010 if (Expanded.isAllOnesValue()) { 1011 if (Expanded != C->getAPIntValue()) { 1012 MVT VT = Op.getValueType(); 1013 SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0), 1014 TLO.DAG.getConstant(Expanded, VT)); 1015 return TLO.CombineTo(Op, New); 1016 } 1017 // if it already has all the bits set, nothing to change 1018 // but don't shrink either! 1019 } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) { 1020 return true; 1021 } 1022 } 1023 1024 KnownZero = KnownZeroOut; 1025 KnownOne = KnownOneOut; 1026 break; 1027 case ISD::SELECT: 1028 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero, 1029 KnownOne, TLO, Depth+1)) 1030 return true; 1031 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2, 1032 KnownOne2, TLO, Depth+1)) 1033 return true; 1034 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1035 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1036 1037 // If the operands are constants, see if we can simplify them. 1038 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 1039 return true; 1040 1041 // Only known if known in both the LHS and RHS. 1042 KnownOne &= KnownOne2; 1043 KnownZero &= KnownZero2; 1044 break; 1045 case ISD::SELECT_CC: 1046 if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero, 1047 KnownOne, TLO, Depth+1)) 1048 return true; 1049 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2, 1050 KnownOne2, TLO, Depth+1)) 1051 return true; 1052 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1053 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1054 1055 // If the operands are constants, see if we can simplify them. 1056 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 1057 return true; 1058 1059 // Only known if known in both the LHS and RHS. 1060 KnownOne &= KnownOne2; 1061 KnownZero &= KnownZero2; 1062 break; 1063 case ISD::SHL: 1064 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1065 unsigned ShAmt = SA->getZExtValue(); 1066 SDValue InOp = Op.getOperand(0); 1067 1068 // If the shift count is an invalid immediate, don't do anything. 1069 if (ShAmt >= BitWidth) 1070 break; 1071 1072 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 1073 // single shift. We can do this if the bottom bits (which are shifted 1074 // out) are never demanded. 1075 if (InOp.getOpcode() == ISD::SRL && 1076 isa<ConstantSDNode>(InOp.getOperand(1))) { 1077 if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) { 1078 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 1079 unsigned Opc = ISD::SHL; 1080 int Diff = ShAmt-C1; 1081 if (Diff < 0) { 1082 Diff = -Diff; 1083 Opc = ISD::SRL; 1084 } 1085 1086 SDValue NewSA = 1087 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 1088 MVT VT = Op.getValueType(); 1089 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, 1090 InOp.getOperand(0), NewSA)); 1091 } 1092 } 1093 1094 if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt), 1095 KnownZero, KnownOne, TLO, Depth+1)) 1096 return true; 1097 KnownZero <<= SA->getZExtValue(); 1098 KnownOne <<= SA->getZExtValue(); 1099 // low bits known zero. 1100 KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue()); 1101 } 1102 break; 1103 case ISD::SRL: 1104 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1105 MVT VT = Op.getValueType(); 1106 unsigned ShAmt = SA->getZExtValue(); 1107 unsigned VTSize = VT.getSizeInBits(); 1108 SDValue InOp = Op.getOperand(0); 1109 1110 // If the shift count is an invalid immediate, don't do anything. 1111 if (ShAmt >= BitWidth) 1112 break; 1113 1114 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1115 // single shift. We can do this if the top bits (which are shifted out) 1116 // are never demanded. 1117 if (InOp.getOpcode() == ISD::SHL && 1118 isa<ConstantSDNode>(InOp.getOperand(1))) { 1119 if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) { 1120 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 1121 unsigned Opc = ISD::SRL; 1122 int Diff = ShAmt-C1; 1123 if (Diff < 0) { 1124 Diff = -Diff; 1125 Opc = ISD::SHL; 1126 } 1127 1128 SDValue NewSA = 1129 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 1130 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, 1131 InOp.getOperand(0), NewSA)); 1132 } 1133 } 1134 1135 // Compute the new bits that are at the top now. 1136 if (SimplifyDemandedBits(InOp, (NewMask << ShAmt), 1137 KnownZero, KnownOne, TLO, Depth+1)) 1138 return true; 1139 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1140 KnownZero = KnownZero.lshr(ShAmt); 1141 KnownOne = KnownOne.lshr(ShAmt); 1142 1143 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1144 KnownZero |= HighBits; // High bits known zero. 1145 } 1146 break; 1147 case ISD::SRA: 1148 // If this is an arithmetic shift right and only the low-bit is set, we can 1149 // always convert this into a logical shr, even if the shift amount is 1150 // variable. The low bit of the shift cannot be an input sign bit unless 1151 // the shift amount is >= the size of the datatype, which is undefined. 1152 if (DemandedMask == 1) 1153 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(), 1154 Op.getOperand(0), Op.getOperand(1))); 1155 1156 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1157 MVT VT = Op.getValueType(); 1158 unsigned ShAmt = SA->getZExtValue(); 1159 1160 // If the shift count is an invalid immediate, don't do anything. 1161 if (ShAmt >= BitWidth) 1162 break; 1163 1164 APInt InDemandedMask = (NewMask << ShAmt); 1165 1166 // If any of the demanded bits are produced by the sign extension, we also 1167 // demand the input sign bit. 1168 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1169 if (HighBits.intersects(NewMask)) 1170 InDemandedMask |= APInt::getSignBit(VT.getSizeInBits()); 1171 1172 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, 1173 KnownZero, KnownOne, TLO, Depth+1)) 1174 return true; 1175 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1176 KnownZero = KnownZero.lshr(ShAmt); 1177 KnownOne = KnownOne.lshr(ShAmt); 1178 1179 // Handle the sign bit, adjusted to where it is now in the mask. 1180 APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt); 1181 1182 // If the input sign bit is known to be zero, or if none of the top bits 1183 // are demanded, turn this into an unsigned shift right. 1184 if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) { 1185 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, 1186 Op.getOperand(0), 1187 Op.getOperand(1))); 1188 } else if (KnownOne.intersects(SignBit)) { // New bits are known one. 1189 KnownOne |= HighBits; 1190 } 1191 } 1192 break; 1193 case ISD::SIGN_EXTEND_INREG: { 1194 MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1195 1196 // Sign extension. Compute the demanded bits in the result that are not 1197 // present in the input. 1198 APInt NewBits = APInt::getHighBitsSet(BitWidth, 1199 BitWidth - EVT.getSizeInBits()) & 1200 NewMask; 1201 1202 // If none of the extended bits are demanded, eliminate the sextinreg. 1203 if (NewBits == 0) 1204 return TLO.CombineTo(Op, Op.getOperand(0)); 1205 1206 APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits()); 1207 InSignBit.zext(BitWidth); 1208 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, 1209 EVT.getSizeInBits()) & 1210 NewMask; 1211 1212 // Since the sign extended bits are demanded, we know that the sign 1213 // bit is demanded. 1214 InputDemandedBits |= InSignBit; 1215 1216 if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits, 1217 KnownZero, KnownOne, TLO, Depth+1)) 1218 return true; 1219 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1220 1221 // If the sign bit of the input is known set or clear, then we know the 1222 // top bits of the result. 1223 1224 // If the input sign bit is known zero, convert this into a zero extension. 1225 if (KnownZero.intersects(InSignBit)) 1226 return TLO.CombineTo(Op, 1227 TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,EVT)); 1228 1229 if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 1230 KnownOne |= NewBits; 1231 KnownZero &= ~NewBits; 1232 } else { // Input sign bit unknown 1233 KnownZero &= ~NewBits; 1234 KnownOne &= ~NewBits; 1235 } 1236 break; 1237 } 1238 case ISD::ZERO_EXTEND: { 1239 unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits(); 1240 APInt InMask = NewMask; 1241 InMask.trunc(OperandBitWidth); 1242 1243 // If none of the top bits are demanded, convert this into an any_extend. 1244 APInt NewBits = 1245 APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask; 1246 if (!NewBits.intersects(NewMask)) 1247 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, 1248 Op.getValueType(), 1249 Op.getOperand(0))); 1250 1251 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1252 KnownZero, KnownOne, TLO, Depth+1)) 1253 return true; 1254 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1255 KnownZero.zext(BitWidth); 1256 KnownOne.zext(BitWidth); 1257 KnownZero |= NewBits; 1258 break; 1259 } 1260 case ISD::SIGN_EXTEND: { 1261 MVT InVT = Op.getOperand(0).getValueType(); 1262 unsigned InBits = InVT.getSizeInBits(); 1263 APInt InMask = APInt::getLowBitsSet(BitWidth, InBits); 1264 APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits); 1265 APInt NewBits = ~InMask & NewMask; 1266 1267 // If none of the top bits are demanded, convert this into an any_extend. 1268 if (NewBits == 0) 1269 return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl, 1270 Op.getValueType(), 1271 Op.getOperand(0))); 1272 1273 // Since some of the sign extended bits are demanded, we know that the sign 1274 // bit is demanded. 1275 APInt InDemandedBits = InMask & NewMask; 1276 InDemandedBits |= InSignBit; 1277 InDemandedBits.trunc(InBits); 1278 1279 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero, 1280 KnownOne, TLO, Depth+1)) 1281 return true; 1282 KnownZero.zext(BitWidth); 1283 KnownOne.zext(BitWidth); 1284 1285 // If the sign bit is known zero, convert this to a zero extend. 1286 if (KnownZero.intersects(InSignBit)) 1287 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, 1288 Op.getValueType(), 1289 Op.getOperand(0))); 1290 1291 // If the sign bit is known one, the top bits match. 1292 if (KnownOne.intersects(InSignBit)) { 1293 KnownOne |= NewBits; 1294 KnownZero &= ~NewBits; 1295 } else { // Otherwise, top bits aren't known. 1296 KnownOne &= ~NewBits; 1297 KnownZero &= ~NewBits; 1298 } 1299 break; 1300 } 1301 case ISD::ANY_EXTEND: { 1302 unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits(); 1303 APInt InMask = NewMask; 1304 InMask.trunc(OperandBitWidth); 1305 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1306 KnownZero, KnownOne, TLO, Depth+1)) 1307 return true; 1308 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1309 KnownZero.zext(BitWidth); 1310 KnownOne.zext(BitWidth); 1311 break; 1312 } 1313 case ISD::TRUNCATE: { 1314 // Simplify the input, using demanded bit information, and compute the known 1315 // zero/one bits live out. 1316 APInt TruncMask = NewMask; 1317 TruncMask.zext(Op.getOperand(0).getValueSizeInBits()); 1318 if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, 1319 KnownZero, KnownOne, TLO, Depth+1)) 1320 return true; 1321 KnownZero.trunc(BitWidth); 1322 KnownOne.trunc(BitWidth); 1323 1324 // If the input is only used by this truncate, see if we can shrink it based 1325 // on the known demanded bits. 1326 if (Op.getOperand(0).getNode()->hasOneUse()) { 1327 SDValue In = Op.getOperand(0); 1328 unsigned InBitWidth = In.getValueSizeInBits(); 1329 switch (In.getOpcode()) { 1330 default: break; 1331 case ISD::SRL: 1332 // Shrink SRL by a constant if none of the high bits shifted in are 1333 // demanded. 1334 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){ 1335 APInt HighBits = APInt::getHighBitsSet(InBitWidth, 1336 InBitWidth - BitWidth); 1337 HighBits = HighBits.lshr(ShAmt->getZExtValue()); 1338 HighBits.trunc(BitWidth); 1339 1340 if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) { 1341 // None of the shifted in bits are needed. Add a truncate of the 1342 // shift input, then shift it. 1343 SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl, 1344 Op.getValueType(), 1345 In.getOperand(0)); 1346 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, 1347 Op.getValueType(), 1348 NewTrunc, 1349 In.getOperand(1))); 1350 } 1351 } 1352 break; 1353 } 1354 } 1355 1356 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1357 break; 1358 } 1359 case ISD::AssertZext: { 1360 MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1361 APInt InMask = APInt::getLowBitsSet(BitWidth, 1362 VT.getSizeInBits()); 1363 if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask, 1364 KnownZero, KnownOne, TLO, Depth+1)) 1365 return true; 1366 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1367 KnownZero |= ~InMask & NewMask; 1368 break; 1369 } 1370 case ISD::BIT_CONVERT: 1371#if 0 1372 // If this is an FP->Int bitcast and if the sign bit is the only thing that 1373 // is demanded, turn this into a FGETSIGN. 1374 if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) && 1375 MVT::isFloatingPoint(Op.getOperand(0).getValueType()) && 1376 !MVT::isVector(Op.getOperand(0).getValueType())) { 1377 // Only do this xform if FGETSIGN is valid or if before legalize. 1378 if (!TLO.AfterLegalize || 1379 isOperationLegal(ISD::FGETSIGN, Op.getValueType())) { 1380 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 1381 // place. We expect the SHL to be eliminated by other optimizations. 1382 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), 1383 Op.getOperand(0)); 1384 unsigned ShVal = Op.getValueType().getSizeInBits()-1; 1385 SDValue ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy()); 1386 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(), 1387 Sign, ShAmt)); 1388 } 1389 } 1390#endif 1391 break; 1392 case ISD::ADD: 1393 case ISD::MUL: 1394 case ISD::SUB: { 1395 // Add, Sub, and Mul don't demand any bits in positions beyond that 1396 // of the highest bit demanded of them. 1397 APInt LoMask = APInt::getLowBitsSet(BitWidth, 1398 BitWidth - NewMask.countLeadingZeros()); 1399 if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2, 1400 KnownOne2, TLO, Depth+1)) 1401 return true; 1402 if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2, 1403 KnownOne2, TLO, Depth+1)) 1404 return true; 1405 // See if the operation should be performed at a smaller bit width. 1406 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1407 return true; 1408 } 1409 // FALL THROUGH 1410 default: 1411 // Just use ComputeMaskedBits to compute output bits. 1412 TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth); 1413 break; 1414 } 1415 1416 // If we know the value of all of the demanded bits, return this as a 1417 // constant. 1418 if ((NewMask & (KnownZero|KnownOne)) == NewMask) 1419 return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType())); 1420 1421 return false; 1422} 1423 1424/// computeMaskedBitsForTargetNode - Determine which of the bits specified 1425/// in Mask are known to be either zero or one and return them in the 1426/// KnownZero/KnownOne bitsets. 1427void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 1428 const APInt &Mask, 1429 APInt &KnownZero, 1430 APInt &KnownOne, 1431 const SelectionDAG &DAG, 1432 unsigned Depth) const { 1433 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1434 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1435 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1436 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1437 "Should use MaskedValueIsZero if you don't know whether Op" 1438 " is a target node!"); 1439 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 1440} 1441 1442/// ComputeNumSignBitsForTargetNode - This method can be implemented by 1443/// targets that want to expose additional information about sign bits to the 1444/// DAG Combiner. 1445unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 1446 unsigned Depth) const { 1447 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1448 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1449 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1450 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1451 "Should use ComputeNumSignBits if you don't know whether Op" 1452 " is a target node!"); 1453 return 1; 1454} 1455 1456/// ValueHasExactlyOneBitSet - Test if the given value is known to have exactly 1457/// one bit set. This differs from ComputeMaskedBits in that it doesn't need to 1458/// determine which bit is set. 1459/// 1460static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) { 1461 // A left-shift of a constant one will have exactly one bit set, because 1462 // shifting the bit off the end is undefined. 1463 if (Val.getOpcode() == ISD::SHL) 1464 if (ConstantSDNode *C = 1465 dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0))) 1466 if (C->getAPIntValue() == 1) 1467 return true; 1468 1469 // Similarly, a right-shift of a constant sign-bit will have exactly 1470 // one bit set. 1471 if (Val.getOpcode() == ISD::SRL) 1472 if (ConstantSDNode *C = 1473 dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0))) 1474 if (C->getAPIntValue().isSignBit()) 1475 return true; 1476 1477 // More could be done here, though the above checks are enough 1478 // to handle some common cases. 1479 1480 // Fall back to ComputeMaskedBits to catch other known cases. 1481 MVT OpVT = Val.getValueType(); 1482 unsigned BitWidth = OpVT.getSizeInBits(); 1483 APInt Mask = APInt::getAllOnesValue(BitWidth); 1484 APInt KnownZero, KnownOne; 1485 DAG.ComputeMaskedBits(Val, Mask, KnownZero, KnownOne); 1486 return (KnownZero.countPopulation() == BitWidth - 1) && 1487 (KnownOne.countPopulation() == 1); 1488} 1489 1490/// SimplifySetCC - Try to simplify a setcc built with the specified operands 1491/// and cc. If it is unable to simplify it, return a null SDValue. 1492SDValue 1493TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1, 1494 ISD::CondCode Cond, bool foldBooleans, 1495 DAGCombinerInfo &DCI, DebugLoc dl) const { 1496 SelectionDAG &DAG = DCI.DAG; 1497 1498 // These setcc operations always fold. 1499 switch (Cond) { 1500 default: break; 1501 case ISD::SETFALSE: 1502 case ISD::SETFALSE2: return DAG.getConstant(0, VT); 1503 case ISD::SETTRUE: 1504 case ISD::SETTRUE2: return DAG.getConstant(1, VT); 1505 } 1506 1507 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 1508 const APInt &C1 = N1C->getAPIntValue(); 1509 if (isa<ConstantSDNode>(N0.getNode())) { 1510 return DAG.FoldSetCC(VT, N0, N1, Cond, dl); 1511 } else { 1512 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 1513 // equality comparison, then we're just comparing whether X itself is 1514 // zero. 1515 if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) && 1516 N0.getOperand(0).getOpcode() == ISD::CTLZ && 1517 N0.getOperand(1).getOpcode() == ISD::Constant) { 1518 unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue(); 1519 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1520 ShAmt == Log2_32(N0.getValueType().getSizeInBits())) { 1521 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 1522 // (srl (ctlz x), 5) == 0 -> X != 0 1523 // (srl (ctlz x), 5) != 1 -> X != 0 1524 Cond = ISD::SETNE; 1525 } else { 1526 // (srl (ctlz x), 5) != 0 -> X == 0 1527 // (srl (ctlz x), 5) == 1 -> X == 0 1528 Cond = ISD::SETEQ; 1529 } 1530 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 1531 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), 1532 Zero, Cond); 1533 } 1534 } 1535 1536 // If the LHS is '(and load, const)', the RHS is 0, 1537 // the test is for equality or unsigned, and all 1 bits of the const are 1538 // in the same partial word, see if we can shorten the load. 1539 if (DCI.isBeforeLegalize() && 1540 N0.getOpcode() == ISD::AND && C1 == 0 && 1541 N0.getNode()->hasOneUse() && 1542 isa<LoadSDNode>(N0.getOperand(0)) && 1543 N0.getOperand(0).getNode()->hasOneUse() && 1544 isa<ConstantSDNode>(N0.getOperand(1))) { 1545 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 1546 uint64_t bestMask = 0; 1547 unsigned bestWidth = 0, bestOffset = 0; 1548 if (!Lod->isVolatile() && Lod->isUnindexed() && 1549 // FIXME: This uses getZExtValue() below so it only works on i64 and 1550 // below. 1551 N0.getValueType().getSizeInBits() <= 64) { 1552 unsigned origWidth = N0.getValueType().getSizeInBits(); 1553 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 1554 // 8 bits, but have to be careful... 1555 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 1556 origWidth = Lod->getMemoryVT().getSizeInBits(); 1557 uint64_t Mask =cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue(); 1558 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 1559 uint64_t newMask = (1ULL << width) - 1; 1560 for (unsigned offset=0; offset<origWidth/width; offset++) { 1561 if ((newMask & Mask) == Mask) { 1562 if (!TD->isLittleEndian()) 1563 bestOffset = (origWidth/width - offset - 1) * (width/8); 1564 else 1565 bestOffset = (uint64_t)offset * (width/8); 1566 bestMask = Mask >> (offset * (width/8) * 8); 1567 bestWidth = width; 1568 break; 1569 } 1570 newMask = newMask << width; 1571 } 1572 } 1573 } 1574 if (bestWidth) { 1575 MVT newVT = MVT::getIntegerVT(bestWidth); 1576 if (newVT.isRound()) { 1577 MVT PtrType = Lod->getOperand(1).getValueType(); 1578 SDValue Ptr = Lod->getBasePtr(); 1579 if (bestOffset != 0) 1580 Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(), 1581 DAG.getConstant(bestOffset, PtrType)); 1582 unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset); 1583 SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 1584 Lod->getSrcValue(), 1585 Lod->getSrcValueOffset() + bestOffset, 1586 false, NewAlign); 1587 return DAG.getSetCC(dl, VT, 1588 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 1589 DAG.getConstant(bestMask, newVT)), 1590 DAG.getConstant(0LL, newVT), Cond); 1591 } 1592 } 1593 } 1594 1595 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 1596 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 1597 unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits(); 1598 1599 // If the comparison constant has bits in the upper part, the 1600 // zero-extended value could never match. 1601 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 1602 C1.getBitWidth() - InSize))) { 1603 switch (Cond) { 1604 case ISD::SETUGT: 1605 case ISD::SETUGE: 1606 case ISD::SETEQ: return DAG.getConstant(0, VT); 1607 case ISD::SETULT: 1608 case ISD::SETULE: 1609 case ISD::SETNE: return DAG.getConstant(1, VT); 1610 case ISD::SETGT: 1611 case ISD::SETGE: 1612 // True if the sign bit of C1 is set. 1613 return DAG.getConstant(C1.isNegative(), VT); 1614 case ISD::SETLT: 1615 case ISD::SETLE: 1616 // True if the sign bit of C1 isn't set. 1617 return DAG.getConstant(C1.isNonNegative(), VT); 1618 default: 1619 break; 1620 } 1621 } 1622 1623 // Otherwise, we can perform the comparison with the low bits. 1624 switch (Cond) { 1625 case ISD::SETEQ: 1626 case ISD::SETNE: 1627 case ISD::SETUGT: 1628 case ISD::SETUGE: 1629 case ISD::SETULT: 1630 case ISD::SETULE: 1631 return DAG.getSetCC(dl, VT, N0.getOperand(0), 1632 DAG.getConstant(APInt(C1).trunc(InSize), 1633 N0.getOperand(0).getValueType()), 1634 Cond); 1635 default: 1636 break; // todo, be more careful with signed comparisons 1637 } 1638 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 1639 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1640 MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 1641 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 1642 MVT ExtDstTy = N0.getValueType(); 1643 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 1644 1645 // If the extended part has any inconsistent bits, it cannot ever 1646 // compare equal. In other words, they have to be all ones or all 1647 // zeros. 1648 APInt ExtBits = 1649 APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits); 1650 if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits) 1651 return DAG.getConstant(Cond == ISD::SETNE, VT); 1652 1653 SDValue ZextOp; 1654 MVT Op0Ty = N0.getOperand(0).getValueType(); 1655 if (Op0Ty == ExtSrcTy) { 1656 ZextOp = N0.getOperand(0); 1657 } else { 1658 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 1659 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0), 1660 DAG.getConstant(Imm, Op0Ty)); 1661 } 1662 if (!DCI.isCalledByLegalizer()) 1663 DCI.AddToWorklist(ZextOp.getNode()); 1664 // Otherwise, make this a use of a zext. 1665 return DAG.getSetCC(dl, VT, ZextOp, 1666 DAG.getConstant(C1 & APInt::getLowBitsSet( 1667 ExtDstTyBits, 1668 ExtSrcTyBits), 1669 ExtDstTy), 1670 Cond); 1671 } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) && 1672 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1673 1674 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 1675 if (N0.getOpcode() == ISD::SETCC) { 1676 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getZExtValue() != 1); 1677 if (TrueWhenTrue) 1678 return N0; 1679 1680 // Invert the condition. 1681 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 1682 CC = ISD::getSetCCInverse(CC, 1683 N0.getOperand(0).getValueType().isInteger()); 1684 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 1685 } 1686 1687 if ((N0.getOpcode() == ISD::XOR || 1688 (N0.getOpcode() == ISD::AND && 1689 N0.getOperand(0).getOpcode() == ISD::XOR && 1690 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 1691 isa<ConstantSDNode>(N0.getOperand(1)) && 1692 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) { 1693 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 1694 // can only do this if the top bits are known zero. 1695 unsigned BitWidth = N0.getValueSizeInBits(); 1696 if (DAG.MaskedValueIsZero(N0, 1697 APInt::getHighBitsSet(BitWidth, 1698 BitWidth-1))) { 1699 // Okay, get the un-inverted input value. 1700 SDValue Val; 1701 if (N0.getOpcode() == ISD::XOR) 1702 Val = N0.getOperand(0); 1703 else { 1704 assert(N0.getOpcode() == ISD::AND && 1705 N0.getOperand(0).getOpcode() == ISD::XOR); 1706 // ((X^1)&1)^1 -> X & 1 1707 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 1708 N0.getOperand(0).getOperand(0), 1709 N0.getOperand(1)); 1710 } 1711 return DAG.getSetCC(dl, VT, Val, N1, 1712 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 1713 } 1714 } 1715 } 1716 1717 APInt MinVal, MaxVal; 1718 unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits(); 1719 if (ISD::isSignedIntSetCC(Cond)) { 1720 MinVal = APInt::getSignedMinValue(OperandBitSize); 1721 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 1722 } else { 1723 MinVal = APInt::getMinValue(OperandBitSize); 1724 MaxVal = APInt::getMaxValue(OperandBitSize); 1725 } 1726 1727 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 1728 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 1729 if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true 1730 // X >= C0 --> X > (C0-1) 1731 return DAG.getSetCC(dl, VT, N0, 1732 DAG.getConstant(C1-1, N1.getValueType()), 1733 (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT); 1734 } 1735 1736 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 1737 if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true 1738 // X <= C0 --> X < (C0+1) 1739 return DAG.getSetCC(dl, VT, N0, 1740 DAG.getConstant(C1+1, N1.getValueType()), 1741 (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT); 1742 } 1743 1744 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal) 1745 return DAG.getConstant(0, VT); // X < MIN --> false 1746 if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal) 1747 return DAG.getConstant(1, VT); // X >= MIN --> true 1748 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal) 1749 return DAG.getConstant(0, VT); // X > MAX --> false 1750 if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal) 1751 return DAG.getConstant(1, VT); // X <= MAX --> true 1752 1753 // Canonicalize setgt X, Min --> setne X, Min 1754 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal) 1755 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 1756 // Canonicalize setlt X, Max --> setne X, Max 1757 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal) 1758 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 1759 1760 // If we have setult X, 1, turn it into seteq X, 0 1761 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1) 1762 return DAG.getSetCC(dl, VT, N0, 1763 DAG.getConstant(MinVal, N0.getValueType()), 1764 ISD::SETEQ); 1765 // If we have setugt X, Max-1, turn it into seteq X, Max 1766 else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1) 1767 return DAG.getSetCC(dl, VT, N0, 1768 DAG.getConstant(MaxVal, N0.getValueType()), 1769 ISD::SETEQ); 1770 1771 // If we have "setcc X, C0", check to see if we can shrink the immediate 1772 // by changing cc. 1773 1774 // SETUGT X, SINTMAX -> SETLT X, 0 1775 if (Cond == ISD::SETUGT && 1776 C1 == APInt::getSignedMaxValue(OperandBitSize)) 1777 return DAG.getSetCC(dl, VT, N0, 1778 DAG.getConstant(0, N1.getValueType()), 1779 ISD::SETLT); 1780 1781 // SETULT X, SINTMIN -> SETGT X, -1 1782 if (Cond == ISD::SETULT && 1783 C1 == APInt::getSignedMinValue(OperandBitSize)) { 1784 SDValue ConstMinusOne = 1785 DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), 1786 N1.getValueType()); 1787 return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT); 1788 } 1789 1790 // Fold bit comparisons when we can. 1791 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1792 VT == N0.getValueType() && N0.getOpcode() == ISD::AND) 1793 if (ConstantSDNode *AndRHS = 1794 dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 1795 MVT ShiftTy = DCI.isBeforeLegalize() ? 1796 getPointerTy() : getShiftAmountTy(); 1797 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 1798 // Perform the xform if the AND RHS is a single bit. 1799 if (isPowerOf2_64(AndRHS->getZExtValue())) { 1800 return DAG.getNode(ISD::SRL, dl, VT, N0, 1801 DAG.getConstant(Log2_64(AndRHS->getZExtValue()), 1802 ShiftTy)); 1803 } 1804 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getZExtValue()) { 1805 // (X & 8) == 8 --> (X & 8) >> 3 1806 // Perform the xform if C1 is a single bit. 1807 if (C1.isPowerOf2()) { 1808 return DAG.getNode(ISD::SRL, dl, VT, N0, 1809 DAG.getConstant(C1.logBase2(), ShiftTy)); 1810 } 1811 } 1812 } 1813 } 1814 } else if (isa<ConstantSDNode>(N0.getNode())) { 1815 // Ensure that the constant occurs on the RHS. 1816 return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond)); 1817 } 1818 1819 if (isa<ConstantFPSDNode>(N0.getNode())) { 1820 // Constant fold or commute setcc. 1821 SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl); 1822 if (O.getNode()) return O; 1823 } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) { 1824 // If the RHS of an FP comparison is a constant, simplify it away in 1825 // some cases. 1826 if (CFP->getValueAPF().isNaN()) { 1827 // If an operand is known to be a nan, we can fold it. 1828 switch (ISD::getUnorderedFlavor(Cond)) { 1829 default: assert(0 && "Unknown flavor!"); 1830 case 0: // Known false. 1831 return DAG.getConstant(0, VT); 1832 case 1: // Known true. 1833 return DAG.getConstant(1, VT); 1834 case 2: // Undefined. 1835 return DAG.getUNDEF(VT); 1836 } 1837 } 1838 1839 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 1840 // constant if knowing that the operand is non-nan is enough. We prefer to 1841 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 1842 // materialize 0.0. 1843 if (Cond == ISD::SETO || Cond == ISD::SETUO) 1844 return DAG.getSetCC(dl, VT, N0, N0, Cond); 1845 } 1846 1847 if (N0 == N1) { 1848 // We can always fold X == X for integer setcc's. 1849 if (N0.getValueType().isInteger()) 1850 return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT); 1851 unsigned UOF = ISD::getUnorderedFlavor(Cond); 1852 if (UOF == 2) // FP operators that are undefined on NaNs. 1853 return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT); 1854 if (UOF == unsigned(ISD::isTrueWhenEqual(Cond))) 1855 return DAG.getConstant(UOF, VT); 1856 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 1857 // if it is not already. 1858 ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO; 1859 if (NewCond != Cond) 1860 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 1861 } 1862 1863 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1864 N0.getValueType().isInteger()) { 1865 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 1866 N0.getOpcode() == ISD::XOR) { 1867 // Simplify (X+Y) == (X+Z) --> Y == Z 1868 if (N0.getOpcode() == N1.getOpcode()) { 1869 if (N0.getOperand(0) == N1.getOperand(0)) 1870 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 1871 if (N0.getOperand(1) == N1.getOperand(1)) 1872 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 1873 if (DAG.isCommutativeBinOp(N0.getOpcode())) { 1874 // If X op Y == Y op X, try other combinations. 1875 if (N0.getOperand(0) == N1.getOperand(1)) 1876 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 1877 Cond); 1878 if (N0.getOperand(1) == N1.getOperand(0)) 1879 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 1880 Cond); 1881 } 1882 } 1883 1884 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) { 1885 if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 1886 // Turn (X+C1) == C2 --> X == C2-C1 1887 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 1888 return DAG.getSetCC(dl, VT, N0.getOperand(0), 1889 DAG.getConstant(RHSC->getAPIntValue()- 1890 LHSR->getAPIntValue(), 1891 N0.getValueType()), Cond); 1892 } 1893 1894 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 1895 if (N0.getOpcode() == ISD::XOR) 1896 // If we know that all of the inverted bits are zero, don't bother 1897 // performing the inversion. 1898 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 1899 return 1900 DAG.getSetCC(dl, VT, N0.getOperand(0), 1901 DAG.getConstant(LHSR->getAPIntValue() ^ 1902 RHSC->getAPIntValue(), 1903 N0.getValueType()), 1904 Cond); 1905 } 1906 1907 // Turn (C1-X) == C2 --> X == C1-C2 1908 if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 1909 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 1910 return 1911 DAG.getSetCC(dl, VT, N0.getOperand(1), 1912 DAG.getConstant(SUBC->getAPIntValue() - 1913 RHSC->getAPIntValue(), 1914 N0.getValueType()), 1915 Cond); 1916 } 1917 } 1918 } 1919 1920 // Simplify (X+Z) == X --> Z == 0 1921 if (N0.getOperand(0) == N1) 1922 return DAG.getSetCC(dl, VT, N0.getOperand(1), 1923 DAG.getConstant(0, N0.getValueType()), Cond); 1924 if (N0.getOperand(1) == N1) { 1925 if (DAG.isCommutativeBinOp(N0.getOpcode())) 1926 return DAG.getSetCC(dl, VT, N0.getOperand(0), 1927 DAG.getConstant(0, N0.getValueType()), Cond); 1928 else if (N0.getNode()->hasOneUse()) { 1929 assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!"); 1930 // (Z-X) == X --> Z == X<<1 1931 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), 1932 N1, 1933 DAG.getConstant(1, getShiftAmountTy())); 1934 if (!DCI.isCalledByLegalizer()) 1935 DCI.AddToWorklist(SH.getNode()); 1936 return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond); 1937 } 1938 } 1939 } 1940 1941 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 1942 N1.getOpcode() == ISD::XOR) { 1943 // Simplify X == (X+Z) --> Z == 0 1944 if (N1.getOperand(0) == N0) { 1945 return DAG.getSetCC(dl, VT, N1.getOperand(1), 1946 DAG.getConstant(0, N1.getValueType()), Cond); 1947 } else if (N1.getOperand(1) == N0) { 1948 if (DAG.isCommutativeBinOp(N1.getOpcode())) { 1949 return DAG.getSetCC(dl, VT, N1.getOperand(0), 1950 DAG.getConstant(0, N1.getValueType()), Cond); 1951 } else if (N1.getNode()->hasOneUse()) { 1952 assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!"); 1953 // X == (Z-X) --> X<<1 == Z 1954 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0, 1955 DAG.getConstant(1, getShiftAmountTy())); 1956 if (!DCI.isCalledByLegalizer()) 1957 DCI.AddToWorklist(SH.getNode()); 1958 return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond); 1959 } 1960 } 1961 } 1962 1963 // Simplify x&y == y to x&y != 0 if y has exactly one bit set. 1964 // Note that where y is variable and is known to have at most 1965 // one bit set (for example, if it is z&1) we cannot do this; 1966 // the expressions are not equivalent when y==0. 1967 if (N0.getOpcode() == ISD::AND) 1968 if (N0.getOperand(0) == N1 || N0.getOperand(1) == N1) { 1969 if (ValueHasExactlyOneBitSet(N1, DAG)) { 1970 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true); 1971 SDValue Zero = DAG.getConstant(0, N1.getValueType()); 1972 return DAG.getSetCC(dl, VT, N0, Zero, Cond); 1973 } 1974 } 1975 if (N1.getOpcode() == ISD::AND) 1976 if (N1.getOperand(0) == N0 || N1.getOperand(1) == N0) { 1977 if (ValueHasExactlyOneBitSet(N0, DAG)) { 1978 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true); 1979 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 1980 return DAG.getSetCC(dl, VT, N1, Zero, Cond); 1981 } 1982 } 1983 } 1984 1985 // Fold away ALL boolean setcc's. 1986 SDValue Temp; 1987 if (N0.getValueType() == MVT::i1 && foldBooleans) { 1988 switch (Cond) { 1989 default: assert(0 && "Unknown integer setcc!"); 1990 case ISD::SETEQ: // X == Y -> ~(X^Y) 1991 Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1); 1992 N0 = DAG.getNOT(dl, Temp, MVT::i1); 1993 if (!DCI.isCalledByLegalizer()) 1994 DCI.AddToWorklist(Temp.getNode()); 1995 break; 1996 case ISD::SETNE: // X != Y --> (X^Y) 1997 N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1); 1998 break; 1999 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 2000 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 2001 Temp = DAG.getNOT(dl, N0, MVT::i1); 2002 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp); 2003 if (!DCI.isCalledByLegalizer()) 2004 DCI.AddToWorklist(Temp.getNode()); 2005 break; 2006 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 2007 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 2008 Temp = DAG.getNOT(dl, N1, MVT::i1); 2009 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp); 2010 if (!DCI.isCalledByLegalizer()) 2011 DCI.AddToWorklist(Temp.getNode()); 2012 break; 2013 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 2014 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 2015 Temp = DAG.getNOT(dl, N0, MVT::i1); 2016 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp); 2017 if (!DCI.isCalledByLegalizer()) 2018 DCI.AddToWorklist(Temp.getNode()); 2019 break; 2020 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 2021 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 2022 Temp = DAG.getNOT(dl, N1, MVT::i1); 2023 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp); 2024 break; 2025 } 2026 if (VT != MVT::i1) { 2027 if (!DCI.isCalledByLegalizer()) 2028 DCI.AddToWorklist(N0.getNode()); 2029 // FIXME: If running after legalize, we probably can't do this. 2030 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0); 2031 } 2032 return N0; 2033 } 2034 2035 // Could not fold it. 2036 return SDValue(); 2037} 2038 2039/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 2040/// node is a GlobalAddress + offset. 2041bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA, 2042 int64_t &Offset) const { 2043 if (isa<GlobalAddressSDNode>(N)) { 2044 GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N); 2045 GA = GASD->getGlobal(); 2046 Offset += GASD->getOffset(); 2047 return true; 2048 } 2049 2050 if (N->getOpcode() == ISD::ADD) { 2051 SDValue N1 = N->getOperand(0); 2052 SDValue N2 = N->getOperand(1); 2053 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 2054 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 2055 if (V) { 2056 Offset += V->getSExtValue(); 2057 return true; 2058 } 2059 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 2060 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 2061 if (V) { 2062 Offset += V->getSExtValue(); 2063 return true; 2064 } 2065 } 2066 } 2067 return false; 2068} 2069 2070 2071/// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is 2072/// loading 'Bytes' bytes from a location that is 'Dist' units away from the 2073/// location that the 'Base' load is loading from. 2074bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base, 2075 unsigned Bytes, int Dist, 2076 const MachineFrameInfo *MFI) const { 2077 if (LD->getOperand(0).getNode() != Base->getOperand(0).getNode()) 2078 return false; 2079 MVT VT = LD->getValueType(0); 2080 if (VT.getSizeInBits() / 8 != Bytes) 2081 return false; 2082 2083 SDValue Loc = LD->getOperand(1); 2084 SDValue BaseLoc = Base->getOperand(1); 2085 if (Loc.getOpcode() == ISD::FrameIndex) { 2086 if (BaseLoc.getOpcode() != ISD::FrameIndex) 2087 return false; 2088 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 2089 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 2090 int FS = MFI->getObjectSize(FI); 2091 int BFS = MFI->getObjectSize(BFI); 2092 if (FS != BFS || FS != (int)Bytes) return false; 2093 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 2094 } 2095 2096 GlobalValue *GV1 = NULL; 2097 GlobalValue *GV2 = NULL; 2098 int64_t Offset1 = 0; 2099 int64_t Offset2 = 0; 2100 bool isGA1 = isGAPlusOffset(Loc.getNode(), GV1, Offset1); 2101 bool isGA2 = isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 2102 if (isGA1 && isGA2 && GV1 == GV2) 2103 return Offset1 == (Offset2 + Dist*Bytes); 2104 return false; 2105} 2106 2107 2108SDValue TargetLowering:: 2109PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { 2110 // Default implementation: no optimization. 2111 return SDValue(); 2112} 2113 2114//===----------------------------------------------------------------------===// 2115// Inline Assembler Implementation Methods 2116//===----------------------------------------------------------------------===// 2117 2118 2119TargetLowering::ConstraintType 2120TargetLowering::getConstraintType(const std::string &Constraint) const { 2121 // FIXME: lots more standard ones to handle. 2122 if (Constraint.size() == 1) { 2123 switch (Constraint[0]) { 2124 default: break; 2125 case 'r': return C_RegisterClass; 2126 case 'm': // memory 2127 case 'o': // offsetable 2128 case 'V': // not offsetable 2129 return C_Memory; 2130 case 'i': // Simple Integer or Relocatable Constant 2131 case 'n': // Simple Integer 2132 case 's': // Relocatable Constant 2133 case 'X': // Allow ANY value. 2134 case 'I': // Target registers. 2135 case 'J': 2136 case 'K': 2137 case 'L': 2138 case 'M': 2139 case 'N': 2140 case 'O': 2141 case 'P': 2142 return C_Other; 2143 } 2144 } 2145 2146 if (Constraint.size() > 1 && Constraint[0] == '{' && 2147 Constraint[Constraint.size()-1] == '}') 2148 return C_Register; 2149 return C_Unknown; 2150} 2151 2152/// LowerXConstraint - try to replace an X constraint, which matches anything, 2153/// with another that has more specific requirements based on the type of the 2154/// corresponding operand. 2155const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const{ 2156 if (ConstraintVT.isInteger()) 2157 return "r"; 2158 if (ConstraintVT.isFloatingPoint()) 2159 return "f"; // works for many targets 2160 return 0; 2161} 2162 2163/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 2164/// vector. If it is invalid, don't add anything to Ops. 2165void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 2166 char ConstraintLetter, 2167 bool hasMemory, 2168 std::vector<SDValue> &Ops, 2169 SelectionDAG &DAG) const { 2170 switch (ConstraintLetter) { 2171 default: break; 2172 case 'X': // Allows any operand; labels (basic block) use this. 2173 if (Op.getOpcode() == ISD::BasicBlock) { 2174 Ops.push_back(Op); 2175 return; 2176 } 2177 // fall through 2178 case 'i': // Simple Integer or Relocatable Constant 2179 case 'n': // Simple Integer 2180 case 's': { // Relocatable Constant 2181 // These operands are interested in values of the form (GV+C), where C may 2182 // be folded in as an offset of GV, or it may be explicitly added. Also, it 2183 // is possible and fine if either GV or C are missing. 2184 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2185 GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op); 2186 2187 // If we have "(add GV, C)", pull out GV/C 2188 if (Op.getOpcode() == ISD::ADD) { 2189 C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2190 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0)); 2191 if (C == 0 || GA == 0) { 2192 C = dyn_cast<ConstantSDNode>(Op.getOperand(0)); 2193 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1)); 2194 } 2195 if (C == 0 || GA == 0) 2196 C = 0, GA = 0; 2197 } 2198 2199 // If we find a valid operand, map to the TargetXXX version so that the 2200 // value itself doesn't get selected. 2201 if (GA) { // Either &GV or &GV+C 2202 if (ConstraintLetter != 'n') { 2203 int64_t Offs = GA->getOffset(); 2204 if (C) Offs += C->getZExtValue(); 2205 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), 2206 Op.getValueType(), Offs)); 2207 return; 2208 } 2209 } 2210 if (C) { // just C, no GV. 2211 // Simple constants are not allowed for 's'. 2212 if (ConstraintLetter != 's') { 2213 // gcc prints these as sign extended. Sign extend value to 64 bits 2214 // now; without this it would get ZExt'd later in 2215 // ScheduleDAGSDNodes::EmitNode, which is very generic. 2216 Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(), 2217 MVT::i64)); 2218 return; 2219 } 2220 } 2221 break; 2222 } 2223 } 2224} 2225 2226std::vector<unsigned> TargetLowering:: 2227getRegClassForInlineAsmConstraint(const std::string &Constraint, 2228 MVT VT) const { 2229 return std::vector<unsigned>(); 2230} 2231 2232 2233std::pair<unsigned, const TargetRegisterClass*> TargetLowering:: 2234getRegForInlineAsmConstraint(const std::string &Constraint, 2235 MVT VT) const { 2236 if (Constraint[0] != '{') 2237 return std::pair<unsigned, const TargetRegisterClass*>(0, 0); 2238 assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?"); 2239 2240 // Remove the braces from around the name. 2241 std::string RegName(Constraint.begin()+1, Constraint.end()-1); 2242 2243 // Figure out which register class contains this reg. 2244 const TargetRegisterInfo *RI = TM.getRegisterInfo(); 2245 for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(), 2246 E = RI->regclass_end(); RCI != E; ++RCI) { 2247 const TargetRegisterClass *RC = *RCI; 2248 2249 // If none of the the value types for this register class are valid, we 2250 // can't use it. For example, 64-bit reg classes on 32-bit targets. 2251 bool isLegal = false; 2252 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end(); 2253 I != E; ++I) { 2254 if (isTypeLegal(*I)) { 2255 isLegal = true; 2256 break; 2257 } 2258 } 2259 2260 if (!isLegal) continue; 2261 2262 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); 2263 I != E; ++I) { 2264 if (StringsEqualNoCase(RegName, RI->get(*I).AsmName)) 2265 return std::make_pair(*I, RC); 2266 } 2267 } 2268 2269 return std::pair<unsigned, const TargetRegisterClass*>(0, 0); 2270} 2271 2272//===----------------------------------------------------------------------===// 2273// Constraint Selection. 2274 2275/// isMatchingInputConstraint - Return true of this is an input operand that is 2276/// a matching constraint like "4". 2277bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 2278 assert(!ConstraintCode.empty() && "No known constraint!"); 2279 return isdigit(ConstraintCode[0]); 2280} 2281 2282/// getMatchedOperand - If this is an input matching constraint, this method 2283/// returns the output operand it matches. 2284unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 2285 assert(!ConstraintCode.empty() && "No known constraint!"); 2286 return atoi(ConstraintCode.c_str()); 2287} 2288 2289 2290/// getConstraintGenerality - Return an integer indicating how general CT 2291/// is. 2292static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 2293 switch (CT) { 2294 default: assert(0 && "Unknown constraint type!"); 2295 case TargetLowering::C_Other: 2296 case TargetLowering::C_Unknown: 2297 return 0; 2298 case TargetLowering::C_Register: 2299 return 1; 2300 case TargetLowering::C_RegisterClass: 2301 return 2; 2302 case TargetLowering::C_Memory: 2303 return 3; 2304 } 2305} 2306 2307/// ChooseConstraint - If there are multiple different constraints that we 2308/// could pick for this operand (e.g. "imr") try to pick the 'best' one. 2309/// This is somewhat tricky: constraints fall into four classes: 2310/// Other -> immediates and magic values 2311/// Register -> one specific register 2312/// RegisterClass -> a group of regs 2313/// Memory -> memory 2314/// Ideally, we would pick the most specific constraint possible: if we have 2315/// something that fits into a register, we would pick it. The problem here 2316/// is that if we have something that could either be in a register or in 2317/// memory that use of the register could cause selection of *other* 2318/// operands to fail: they might only succeed if we pick memory. Because of 2319/// this the heuristic we use is: 2320/// 2321/// 1) If there is an 'other' constraint, and if the operand is valid for 2322/// that constraint, use it. This makes us take advantage of 'i' 2323/// constraints when available. 2324/// 2) Otherwise, pick the most general constraint present. This prefers 2325/// 'm' over 'r', for example. 2326/// 2327static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 2328 bool hasMemory, const TargetLowering &TLI, 2329 SDValue Op, SelectionDAG *DAG) { 2330 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 2331 unsigned BestIdx = 0; 2332 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 2333 int BestGenerality = -1; 2334 2335 // Loop over the options, keeping track of the most general one. 2336 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 2337 TargetLowering::ConstraintType CType = 2338 TLI.getConstraintType(OpInfo.Codes[i]); 2339 2340 // If this is an 'other' constraint, see if the operand is valid for it. 2341 // For example, on X86 we might have an 'rI' constraint. If the operand 2342 // is an integer in the range [0..31] we want to use I (saving a load 2343 // of a register), otherwise we must use 'r'. 2344 if (CType == TargetLowering::C_Other && Op.getNode()) { 2345 assert(OpInfo.Codes[i].size() == 1 && 2346 "Unhandled multi-letter 'other' constraint"); 2347 std::vector<SDValue> ResultOps; 2348 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory, 2349 ResultOps, *DAG); 2350 if (!ResultOps.empty()) { 2351 BestType = CType; 2352 BestIdx = i; 2353 break; 2354 } 2355 } 2356 2357 // This constraint letter is more general than the previous one, use it. 2358 int Generality = getConstraintGenerality(CType); 2359 if (Generality > BestGenerality) { 2360 BestType = CType; 2361 BestIdx = i; 2362 BestGenerality = Generality; 2363 } 2364 } 2365 2366 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 2367 OpInfo.ConstraintType = BestType; 2368} 2369 2370/// ComputeConstraintToUse - Determines the constraint code and constraint 2371/// type to use for the specific AsmOperandInfo, setting 2372/// OpInfo.ConstraintCode and OpInfo.ConstraintType. 2373void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 2374 SDValue Op, 2375 bool hasMemory, 2376 SelectionDAG *DAG) const { 2377 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 2378 2379 // Single-letter constraints ('r') are very common. 2380 if (OpInfo.Codes.size() == 1) { 2381 OpInfo.ConstraintCode = OpInfo.Codes[0]; 2382 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 2383 } else { 2384 ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG); 2385 } 2386 2387 // 'X' matches anything. 2388 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 2389 // Labels and constants are handled elsewhere ('X' is the only thing 2390 // that matches labels). 2391 if (isa<BasicBlock>(OpInfo.CallOperandVal) || 2392 isa<ConstantInt>(OpInfo.CallOperandVal)) 2393 return; 2394 2395 // Otherwise, try to resolve it to something we know about by looking at 2396 // the actual operand type. 2397 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 2398 OpInfo.ConstraintCode = Repl; 2399 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 2400 } 2401 } 2402} 2403 2404//===----------------------------------------------------------------------===// 2405// Loop Strength Reduction hooks 2406//===----------------------------------------------------------------------===// 2407 2408/// isLegalAddressingMode - Return true if the addressing mode represented 2409/// by AM is legal for this target, for a load/store of the specified type. 2410bool TargetLowering::isLegalAddressingMode(const AddrMode &AM, 2411 const Type *Ty) const { 2412 // The default implementation of this implements a conservative RISCy, r+r and 2413 // r+i addr mode. 2414 2415 // Allows a sign-extended 16-bit immediate field. 2416 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 2417 return false; 2418 2419 // No global is ever allowed as a base. 2420 if (AM.BaseGV) 2421 return false; 2422 2423 // Only support r+r, 2424 switch (AM.Scale) { 2425 case 0: // "r+i" or just "i", depending on HasBaseReg. 2426 break; 2427 case 1: 2428 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 2429 return false; 2430 // Otherwise we have r+r or r+i. 2431 break; 2432 case 2: 2433 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 2434 return false; 2435 // Allow 2*r as r+r. 2436 break; 2437 } 2438 2439 return true; 2440} 2441 2442/// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant, 2443/// return a DAG expression to select that will generate the same value by 2444/// multiplying by a magic number. See: 2445/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 2446SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 2447 std::vector<SDNode*>* Created) const { 2448 MVT VT = N->getValueType(0); 2449 DebugLoc dl= N->getDebugLoc(); 2450 2451 // Check to see if we can do this. 2452 // FIXME: We should be more aggressive here. 2453 if (!isTypeLegal(VT)) 2454 return SDValue(); 2455 2456 APInt d = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue(); 2457 APInt::ms magics = d.magic(); 2458 2459 // Multiply the numerator (operand 0) by the magic value 2460 // FIXME: We should support doing a MUL in a wider type 2461 SDValue Q; 2462 if (isOperationLegalOrCustom(ISD::MULHS, VT)) 2463 Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0), 2464 DAG.getConstant(magics.m, VT)); 2465 else if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) 2466 Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), 2467 N->getOperand(0), 2468 DAG.getConstant(magics.m, VT)).getNode(), 1); 2469 else 2470 return SDValue(); // No mulhs or equvialent 2471 // If d > 0 and m < 0, add the numerator 2472 if (d.isStrictlyPositive() && magics.m.isNegative()) { 2473 Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0)); 2474 if (Created) 2475 Created->push_back(Q.getNode()); 2476 } 2477 // If d < 0 and m > 0, subtract the numerator. 2478 if (d.isNegative() && magics.m.isStrictlyPositive()) { 2479 Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0)); 2480 if (Created) 2481 Created->push_back(Q.getNode()); 2482 } 2483 // Shift right algebraic if shift value is nonzero 2484 if (magics.s > 0) { 2485 Q = DAG.getNode(ISD::SRA, dl, VT, Q, 2486 DAG.getConstant(magics.s, getShiftAmountTy())); 2487 if (Created) 2488 Created->push_back(Q.getNode()); 2489 } 2490 // Extract the sign bit and add it to the quotient 2491 SDValue T = 2492 DAG.getNode(ISD::SRL, dl, VT, Q, DAG.getConstant(VT.getSizeInBits()-1, 2493 getShiftAmountTy())); 2494 if (Created) 2495 Created->push_back(T.getNode()); 2496 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 2497} 2498 2499/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant, 2500/// return a DAG expression to select that will generate the same value by 2501/// multiplying by a magic number. See: 2502/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 2503SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 2504 std::vector<SDNode*>* Created) const { 2505 MVT VT = N->getValueType(0); 2506 DebugLoc dl = N->getDebugLoc(); 2507 2508 // Check to see if we can do this. 2509 // FIXME: We should be more aggressive here. 2510 if (!isTypeLegal(VT)) 2511 return SDValue(); 2512 2513 // FIXME: We should use a narrower constant when the upper 2514 // bits are known to be zero. 2515 ConstantSDNode *N1C = cast<ConstantSDNode>(N->getOperand(1)); 2516 APInt::mu magics = N1C->getAPIntValue().magicu(); 2517 2518 // Multiply the numerator (operand 0) by the magic value 2519 // FIXME: We should support doing a MUL in a wider type 2520 SDValue Q; 2521 if (isOperationLegalOrCustom(ISD::MULHU, VT)) 2522 Q = DAG.getNode(ISD::MULHU, dl, VT, N->getOperand(0), 2523 DAG.getConstant(magics.m, VT)); 2524 else if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) 2525 Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), 2526 N->getOperand(0), 2527 DAG.getConstant(magics.m, VT)).getNode(), 1); 2528 else 2529 return SDValue(); // No mulhu or equvialent 2530 if (Created) 2531 Created->push_back(Q.getNode()); 2532 2533 if (magics.a == 0) { 2534 assert(magics.s < N1C->getAPIntValue().getBitWidth() && 2535 "We shouldn't generate an undefined shift!"); 2536 return DAG.getNode(ISD::SRL, dl, VT, Q, 2537 DAG.getConstant(magics.s, getShiftAmountTy())); 2538 } else { 2539 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q); 2540 if (Created) 2541 Created->push_back(NPQ.getNode()); 2542 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, 2543 DAG.getConstant(1, getShiftAmountTy())); 2544 if (Created) 2545 Created->push_back(NPQ.getNode()); 2546 NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q); 2547 if (Created) 2548 Created->push_back(NPQ.getNode()); 2549 return DAG.getNode(ISD::SRL, dl, VT, NPQ, 2550 DAG.getConstant(magics.s-1, getShiftAmountTy())); 2551 } 2552} 2553 2554/// IgnoreHarmlessInstructions - Ignore instructions between a CALL and RET 2555/// node that don't prevent tail call optimization. 2556static SDValue IgnoreHarmlessInstructions(SDValue node) { 2557 // Found call return. 2558 if (node.getOpcode() == ISD::CALL) return node; 2559 // Ignore MERGE_VALUES. Will have at least one operand. 2560 if (node.getOpcode() == ISD::MERGE_VALUES) 2561 return IgnoreHarmlessInstructions(node.getOperand(0)); 2562 // Ignore ANY_EXTEND node. 2563 if (node.getOpcode() == ISD::ANY_EXTEND) 2564 return IgnoreHarmlessInstructions(node.getOperand(0)); 2565 if (node.getOpcode() == ISD::TRUNCATE) 2566 return IgnoreHarmlessInstructions(node.getOperand(0)); 2567 // Any other node type. 2568 return node; 2569} 2570 2571bool TargetLowering::CheckTailCallReturnConstraints(CallSDNode *TheCall, 2572 SDValue Ret) { 2573 unsigned NumOps = Ret.getNumOperands(); 2574 // ISD::CALL results:(value0, ..., valuen, chain) 2575 // ISD::RET operands:(chain, value0, flag0, ..., valuen, flagn) 2576 // Value return: 2577 // Check that operand of the RET node sources from the CALL node. The RET node 2578 // has at least two operands. Operand 0 holds the chain. Operand 1 holds the 2579 // value. 2580 if (NumOps > 1 && 2581 IgnoreHarmlessInstructions(Ret.getOperand(1)) == SDValue(TheCall,0)) 2582 return true; 2583 // void return: The RET node has the chain result value of the CALL node as 2584 // input. 2585 if (NumOps == 1 && 2586 Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1)) 2587 return true; 2588 2589 return false; 2590} 2591