TargetLowering.cpp revision 0c3e67860af417febb1fa9e870ece912a16085ac
//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cctype>
using namespace llvm;

/// We are in the process of implementing a new TypeLegalization action
/// - the promotion of vector elements. This feature is disabled by default
/// and only enabled using this flag.
static cl::opt<bool>
AllowPromoteIntElem("promote-elements", cl::Hidden,
  cl::desc("Allow promotion of integer vector element types"));

namespace llvm {
TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc) {
  bool isLocal = GV->hasLocalLinkage();
  bool isDeclaration = GV->isDeclaration();
  // FIXME: what should we do for protected and internal visibility?
  // For variables, is internal different from hidden?
  bool isHidden = GV->hasHiddenVisibility();

  if (reloc == Reloc::PIC_) {
    if (isLocal || isHidden)
      return TLSModel::LocalDynamic;
    else
      return TLSModel::GeneralDynamic;
  } else {
    if (!isDeclaration || isHidden)
      return TLSModel::LocalExec;
    else
      return TLSModel::InitialExec;
  }
}
}

/// InitLibcallNames - Set default libcall names.
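/// The defaults follow libgcc's naming conventions: the GCC machine-mode
/// suffixes qi/hi/si/di/ti denote i8/i16/i32/i64/i128, and sf/df/xf/tf
/// denote f32/f64/f80/ppcf128, so e.g. RTLIB::SHL_I32 maps to "__ashlsi3"
/// below.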
///
static void InitLibcallNames(const char **Names) {
  Names[RTLIB::SHL_I16] = "__ashlhi3";
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SHL_I128] = "__ashlti3";
  Names[RTLIB::SRL_I16] = "__lshrhi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRL_I128] = "__lshrti3";
  Names[RTLIB::SRA_I16] = "__ashrhi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::SRA_I128] = "__ashrti3";
  Names[RTLIB::MUL_I8] = "__mulqi3";
  Names[RTLIB::MUL_I16] = "__mulhi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::MUL_I128] = "__multi3";
  Names[RTLIB::SDIV_I8] = "__divqi3";
  Names[RTLIB::SDIV_I16] = "__divhi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::SDIV_I128] = "__divti3";
  Names[RTLIB::UDIV_I8] = "__udivqi3";
  Names[RTLIB::UDIV_I16] = "__udivhi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::UDIV_I128] = "__udivti3";
  Names[RTLIB::SREM_I8] = "__modqi3";
  Names[RTLIB::SREM_I16] = "__modhi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::SREM_I128] = "__modti3";
  Names[RTLIB::UREM_I8] = "__umodqi3";
  Names[RTLIB::UREM_I16] = "__umodhi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::UREM_I128] = "__umodti3";

  // These are generally not available.
  Names[RTLIB::SDIVREM_I8] = 0;
  Names[RTLIB::SDIVREM_I16] = 0;
  Names[RTLIB::SDIVREM_I32] = 0;
  Names[RTLIB::SDIVREM_I64] = 0;
  Names[RTLIB::SDIVREM_I128] = 0;
  Names[RTLIB::UDIVREM_I8] = 0;
  Names[RTLIB::UDIVREM_I16] = 0;
  Names[RTLIB::UDIVREM_I32] = 0;
  Names[RTLIB::UDIVREM_I64] = 0;
  Names[RTLIB::UDIVREM_I128] = 0;

  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_F80] = "__addxf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_F80] = "__subxf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_F80] = "__mulxf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_F80] = "__divxf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_F80] = "fmodl";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::LOG_F32] = "logf";
  Names[RTLIB::LOG_F64] = "log";
  Names[RTLIB::LOG_F80] = "logl";
  Names[RTLIB::LOG_PPCF128] = "logl";
  Names[RTLIB::LOG2_F32] = "log2f";
  Names[RTLIB::LOG2_F64] = "log2";
  Names[RTLIB::LOG2_F80] = "log2l";
  Names[RTLIB::LOG2_PPCF128] = "log2l";
  Names[RTLIB::LOG10_F32] = "log10f";
  Names[RTLIB::LOG10_F64] = "log10";
  Names[RTLIB::LOG10_F80] = "log10l";
  Names[RTLIB::LOG10_PPCF128] = "log10l";
  Names[RTLIB::EXP_F32] = "expf";
  Names[RTLIB::EXP_F64] = "exp";
  Names[RTLIB::EXP_F80] = "expl";
  Names[RTLIB::EXP_PPCF128] = "expl";
  Names[RTLIB::EXP2_F32] = "exp2f";
  Names[RTLIB::EXP2_F64] = "exp2";
  Names[RTLIB::EXP2_F80] = "exp2l";
  Names[RTLIB::EXP2_PPCF128] = "exp2l";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::SIN_F80] = "sinl";
  Names[RTLIB::SIN_PPCF128] = "sinl";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::COS_F80] = "cosl";
  Names[RTLIB::COS_PPCF128] = "cosl";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::CEIL_F32] = "ceilf";
  Names[RTLIB::CEIL_F64] = "ceil";
  Names[RTLIB::CEIL_F80] = "ceill";
  Names[RTLIB::CEIL_PPCF128] = "ceill";
  Names[RTLIB::TRUNC_F32] = "truncf";
  Names[RTLIB::TRUNC_F64] = "trunc";
  Names[RTLIB::TRUNC_F80] = "truncl";
  Names[RTLIB::TRUNC_PPCF128] = "truncl";
  Names[RTLIB::RINT_F32] = "rintf";
  Names[RTLIB::RINT_F64] = "rint";
  Names[RTLIB::RINT_F80] = "rintl";
  Names[RTLIB::RINT_PPCF128] = "rintl";
  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
  Names[RTLIB::FLOOR_F32] = "floorf";
  Names[RTLIB::FLOOR_F64] = "floor";
  Names[RTLIB::FLOOR_F80] = "floorl";
  Names[RTLIB::FLOOR_PPCF128] = "floorl";
  Names[RTLIB::COPYSIGN_F32] = "copysignf";
  Names[RTLIB::COPYSIGN_F64] = "copysign";
  Names[RTLIB::COPYSIGN_F80] = "copysignl";
  Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
  Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
  Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
  Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
  Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi";
  Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
  Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi";
  Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
  Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi";
  Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
  Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi";
  Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
"__fixunsdfdi"; 236 Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti"; 237 Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi"; 238 Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi"; 239 Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti"; 240 Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi"; 241 Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi"; 242 Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti"; 243 Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf"; 244 Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf"; 245 Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf"; 246 Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf"; 247 Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf"; 248 Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf"; 249 Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf"; 250 Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf"; 251 Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf"; 252 Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf"; 253 Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf"; 254 Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf"; 255 Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf"; 256 Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf"; 257 Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf"; 258 Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf"; 259 Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf"; 260 Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf"; 261 Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf"; 262 Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf"; 263 Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf"; 264 Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf"; 265 Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf"; 266 Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf"; 267 Names[RTLIB::OEQ_F32] = "__eqsf2"; 268 Names[RTLIB::OEQ_F64] = "__eqdf2"; 269 Names[RTLIB::UNE_F32] = "__nesf2"; 270 Names[RTLIB::UNE_F64] = "__nedf2"; 271 Names[RTLIB::OGE_F32] = "__gesf2"; 272 Names[RTLIB::OGE_F64] = "__gedf2"; 273 Names[RTLIB::OLT_F32] = "__ltsf2"; 274 Names[RTLIB::OLT_F64] = "__ltdf2"; 275 Names[RTLIB::OLE_F32] = "__lesf2"; 276 Names[RTLIB::OLE_F64] = "__ledf2"; 277 Names[RTLIB::OGT_F32] = "__gtsf2"; 278 Names[RTLIB::OGT_F64] = "__gtdf2"; 279 Names[RTLIB::UO_F32] = "__unordsf2"; 280 Names[RTLIB::UO_F64] = "__unorddf2"; 281 Names[RTLIB::O_F32] = "__unordsf2"; 282 Names[RTLIB::O_F64] = "__unorddf2"; 283 Names[RTLIB::MEMCPY] = "memcpy"; 284 Names[RTLIB::MEMMOVE] = "memmove"; 285 Names[RTLIB::MEMSET] = "memset"; 286 Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume"; 287 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1"; 288 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2"; 289 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4"; 290 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8"; 291 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1"; 292 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2"; 293 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4"; 294 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8"; 295 Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1"; 296 Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2"; 297 Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4"; 298 Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8"; 299 Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1"; 300 Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2"; 301 
  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
}

/// InitLibcallCallingConvs - Set default libcall CallingConvs.
///
static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
  for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
    CCs[i] = CallingConv::C;
  }
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
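/// For example, getFPTOSINT(MVT::f64, MVT::i32) returns FPTOSINT_F64_I32,
/// whose default name (see InitLibcallNames above) is "__fixdfsi".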
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i8)
      return FPTOSINT_F32_I8;
    if (RetVT == MVT::i16)
      return FPTOSINT_F32_I16;
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i8)
      return FPTOSINT_F64_I8;
    if (RetVT == MVT::i16)
      return FPTOSINT_F64_I16;
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i8)
      return FPTOUINT_F32_I8;
    if (RetVT == MVT::i16)
      return FPTOUINT_F32_I16;
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i8)
      return FPTOUINT_F64_I8;
    if (RetVT == MVT::i16)
      return FPTOUINT_F64_I16;
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
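/// For example, getSINTTOFP(MVT::i64, MVT::f32) returns SINTTOFP_I64_F32,
/// whose default name (see InitLibcallNames above) is "__floatdisf".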
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    else if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    else if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    else if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    else if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    else if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    else if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  // Each comparison libcall returns an int that is tested against zero with
  // the condition code given here; e.g. __eqsf2(a, b) returns 0 iff a == b,
  // so OEQ is recovered with SETEQ.
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
}

/// NOTE: The constructor takes ownership of TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm,
                               const TargetLoweringObjectFile *tlof)
  : TM(tm), TD(TM.getTargetData()), TLOF(*tlof),
    mayPromoteElements(AllowPromoteIntElem) {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));

  // Set default actions for various operations.
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
    }

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);

  // These library functions default to expand.
  setOperationAction(ISD::FLOG ,  MVT::f64, Expand);
  setOperationAction(ISD::FLOG2,  MVT::f64, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);
  setOperationAction(ISD::FEXP ,  MVT::f64, Expand);
  setOperationAction(ISD::FEXP2,  MVT::f64, Expand);
  setOperationAction(ISD::FLOG ,  MVT::f32, Expand);
  setOperationAction(ISD::FLOG2,  MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP ,  MVT::f32, Expand);
  setOperationAction(ISD::FEXP2,  MVT::f32, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  IsLittleEndian = TD->isLittleEndian();
  PointerTy = MVT::getIntegerVT(8*TD->getPointerSize());
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  maxStoresPerMemsetOptSize = maxStoresPerMemcpyOptSize
    = maxStoresPerMemmoveOptSize = 4;
  benefitFromCodePlacementOpt = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  JumpIsExpensive = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  BooleanContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::Latency;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  MinFunctionAlignment = 0;
  PrefFunctionAlignment = 0;
  PrefLoopAlignment = 0;
  MinStackArgumentAlignment = 1;
  ShouldFoldAtomicFences = false;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);
  InitLibcallCallingConvs(LibcallCallingConvs);
}

TargetLowering::~TargetLowering() {
  delete &TLOF;
}

MVT TargetLowering::getShiftAmountTy(EVT LHSTy) const {
  return MVT::getIntegerVT(8*TD->getPointerSize());
}

/// canOpTrap - Returns true if the operation can trap for the value type.
/// VT must be a legal type.
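/// For example, canOpTrap(ISD::SDIV, MVT::i32) returns true, since integer
/// division may trap (e.g. on a zero divisor on many targets), while ordinary
/// arithmetic such as ISD::ADD is assumed never to trap.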
bool TargetLowering::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}


static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          EVT &RegisterVT,
                                          TargetLowering *TLI) {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  EVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLowering::isLegalRC(const TargetRegisterClass *RC) const {
  for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
       I != E; ++I) {
    if (isTypeLegal(*I))
      return true;
  }
  return false;
}

/// hasLegalSuperRegRegClasses - Return true if the specified register class
/// has one or more super-reg register classes that are legal.
bool
TargetLowering::hasLegalSuperRegRegClasses(const TargetRegisterClass *RC) const{
  if (*RC->superregclasses_begin() == 0)
    return false;
  for (TargetRegisterInfo::regclass_iterator I = RC->superregclasses_begin(),
         E = RC->superregclasses_end(); I != E; ++I) {
    const TargetRegisterClass *RRC = *I;
    if (isLegalRC(RRC))
      return true;
  }
  return false;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
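/// For example, on x86-64 the largest legal super-reg register class of GR8
/// is GR64, so this would return GR64 for MVT::i8 with a cost of 1 (see also
/// the 'representative' register class comment in computeRegisterProperties
/// below).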
std::pair<const TargetRegisterClass*, uint8_t>
TargetLowering::findRepresentativeClass(EVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);
  const TargetRegisterClass *BestRC = RC;
  for (TargetRegisterInfo::regclass_iterator I = RC->superregclasses_begin(),
         E = RC->superregclasses_end(); I != E; ++I) {
    const TargetRegisterClass *RRC = *I;
    if (RRC->isASubClass() || !isLegalRC(RRC))
      continue;
    if (!hasLegalSuperRegRegClasses(RRC))
      return std::make_pair(RRC, 1);
    BestRC = RRC;
  }
  return std::make_pair(BestRC, 1);
}


/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
    EVT ExpandedVT = (MVT::SimpleValueType)ExpandedReg;
    if (!ExpandedVT.isInteger())
      break;
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction(ExpandedVT, TypeExpandInteger);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    EVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
        (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
    TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
  }

  // Decide how to handle f64.  If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32.  If the target does not have native support for
  // f32, promote it to f64 if it is legal.  Otherwise, expand it to i32.
  if (!isTypeLegal(MVT::f32)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
      TransformToType[MVT::f32] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::f32, TypePromoteInteger);
    } else {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
      TransformToType[MVT::f32] = MVT::i32;
      ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (isTypeLegal(VT)) continue;

    // Determine if there is a legal wider type.  If so, we should promote to
    // that wider vector type.
    EVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    if (NElts != 1) {
      bool IsLegalWiderType = false;
      // If we allow the promotion of vector elements using a flag,
      // then return TypePromoteInteger on vector elements.
      // First try to promote the elements of integer vectors.  If no legal
      // promotion was found, fall back to the widen-vector method.
      if (mayPromoteElements)
        for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          EVT SVT = (MVT::SimpleValueType)nVT;
          // Promote vectors of integers to vectors with the same number
          // of elements, with a wider element type.
          if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
              && SVT.getVectorNumElements() == NElts &&
              isTypeLegal(SVT) && SVT.getScalarType().isInteger()) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
            IsLegalWiderType = true;
            break;
          }
        }

      if (IsLegalWiderType) continue;

      // Try to widen the vector.
      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        EVT SVT = (MVT::SimpleValueType)nVT;
        if (SVT.getVectorElementType() == EltVT &&
            SVT.getVectorNumElements() > NElts &&
            isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType) continue;
    }

    MVT IntermediateVT;
    EVT RegisterVT;
    unsigned NumIntermediates;
    NumRegistersForVT[i] =
      getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
                                RegisterVT, this);
    RegisterTypeForVT[i] = RegisterVT;

    EVT NVT = VT.getPow2VectorType();
    if (NVT == VT) {
      // Type is already a power of 2.  The default action is to split.
      TransformToType[i] = MVT::Other;
      unsigned NumElts = VT.getVectorNumElements();
      ValueTypeActions.setTypeAction(VT,
            NumElts > 1 ? TypeSplitVector : TypeScalarizeVector);
    } else {
      TransformToType[i] = NVT;
      ValueTypeActions.setTypeAction(VT, TypeWidenVector);
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest legal register class for a
  // group of value types (meaning one which is not a sub-register class of
  // another legal class).  For example, on i386 the representative class for
  // i8, i16, and i32 would be GR32, while on x86_64 it is GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}


MVT::SimpleValueType TargetLowering::getSetCCResultType(EVT VT) const {
  return PointerTy.SimpleTy;
}

MVT::SimpleValueType TargetLowering::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                EVT &RegisterVT) const {
  unsigned NumElts = VT.getVectorNumElements();

  // If there is a wider vector type with the same element type as this one,
  // we should widen to that legal vector type.  This handles things like
  // <2 x float> -> <4 x float>.
  if (NumElts != 1 && getTypeAction(Context, VT) == TypeWidenVector) {
    RegisterVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterVT)) {
      IntermediateVT = RegisterVT;
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  EVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;
  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (DestVT.bitsLT(NewVT))   // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function.  This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI,
                         SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;
  unsigned Offset = 0;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr & Attribute::SExt)
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr & Attribute::ZExt)
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit.  But this is not necessary for non-C calling
    // conventions.  The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
    EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
    unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
                        PartVT.getTypeForEVT(ReturnType->getContext()));

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr & Attribute::InReg)
      Flags.setInReg();

    // Propagate extension type if any
    if (attr & Attribute::SExt)
      Flags.setSExt();
    else if (attr & Attribute::ZExt)
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i) {
      Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true));
      if (Offsets) {
        Offsets->push_back(Offset);
        Offset += PartSize;
      }
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  return TD->getCallFrameTypeAlignment(Ty);
}

/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function.  The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != 0)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  if (getJumpTableEncoding() == MachineJumpTableInfo::EK_GPRel32BlockAddress)
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy());
  return Table;
}

/// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
/// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
/// MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::Create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // Assume that everything is safe in static mode.
  if (getTargetMachine().getRelocationModel() == Reloc::Static)
    return true;

  // In dynamic-no-pic mode, assume that known defined values are safe.
  if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC &&
      GA &&
      !GA->getGlobal()->isDeclaration() &&
      !GA->getGlobal()->isWeakForLinker())
    return true;

  // Otherwise assume nothing is safe.
  return false;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
                                                        const APInt &Demanded) {
  DebugLoc dl = Op.getDebugLoc();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!C) return false;

    if (Op.getOpcode() == ISD::XOR &&
        (C->getAPIntValue() | (~Demanded)).isAllOnesValue())
      return false;

    // If the constant has bits set outside the demanded mask, clear them and
    // use the shrunken constant instead.
    if (C->getAPIntValue().intersects(~Demanded)) {
      EVT VT = Op.getValueType();
      SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
                                DAG.getConstant(Demanded &
                                                C->getAPIntValue(),
                                                VT));
      return CombineTo(Op, New);
    }

    break;
  }
  }

  return false;
}

/// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
/// casts are free.  This uses isZExtFree and ZERO_EXTEND for the widening
/// cast, but it could be generalized for targets with other types of
/// implicit widening casts.
bool
TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
                                                    unsigned BitWidth,
                                                    const APInt &Demanded,
                                                    DebugLoc dl) {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type.  For expedience, just check power-of-2 integer types.
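  // A worked example: with BitWidth == 32 and Demanded == 0xFF,
  // countLeadingZeros() is 24, so SmallVTBits starts at 8 and the loop below
  // tries i8 and then i16 before giving up at the full 32-bit width.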
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned SmallVTBits = BitWidth - Demanded.countLeadingZeros();
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT,
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(0)),
                              DAG.getNode(ISD::TRUNCATE, dl, SmallVT,
                                          Op.getNode()->getOperand(1)));
      SDValue Z = DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), X);
      return CombineTo(Op, Z);
    }
  }
  return false;
}

/// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream.  If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New.
/// Otherwise, analyze the expression and return a mask of KnownOne and
/// KnownZero bits for the expression (used to simplify the caller).  The
/// KnownZero/One bits may only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedMask,
                                          APInt &KnownZero,
                                          APInt &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  unsigned BitWidth = DemandedMask.getBitWidth();
  assert(Op.getValueType().getScalarType().getSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");
  APInt NewMask = DemandedMask;
  DebugLoc dl = Op.getDebugLoc();

  // Don't know anything.
  KnownZero = KnownOne = APInt(BitWidth, 0);

  // Other users may use these bits.
  if (!Op.getNode()->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the NewMask to all bits.
    NewMask = APInt::getAllOnesValue(BitWidth);
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask;
    KnownZero = ~KnownOne & NewMask;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS.  Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt LHSZero, LHSOne;
      // Do not increment Depth here; that can cause an infinite loop.
      TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
                                LHSZero, LHSOne, Depth);
      // If the LHS already has zeros where RHSC does, this and is dead.
      if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'or'.
    if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'xor'.
    if ((KnownZero & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((KnownZero2 & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the operation can be done in a smaller type, do so.
    if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
      if ((KnownOne & KnownOne2) == KnownOne) {
        EVT VT = Op.getValueType();
        SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
                                                 Op.getOperand(0), ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // for XOR, we prefer to force bits to 1 if they will make a -1.
    // if we can't force bits, try to shrink constant
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt Expanded = C->getAPIntValue() | (~NewMask);
      // if we can expand it to have all bits set, do it
      if (Expanded.isAllOnesValue()) {
        if (Expanded != C->getAPIntValue()) {
          EVT VT = Op.getValueType();
          SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl, VT,
                                        Op.getOperand(0),
                                        TLO.DAG.getConstant(Expanded, VT));
          return TLO.CombineTo(Op, New);
        }
        // if it already has all the bits set, nothing to change
        // but don't shrink either!
      } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
        return true;
      }
    }

    KnownZero = KnownZeroOut;
    KnownOne  = KnownOneOut;
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();
      SDValue InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift.  We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (InOp.getOpcode() == ISD::SRL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
          unsigned C1 =
            cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue();
          unsigned Opc = ISD::SHL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SRL;
          }

          SDValue NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          EVT VT = Op.getValueType();
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded.  This will likely allow the anyext to be folded away.
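      // For example, if Op is (i32 (shl (any_extend i16:x), 4)) and only the
      // low 16 bits of the result are demanded, the shift can be performed
      // in i16 and the any_extend moved outside the shl.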
1463 if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) { 1464 SDValue InnerOp = InOp.getNode()->getOperand(0); 1465 EVT InnerVT = InnerOp.getValueType(); 1466 if ((APInt::getHighBitsSet(BitWidth, 1467 BitWidth - InnerVT.getSizeInBits()) & 1468 DemandedMask) == 0 && 1469 isTypeDesirableForOp(ISD::SHL, InnerVT)) { 1470 EVT ShTy = getShiftAmountTy(InnerVT); 1471 if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits())) 1472 ShTy = InnerVT; 1473 SDValue NarrowShl = 1474 TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp, 1475 TLO.DAG.getConstant(ShAmt, ShTy)); 1476 return 1477 TLO.CombineTo(Op, 1478 TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), 1479 NarrowShl)); 1480 } 1481 } 1482 1483 KnownZero <<= SA->getZExtValue(); 1484 KnownOne <<= SA->getZExtValue(); 1485 // low bits known zero. 1486 KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue()); 1487 } 1488 break; 1489 case ISD::SRL: 1490 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1491 EVT VT = Op.getValueType(); 1492 unsigned ShAmt = SA->getZExtValue(); 1493 unsigned VTSize = VT.getSizeInBits(); 1494 SDValue InOp = Op.getOperand(0); 1495 1496 // If the shift count is an invalid immediate, don't do anything. 1497 if (ShAmt >= BitWidth) 1498 break; 1499 1500 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1501 // single shift. We can do this if the top bits (which are shifted out) 1502 // are never demanded. 1503 if (InOp.getOpcode() == ISD::SHL && 1504 isa<ConstantSDNode>(InOp.getOperand(1))) { 1505 if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) { 1506 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 1507 unsigned Opc = ISD::SRL; 1508 int Diff = ShAmt-C1; 1509 if (Diff < 0) { 1510 Diff = -Diff; 1511 Opc = ISD::SHL; 1512 } 1513 1514 SDValue NewSA = 1515 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 1516 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, 1517 InOp.getOperand(0), NewSA)); 1518 } 1519 } 1520 1521 // Compute the new bits that are at the top now. 1522 if (SimplifyDemandedBits(InOp, (NewMask << ShAmt), 1523 KnownZero, KnownOne, TLO, Depth+1)) 1524 return true; 1525 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1526 KnownZero = KnownZero.lshr(ShAmt); 1527 KnownOne = KnownOne.lshr(ShAmt); 1528 1529 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1530 KnownZero |= HighBits; // High bits known zero. 1531 } 1532 break; 1533 case ISD::SRA: 1534 // If this is an arithmetic shift right and only the low-bit is set, we can 1535 // always convert this into a logical shr, even if the shift amount is 1536 // variable. The low bit of the shift cannot be an input sign bit unless 1537 // the shift amount is >= the size of the datatype, which is undefined. 1538 if (DemandedMask == 1) 1539 return TLO.CombineTo(Op, 1540 TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(), 1541 Op.getOperand(0), Op.getOperand(1))); 1542 1543 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1544 EVT VT = Op.getValueType(); 1545 unsigned ShAmt = SA->getZExtValue(); 1546 1547 // If the shift count is an invalid immediate, don't do anything. 1548 if (ShAmt >= BitWidth) 1549 break; 1550 1551 APInt InDemandedMask = (NewMask << ShAmt); 1552 1553 // If any of the demanded bits are produced by the sign extension, we also 1554 // demand the input sign bit. 
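      // (e.g. for an illustrative i8 (sra X, 3), result bits 5..7 are all
      // copies of the input sign bit X[7], so a caller demanding bit 6 is
      // implicitly demanding X[7] in addition to (NewMask << 3).)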
1555 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1556 if (HighBits.intersects(NewMask)) 1557 InDemandedMask |= APInt::getSignBit(VT.getScalarType().getSizeInBits()); 1558 1559 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, 1560 KnownZero, KnownOne, TLO, Depth+1)) 1561 return true; 1562 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1563 KnownZero = KnownZero.lshr(ShAmt); 1564 KnownOne = KnownOne.lshr(ShAmt); 1565 1566 // Handle the sign bit, adjusted to where it is now in the mask. 1567 APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt); 1568 1569 // If the input sign bit is known to be zero, or if none of the top bits 1570 // are demanded, turn this into an unsigned shift right. 1571 if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) { 1572 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, 1573 Op.getOperand(0), 1574 Op.getOperand(1))); 1575 } else if (KnownOne.intersects(SignBit)) { // New bits are known one. 1576 KnownOne |= HighBits; 1577 } 1578 } 1579 break; 1580 case ISD::SIGN_EXTEND_INREG: { 1581 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1582 1583 // Sign extension. Compute the demanded bits in the result that are not 1584 // present in the input. 1585 APInt NewBits = 1586 APInt::getHighBitsSet(BitWidth, 1587 BitWidth - EVT.getScalarType().getSizeInBits()); 1588 1589 // If none of the extended bits are demanded, eliminate the sextinreg. 1590 if ((NewBits & NewMask) == 0) 1591 return TLO.CombineTo(Op, Op.getOperand(0)); 1592 1593 APInt InSignBit = 1594 APInt::getSignBit(EVT.getScalarType().getSizeInBits()).zext(BitWidth); 1595 APInt InputDemandedBits = 1596 APInt::getLowBitsSet(BitWidth, 1597 EVT.getScalarType().getSizeInBits()) & 1598 NewMask; 1599 1600 // Since the sign extended bits are demanded, we know that the sign 1601 // bit is demanded. 1602 InputDemandedBits |= InSignBit; 1603 1604 if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits, 1605 KnownZero, KnownOne, TLO, Depth+1)) 1606 return true; 1607 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1608 1609 // If the sign bit of the input is known set or clear, then we know the 1610 // top bits of the result. 1611 1612 // If the input sign bit is known zero, convert this into a zero extension. 1613 if (KnownZero.intersects(InSignBit)) 1614 return TLO.CombineTo(Op, 1615 TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,EVT)); 1616 1617 if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 1618 KnownOne |= NewBits; 1619 KnownZero &= ~NewBits; 1620 } else { // Input sign bit unknown 1621 KnownZero &= ~NewBits; 1622 KnownOne &= ~NewBits; 1623 } 1624 break; 1625 } 1626 case ISD::ZERO_EXTEND: { 1627 unsigned OperandBitWidth = 1628 Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 1629 APInt InMask = NewMask.trunc(OperandBitWidth); 1630 1631 // If none of the top bits are demanded, convert this into an any_extend. 
1632 APInt NewBits = 1633 APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask; 1634 if (!NewBits.intersects(NewMask)) 1635 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, 1636 Op.getValueType(), 1637 Op.getOperand(0))); 1638 1639 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1640 KnownZero, KnownOne, TLO, Depth+1)) 1641 return true; 1642 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1643 KnownZero = KnownZero.zext(BitWidth); 1644 KnownOne = KnownOne.zext(BitWidth); 1645 KnownZero |= NewBits; 1646 break; 1647 } 1648 case ISD::SIGN_EXTEND: { 1649 EVT InVT = Op.getOperand(0).getValueType(); 1650 unsigned InBits = InVT.getScalarType().getSizeInBits(); 1651 APInt InMask = APInt::getLowBitsSet(BitWidth, InBits); 1652 APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits); 1653 APInt NewBits = ~InMask & NewMask; 1654 1655 // If none of the top bits are demanded, convert this into an any_extend. 1656 if (NewBits == 0) 1657 return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl, 1658 Op.getValueType(), 1659 Op.getOperand(0))); 1660 1661 // Since some of the sign extended bits are demanded, we know that the sign 1662 // bit is demanded. 1663 APInt InDemandedBits = InMask & NewMask; 1664 InDemandedBits |= InSignBit; 1665 InDemandedBits = InDemandedBits.trunc(InBits); 1666 1667 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero, 1668 KnownOne, TLO, Depth+1)) 1669 return true; 1670 KnownZero = KnownZero.zext(BitWidth); 1671 KnownOne = KnownOne.zext(BitWidth); 1672 1673 // If the sign bit is known zero, convert this to a zero extend. 1674 if (KnownZero.intersects(InSignBit)) 1675 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, 1676 Op.getValueType(), 1677 Op.getOperand(0))); 1678 1679 // If the sign bit is known one, the top bits match. 1680 if (KnownOne.intersects(InSignBit)) { 1681 KnownOne |= NewBits; 1682 KnownZero &= ~NewBits; 1683 } else { // Otherwise, top bits aren't known. 1684 KnownOne &= ~NewBits; 1685 KnownZero &= ~NewBits; 1686 } 1687 break; 1688 } 1689 case ISD::ANY_EXTEND: { 1690 unsigned OperandBitWidth = 1691 Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 1692 APInt InMask = NewMask.trunc(OperandBitWidth); 1693 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1694 KnownZero, KnownOne, TLO, Depth+1)) 1695 return true; 1696 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1697 KnownZero = KnownZero.zext(BitWidth); 1698 KnownOne = KnownOne.zext(BitWidth); 1699 break; 1700 } 1701 case ISD::TRUNCATE: { 1702 // Simplify the input, using demanded bit information, and compute the known 1703 // zero/one bits live out. 1704 unsigned OperandBitWidth = 1705 Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 1706 APInt TruncMask = NewMask.zext(OperandBitWidth); 1707 if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, 1708 KnownZero, KnownOne, TLO, Depth+1)) 1709 return true; 1710 KnownZero = KnownZero.trunc(BitWidth); 1711 KnownOne = KnownOne.trunc(BitWidth); 1712 1713 // If the input is only used by this truncate, see if we can shrink it based 1714 // on the known demanded bits. 1715 if (Op.getOperand(0).getNode()->hasOneUse()) { 1716 SDValue In = Op.getOperand(0); 1717 switch (In.getOpcode()) { 1718 default: break; 1719 case ISD::SRL: 1720 // Shrink SRL by a constant if none of the high bits shifted in are 1721 // demanded. 
1722 if (TLO.LegalTypes() && 1723 !isTypeDesirableForOp(ISD::SRL, Op.getValueType())) 1724 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 1725 // undesirable. 1726 break; 1727 ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1)); 1728 if (!ShAmt) 1729 break; 1730 SDValue Shift = In.getOperand(1); 1731 if (TLO.LegalTypes()) { 1732 uint64_t ShVal = ShAmt->getZExtValue(); 1733 Shift = 1734 TLO.DAG.getConstant(ShVal, getShiftAmountTy(Op.getValueType())); 1735 } 1736 1737 APInt HighBits = APInt::getHighBitsSet(OperandBitWidth, 1738 OperandBitWidth - BitWidth); 1739 HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth); 1740 1741 if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) { 1742 // None of the shifted in bits are needed. Add a truncate of the 1743 // shift input, then shift it. 1744 SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl, 1745 Op.getValueType(), 1746 In.getOperand(0)); 1747 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, 1748 Op.getValueType(), 1749 NewTrunc, 1750 Shift)); 1751 } 1752 break; 1753 } 1754 } 1755 1756 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1757 break; 1758 } 1759 case ISD::AssertZext: { 1760 // Demand all the bits of the input that are demanded in the output. 1761 // The low bits are obvious; the high bits are demanded because we're 1762 // asserting that they're zero here. 1763 if (SimplifyDemandedBits(Op.getOperand(0), NewMask, 1764 KnownZero, KnownOne, TLO, Depth+1)) 1765 return true; 1766 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1767 1768 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1769 APInt InMask = APInt::getLowBitsSet(BitWidth, 1770 VT.getSizeInBits()); 1771 KnownZero |= ~InMask & NewMask; 1772 break; 1773 } 1774 case ISD::BITCAST: 1775 // If this is an FP->Int bitcast and if the sign bit is the only 1776 // thing demanded, turn this into a FGETSIGN. 1777 if (!Op.getOperand(0).getValueType().isVector() && 1778 NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) && 1779 Op.getOperand(0).getValueType().isFloatingPoint()) { 1780 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType()); 1781 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 1782 if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple()) { 1783 EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32; 1784 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 1785 // place. We expect the SHL to be eliminated by other optimizations. 1786 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0)); 1787 unsigned OpVTSizeInBits = Op.getValueType().getSizeInBits(); 1788 if (!OpVTLegal && OpVTSizeInBits > 32) 1789 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign); 1790 unsigned ShVal = Op.getValueType().getSizeInBits()-1; 1791 SDValue ShAmt = TLO.DAG.getConstant(ShVal, Op.getValueType()); 1792 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, 1793 Op.getValueType(), 1794 Sign, ShAmt)); 1795 } 1796 } 1797 break; 1798 case ISD::ADD: 1799 case ISD::MUL: 1800 case ISD::SUB: { 1801 // Add, Sub, and Mul don't demand any bits in positions beyond that 1802 // of the highest bit demanded of them. 
1803 APInt LoMask = APInt::getLowBitsSet(BitWidth, 1804 BitWidth - NewMask.countLeadingZeros()); 1805 if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2, 1806 KnownOne2, TLO, Depth+1)) 1807 return true; 1808 if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2, 1809 KnownOne2, TLO, Depth+1)) 1810 return true; 1811 // See if the operation should be performed at a smaller bit width. 1812 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1813 return true; 1814 } 1815 // FALL THROUGH 1816 default: 1817 // Just use ComputeMaskedBits to compute output bits. 1818 TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth); 1819 break; 1820 } 1821 1822 // If we know the value of all of the demanded bits, return this as a 1823 // constant. 1824 if ((NewMask & (KnownZero|KnownOne)) == NewMask) 1825 return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType())); 1826 1827 return false; 1828} 1829 1830/// computeMaskedBitsForTargetNode - Determine which of the bits specified 1831/// in Mask are known to be either zero or one and return them in the 1832/// KnownZero/KnownOne bitsets. 1833void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 1834 const APInt &Mask, 1835 APInt &KnownZero, 1836 APInt &KnownOne, 1837 const SelectionDAG &DAG, 1838 unsigned Depth) const { 1839 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1840 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1841 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1842 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1843 "Should use MaskedValueIsZero if you don't know whether Op" 1844 " is a target node!"); 1845 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 1846} 1847 1848/// ComputeNumSignBitsForTargetNode - This method can be implemented by 1849/// targets that want to expose additional information about sign bits to the 1850/// DAG Combiner. 1851unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 1852 unsigned Depth) const { 1853 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1854 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1855 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1856 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1857 "Should use ComputeNumSignBits if you don't know whether Op" 1858 " is a target node!"); 1859 return 1; 1860} 1861 1862/// ValueHasExactlyOneBitSet - Test if the given value is known to have exactly 1863/// one bit set. This differs from ComputeMaskedBits in that it doesn't need to 1864/// determine which bit is set. 1865/// 1866static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) { 1867 // A left-shift of a constant one will have exactly one bit set, because 1868 // shifting the bit off the end is undefined. 1869 if (Val.getOpcode() == ISD::SHL) 1870 if (ConstantSDNode *C = 1871 dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0))) 1872 if (C->getAPIntValue() == 1) 1873 return true; 1874 1875 // Similarly, a right-shift of a constant sign-bit will have exactly 1876 // one bit set. 1877 if (Val.getOpcode() == ISD::SRL) 1878 if (ConstantSDNode *C = 1879 dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0))) 1880 if (C->getAPIntValue().isSignBit()) 1881 return true; 1882 1883 // More could be done here, though the above checks are enough 1884 // to handle some common cases. 1885 1886 // Fall back to ComputeMaskedBits to catch other known cases. 
1887 EVT OpVT = Val.getValueType(); 1888 unsigned BitWidth = OpVT.getScalarType().getSizeInBits(); 1889 APInt Mask = APInt::getAllOnesValue(BitWidth); 1890 APInt KnownZero, KnownOne; 1891 DAG.ComputeMaskedBits(Val, Mask, KnownZero, KnownOne); 1892 return (KnownZero.countPopulation() == BitWidth - 1) && 1893 (KnownOne.countPopulation() == 1); 1894} 1895 1896/// SimplifySetCC - Try to simplify a setcc built with the specified operands 1897/// and cc. If it is unable to simplify it, return a null SDValue. 1898SDValue 1899TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 1900 ISD::CondCode Cond, bool foldBooleans, 1901 DAGCombinerInfo &DCI, DebugLoc dl) const { 1902 SelectionDAG &DAG = DCI.DAG; 1903 1904 // These setcc operations always fold. 1905 switch (Cond) { 1906 default: break; 1907 case ISD::SETFALSE: 1908 case ISD::SETFALSE2: return DAG.getConstant(0, VT); 1909 case ISD::SETTRUE: 1910 case ISD::SETTRUE2: return DAG.getConstant(1, VT); 1911 } 1912 1913 // Ensure that the constant occurs on the RHS, and fold constant 1914 // comparisons. 1915 if (isa<ConstantSDNode>(N0.getNode())) 1916 return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond)); 1917 1918 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 1919 const APInt &C1 = N1C->getAPIntValue(); 1920 1921 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 1922 // equality comparison, then we're just comparing whether X itself is 1923 // zero. 1924 if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) && 1925 N0.getOperand(0).getOpcode() == ISD::CTLZ && 1926 N0.getOperand(1).getOpcode() == ISD::Constant) { 1927 const APInt &ShAmt 1928 = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 1929 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1930 ShAmt == Log2_32(N0.getValueType().getSizeInBits())) { 1931 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 1932 // (srl (ctlz x), 5) == 0 -> X != 0 1933 // (srl (ctlz x), 5) != 1 -> X != 0 1934 Cond = ISD::SETNE; 1935 } else { 1936 // (srl (ctlz x), 5) != 0 -> X == 0 1937 // (srl (ctlz x), 5) == 1 -> X == 0 1938 Cond = ISD::SETEQ; 1939 } 1940 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 1941 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), 1942 Zero, Cond); 1943 } 1944 } 1945 1946 SDValue CTPOP = N0; 1947 // Look through truncs that don't change the value of a ctpop. 1948 if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE) 1949 CTPOP = N0.getOperand(0); 1950 1951 if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP && 1952 (N0 == CTPOP || N0.getValueType().getSizeInBits() > 1953 Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) { 1954 EVT CTVT = CTPOP.getValueType(); 1955 SDValue CTOp = CTPOP.getOperand(0); 1956 1957 // (ctpop x) u< 2 -> (x & x-1) == 0 1958 // (ctpop x) u> 1 -> (x & x-1) != 0 1959 if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){ 1960 SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp, 1961 DAG.getConstant(1, CTVT)); 1962 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub); 1963 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 1964 return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, CTVT), CC); 1965 } 1966 1967 // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal. 
1968     }
1969
1970     // (zext x) == C --> x == (trunc C)
1971     if (DCI.isBeforeLegalize() && N0->hasOneUse() &&
1972         (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
1973       unsigned MinBits = N0.getValueSizeInBits();
1974       SDValue PreZExt;
1975       if (N0->getOpcode() == ISD::ZERO_EXTEND) {
1976         // ZExt
1977         MinBits = N0->getOperand(0).getValueSizeInBits();
1978         PreZExt = N0->getOperand(0);
1979       } else if (N0->getOpcode() == ISD::AND) {
1980         // DAGCombine turns costly ZExts into ANDs
1981         if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0->getOperand(1)))
1982           if ((C->getAPIntValue()+1).isPowerOf2()) {
1983             MinBits = C->getAPIntValue().countTrailingOnes();
1984             PreZExt = N0->getOperand(0);
1985           }
1986       } else if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(N0)) {
1987         // ZEXTLOAD
1988         if (LN0->getExtensionType() == ISD::ZEXTLOAD) {
1989           MinBits = LN0->getMemoryVT().getSizeInBits();
1990           PreZExt = N0;
1991         }
1992       }
1993
1994       // Make sure we're not losing bits from the constant.
1995       if (MinBits < C1.getBitWidth() && MinBits > C1.getActiveBits()) {
1996         EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits);
1997         if (isTypeDesirableForOp(ISD::SETCC, MinVT)) {
1998           // Will get folded away.
1999           SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreZExt);
2000           SDValue C = DAG.getConstant(C1.trunc(MinBits), MinVT);
2001           return DAG.getSetCC(dl, VT, Trunc, C, Cond);
2002         }
2003       }
2004     }
2005
2006     // If the LHS is '(and load, const)', the RHS is 0,
2007     // the test is for equality or unsigned, and all 1 bits of the const are
2008     // in the same partial word, see if we can shorten the load.
2009     if (DCI.isBeforeLegalize() &&
2010         N0.getOpcode() == ISD::AND && C1 == 0 &&
2011         N0.getNode()->hasOneUse() &&
2012         isa<LoadSDNode>(N0.getOperand(0)) &&
2013         N0.getOperand(0).getNode()->hasOneUse() &&
2014         isa<ConstantSDNode>(N0.getOperand(1))) {
2015       LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
2016       APInt bestMask;
2017       unsigned bestWidth = 0, bestOffset = 0;
2018       if (!Lod->isVolatile() && Lod->isUnindexed()) {
2019         unsigned origWidth = N0.getValueType().getSizeInBits();
2020         unsigned maskWidth = origWidth;
2021         // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
2022         // 8 bits, but have to be careful...
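        // e.g. for a hypothetical ((i32 load P) & 0xFF00) == 0 on a
        // little-endian target, all of the set mask bits fit in the low
        // half-word, so an i16 load of P masked with 0xFF00 decides the
        // same comparison with a narrower load.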
2023 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 2024 origWidth = Lod->getMemoryVT().getSizeInBits(); 2025 const APInt &Mask = 2026 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 2027 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 2028 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 2029 for (unsigned offset=0; offset<origWidth/width; offset++) { 2030 if ((newMask & Mask) == Mask) { 2031 if (!TD->isLittleEndian()) 2032 bestOffset = (origWidth/width - offset - 1) * (width/8); 2033 else 2034 bestOffset = (uint64_t)offset * (width/8); 2035 bestMask = Mask.lshr(offset * (width/8) * 8); 2036 bestWidth = width; 2037 break; 2038 } 2039 newMask = newMask << width; 2040 } 2041 } 2042 } 2043 if (bestWidth) { 2044 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 2045 if (newVT.isRound()) { 2046 EVT PtrType = Lod->getOperand(1).getValueType(); 2047 SDValue Ptr = Lod->getBasePtr(); 2048 if (bestOffset != 0) 2049 Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(), 2050 DAG.getConstant(bestOffset, PtrType)); 2051 unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset); 2052 SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 2053 Lod->getPointerInfo().getWithOffset(bestOffset), 2054 false, false, NewAlign); 2055 return DAG.getSetCC(dl, VT, 2056 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 2057 DAG.getConstant(bestMask.trunc(bestWidth), 2058 newVT)), 2059 DAG.getConstant(0LL, newVT), Cond); 2060 } 2061 } 2062 } 2063 2064 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 2065 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 2066 unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits(); 2067 2068 // If the comparison constant has bits in the upper part, the 2069 // zero-extended value could never match. 2070 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 2071 C1.getBitWidth() - InSize))) { 2072 switch (Cond) { 2073 case ISD::SETUGT: 2074 case ISD::SETUGE: 2075 case ISD::SETEQ: return DAG.getConstant(0, VT); 2076 case ISD::SETULT: 2077 case ISD::SETULE: 2078 case ISD::SETNE: return DAG.getConstant(1, VT); 2079 case ISD::SETGT: 2080 case ISD::SETGE: 2081 // True if the sign bit of C1 is set. 2082 return DAG.getConstant(C1.isNegative(), VT); 2083 case ISD::SETLT: 2084 case ISD::SETLE: 2085 // True if the sign bit of C1 isn't set. 2086 return DAG.getConstant(C1.isNonNegative(), VT); 2087 default: 2088 break; 2089 } 2090 } 2091 2092 // Otherwise, we can perform the comparison with the low bits. 2093 switch (Cond) { 2094 case ISD::SETEQ: 2095 case ISD::SETNE: 2096 case ISD::SETUGT: 2097 case ISD::SETUGE: 2098 case ISD::SETULT: 2099 case ISD::SETULE: { 2100 EVT newVT = N0.getOperand(0).getValueType(); 2101 if (DCI.isBeforeLegalizeOps() || 2102 (isOperationLegal(ISD::SETCC, newVT) && 2103 getCondCodeAction(Cond, newVT)==Legal)) 2104 return DAG.getSetCC(dl, VT, N0.getOperand(0), 2105 DAG.getConstant(C1.trunc(InSize), newVT), 2106 Cond); 2107 break; 2108 } 2109 default: 2110 break; // todo, be more careful with signed comparisons 2111 } 2112 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 2113 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 2114 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 2115 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 2116 EVT ExtDstTy = N0.getValueType(); 2117 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 2118 2119 // If the constant doesn't fit into the number of bits for the source of 2120 // the sign extension, it is impossible for both sides to be equal. 
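    // e.g. (sext_inreg X, i8) only produces values in [-128, 127], so an
    // equality comparison against 300 folds to false (and != 300 to true).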
2121 if (C1.getMinSignedBits() > ExtSrcTyBits) 2122 return DAG.getConstant(Cond == ISD::SETNE, VT); 2123 2124 SDValue ZextOp; 2125 EVT Op0Ty = N0.getOperand(0).getValueType(); 2126 if (Op0Ty == ExtSrcTy) { 2127 ZextOp = N0.getOperand(0); 2128 } else { 2129 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 2130 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0), 2131 DAG.getConstant(Imm, Op0Ty)); 2132 } 2133 if (!DCI.isCalledByLegalizer()) 2134 DCI.AddToWorklist(ZextOp.getNode()); 2135 // Otherwise, make this a use of a zext. 2136 return DAG.getSetCC(dl, VT, ZextOp, 2137 DAG.getConstant(C1 & APInt::getLowBitsSet( 2138 ExtDstTyBits, 2139 ExtSrcTyBits), 2140 ExtDstTy), 2141 Cond); 2142 } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) && 2143 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 2144 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 2145 if (N0.getOpcode() == ISD::SETCC && 2146 isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) { 2147 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1); 2148 if (TrueWhenTrue) 2149 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 2150 // Invert the condition. 2151 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 2152 CC = ISD::getSetCCInverse(CC, 2153 N0.getOperand(0).getValueType().isInteger()); 2154 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 2155 } 2156 2157 if ((N0.getOpcode() == ISD::XOR || 2158 (N0.getOpcode() == ISD::AND && 2159 N0.getOperand(0).getOpcode() == ISD::XOR && 2160 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 2161 isa<ConstantSDNode>(N0.getOperand(1)) && 2162 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) { 2163 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 2164 // can only do this if the top bits are known zero. 2165 unsigned BitWidth = N0.getValueSizeInBits(); 2166 if (DAG.MaskedValueIsZero(N0, 2167 APInt::getHighBitsSet(BitWidth, 2168 BitWidth-1))) { 2169 // Okay, get the un-inverted input value. 2170 SDValue Val; 2171 if (N0.getOpcode() == ISD::XOR) 2172 Val = N0.getOperand(0); 2173 else { 2174 assert(N0.getOpcode() == ISD::AND && 2175 N0.getOperand(0).getOpcode() == ISD::XOR); 2176 // ((X^1)&1)^1 -> X & 1 2177 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 2178 N0.getOperand(0).getOperand(0), 2179 N0.getOperand(1)); 2180 } 2181 2182 return DAG.getSetCC(dl, VT, Val, N1, 2183 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 2184 } 2185 } else if (N1C->getAPIntValue() == 1 && 2186 (VT == MVT::i1 || 2187 getBooleanContents() == ZeroOrOneBooleanContent)) { 2188 SDValue Op0 = N0; 2189 if (Op0.getOpcode() == ISD::TRUNCATE) 2190 Op0 = Op0.getOperand(0); 2191 2192 if ((Op0.getOpcode() == ISD::XOR) && 2193 Op0.getOperand(0).getOpcode() == ISD::SETCC && 2194 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 2195 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 2196 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 2197 return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1), 2198 Cond); 2199 } else if (Op0.getOpcode() == ISD::AND && 2200 isa<ConstantSDNode>(Op0.getOperand(1)) && 2201 cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) { 2202 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 
2203 if (Op0.getValueType().bitsGT(VT)) 2204 Op0 = DAG.getNode(ISD::AND, dl, VT, 2205 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 2206 DAG.getConstant(1, VT)); 2207 else if (Op0.getValueType().bitsLT(VT)) 2208 Op0 = DAG.getNode(ISD::AND, dl, VT, 2209 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 2210 DAG.getConstant(1, VT)); 2211 2212 return DAG.getSetCC(dl, VT, Op0, 2213 DAG.getConstant(0, Op0.getValueType()), 2214 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 2215 } 2216 } 2217 } 2218 2219 APInt MinVal, MaxVal; 2220 unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits(); 2221 if (ISD::isSignedIntSetCC(Cond)) { 2222 MinVal = APInt::getSignedMinValue(OperandBitSize); 2223 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 2224 } else { 2225 MinVal = APInt::getMinValue(OperandBitSize); 2226 MaxVal = APInt::getMaxValue(OperandBitSize); 2227 } 2228 2229 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 2230 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 2231 if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true 2232 // X >= C0 --> X > (C0-1) 2233 return DAG.getSetCC(dl, VT, N0, 2234 DAG.getConstant(C1-1, N1.getValueType()), 2235 (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT); 2236 } 2237 2238 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 2239 if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true 2240 // X <= C0 --> X < (C0+1) 2241 return DAG.getSetCC(dl, VT, N0, 2242 DAG.getConstant(C1+1, N1.getValueType()), 2243 (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT); 2244 } 2245 2246 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal) 2247 return DAG.getConstant(0, VT); // X < MIN --> false 2248 if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal) 2249 return DAG.getConstant(1, VT); // X >= MIN --> true 2250 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal) 2251 return DAG.getConstant(0, VT); // X > MAX --> false 2252 if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal) 2253 return DAG.getConstant(1, VT); // X <= MAX --> true 2254 2255 // Canonicalize setgt X, Min --> setne X, Min 2256 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal) 2257 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 2258 // Canonicalize setlt X, Max --> setne X, Max 2259 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal) 2260 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 2261 2262 // If we have setult X, 1, turn it into seteq X, 0 2263 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1) 2264 return DAG.getSetCC(dl, VT, N0, 2265 DAG.getConstant(MinVal, N0.getValueType()), 2266 ISD::SETEQ); 2267 // If we have setugt X, Max-1, turn it into seteq X, Max 2268 else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1) 2269 return DAG.getSetCC(dl, VT, N0, 2270 DAG.getConstant(MaxVal, N0.getValueType()), 2271 ISD::SETEQ); 2272 2273 // If we have "setcc X, C0", check to see if we can shrink the immediate 2274 // by changing cc. 
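    // e.g. for i8, X u> 127 holds exactly when the sign bit of X is set,
    // i.e. X <s 0, trading the 127 immediate for the usually free
    // constant 0.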
2275 2276 // SETUGT X, SINTMAX -> SETLT X, 0 2277 if (Cond == ISD::SETUGT && 2278 C1 == APInt::getSignedMaxValue(OperandBitSize)) 2279 return DAG.getSetCC(dl, VT, N0, 2280 DAG.getConstant(0, N1.getValueType()), 2281 ISD::SETLT); 2282 2283 // SETULT X, SINTMIN -> SETGT X, -1 2284 if (Cond == ISD::SETULT && 2285 C1 == APInt::getSignedMinValue(OperandBitSize)) { 2286 SDValue ConstMinusOne = 2287 DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), 2288 N1.getValueType()); 2289 return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT); 2290 } 2291 2292 // Fold bit comparisons when we can. 2293 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 2294 (VT == N0.getValueType() || 2295 (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) && 2296 N0.getOpcode() == ISD::AND) 2297 if (ConstantSDNode *AndRHS = 2298 dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 2299 EVT ShiftTy = DCI.isBeforeLegalize() ? 2300 getPointerTy() : getShiftAmountTy(N0.getValueType()); 2301 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 2302 // Perform the xform if the AND RHS is a single bit. 2303 if (AndRHS->getAPIntValue().isPowerOf2()) { 2304 return DAG.getNode(ISD::TRUNCATE, dl, VT, 2305 DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0, 2306 DAG.getConstant(AndRHS->getAPIntValue().logBase2(), ShiftTy))); 2307 } 2308 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 2309 // (X & 8) == 8 --> (X & 8) >> 3 2310 // Perform the xform if C1 is a single bit. 2311 if (C1.isPowerOf2()) { 2312 return DAG.getNode(ISD::TRUNCATE, dl, VT, 2313 DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0, 2314 DAG.getConstant(C1.logBase2(), ShiftTy))); 2315 } 2316 } 2317 } 2318 } 2319 2320 if (isa<ConstantFPSDNode>(N0.getNode())) { 2321 // Constant fold or commute setcc. 2322 SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl); 2323 if (O.getNode()) return O; 2324 } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) { 2325 // If the RHS of an FP comparison is a constant, simplify it away in 2326 // some cases. 2327 if (CFP->getValueAPF().isNaN()) { 2328 // If an operand is known to be a nan, we can fold it. 2329 switch (ISD::getUnorderedFlavor(Cond)) { 2330 default: llvm_unreachable("Unknown flavor!"); 2331 case 0: // Known false. 2332 return DAG.getConstant(0, VT); 2333 case 1: // Known true. 2334 return DAG.getConstant(1, VT); 2335 case 2: // Undefined. 2336 return DAG.getUNDEF(VT); 2337 } 2338 } 2339 2340 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 2341 // constant if knowing that the operand is non-nan is enough. We prefer to 2342 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 2343 // materialize 0.0. 2344 if (Cond == ISD::SETO || Cond == ISD::SETUO) 2345 return DAG.getSetCC(dl, VT, N0, N0, Cond); 2346 2347 // If the condition is not legal, see if we can find an equivalent one 2348 // which is legal. 2349 if (!isCondCodeLegal(Cond, N0.getValueType())) { 2350 // If the comparison was an awkward floating-point == or != and one of 2351 // the comparison operands is infinity or negative infinity, convert the 2352 // condition to a less-awkward <= or >=. 
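      // (Nothing compares greater than +infinity, so e.g. an ordered
      // x == +inf is equivalent to an ordered x >= +inf, and likewise
      // x == -inf to x <= -inf.)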
2353       if (CFP->getValueAPF().isInfinity()) {
2354         if (CFP->getValueAPF().isNegative()) {
2355           if (Cond == ISD::SETOEQ &&
2356               isCondCodeLegal(ISD::SETOLE, N0.getValueType()))
2357             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
2358           if (Cond == ISD::SETUEQ &&
2359               isCondCodeLegal(ISD::SETULE, N0.getValueType()))
2360             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
2361           if (Cond == ISD::SETUNE &&
2362               isCondCodeLegal(ISD::SETUGT, N0.getValueType()))
2363             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
2364           if (Cond == ISD::SETONE &&
2365               isCondCodeLegal(ISD::SETOGT, N0.getValueType()))
2366             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
2367         } else {
2368           if (Cond == ISD::SETOEQ &&
2369               isCondCodeLegal(ISD::SETOGE, N0.getValueType()))
2370             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
2371           if (Cond == ISD::SETUEQ &&
2372               isCondCodeLegal(ISD::SETUGE, N0.getValueType()))
2373             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
2374           if (Cond == ISD::SETUNE &&
2375               isCondCodeLegal(ISD::SETULT, N0.getValueType()))
2376             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
2377           if (Cond == ISD::SETONE &&
2378               isCondCodeLegal(ISD::SETOLT, N0.getValueType()))
2379             return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
2380         }
2381       }
2382     }
2383   }
2384
2385   if (N0 == N1) {
2386     // We can always fold X == X for integer setcc's.
2387     if (N0.getValueType().isInteger())
2388       return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
2389     unsigned UOF = ISD::getUnorderedFlavor(Cond);
2390     if (UOF == 2) // FP operators that are undefined on NaNs.
2391       return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
2392     if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
2393       return DAG.getConstant(UOF, VT);
2394     // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO
2395     // if it is not already.
2396     ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
2397     if (NewCond != Cond)
2398       return DAG.getSetCC(dl, VT, N0, N1, NewCond);
2399   }
2400
2401   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
2402       N0.getValueType().isInteger()) {
2403     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
2404         N0.getOpcode() == ISD::XOR) {
2405       // Simplify (X+Y) == (X+Z) -->  Y == Z
2406       if (N0.getOpcode() == N1.getOpcode()) {
2407         if (N0.getOperand(0) == N1.getOperand(0))
2408           return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond);
2409         if (N0.getOperand(1) == N1.getOperand(1))
2410           return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond);
2411         if (DAG.isCommutativeBinOp(N0.getOpcode())) {
2412           // If X op Y == Y op X, try other combinations.
2413           if (N0.getOperand(0) == N1.getOperand(1))
2414             return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0),
2415                                 Cond);
2416           if (N0.getOperand(1) == N1.getOperand(0))
2417             return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1),
2418                                 Cond);
2419         }
2420       }
2421
2422       if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
2423         if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
2424           // Turn (X+C1) == C2 --> X == C2-C1
2425           if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) {
2426             return DAG.getSetCC(dl, VT, N0.getOperand(0),
2427                                 DAG.getConstant(RHSC->getAPIntValue()-
2428                                                 LHSR->getAPIntValue(),
2429                                 N0.getValueType()), Cond);
2430           }
2431
2432           // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
2433           if (N0.getOpcode() == ISD::XOR)
2434             // If we know that all of the inverted bits are zero, don't bother
2435             // performing the inversion.
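            // (XOR is self-inverse: xoring both sides with C1 turns
            // (X^C1) == C2 into X == (C1^C2); e.g. (X^0xFF) == 0x0F holds
            // iff X == 0xF0.)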
2436 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 2437 return 2438 DAG.getSetCC(dl, VT, N0.getOperand(0), 2439 DAG.getConstant(LHSR->getAPIntValue() ^ 2440 RHSC->getAPIntValue(), 2441 N0.getValueType()), 2442 Cond); 2443 } 2444 2445 // Turn (C1-X) == C2 --> X == C1-C2 2446 if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 2447 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 2448 return 2449 DAG.getSetCC(dl, VT, N0.getOperand(1), 2450 DAG.getConstant(SUBC->getAPIntValue() - 2451 RHSC->getAPIntValue(), 2452 N0.getValueType()), 2453 Cond); 2454 } 2455 } 2456 } 2457 2458 // Simplify (X+Z) == X --> Z == 0 2459 if (N0.getOperand(0) == N1) 2460 return DAG.getSetCC(dl, VT, N0.getOperand(1), 2461 DAG.getConstant(0, N0.getValueType()), Cond); 2462 if (N0.getOperand(1) == N1) { 2463 if (DAG.isCommutativeBinOp(N0.getOpcode())) 2464 return DAG.getSetCC(dl, VT, N0.getOperand(0), 2465 DAG.getConstant(0, N0.getValueType()), Cond); 2466 else if (N0.getNode()->hasOneUse()) { 2467 assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!"); 2468 // (Z-X) == X --> Z == X<<1 2469 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), 2470 N1, 2471 DAG.getConstant(1, getShiftAmountTy(N1.getValueType()))); 2472 if (!DCI.isCalledByLegalizer()) 2473 DCI.AddToWorklist(SH.getNode()); 2474 return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond); 2475 } 2476 } 2477 } 2478 2479 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 2480 N1.getOpcode() == ISD::XOR) { 2481 // Simplify X == (X+Z) --> Z == 0 2482 if (N1.getOperand(0) == N0) { 2483 return DAG.getSetCC(dl, VT, N1.getOperand(1), 2484 DAG.getConstant(0, N1.getValueType()), Cond); 2485 } else if (N1.getOperand(1) == N0) { 2486 if (DAG.isCommutativeBinOp(N1.getOpcode())) { 2487 return DAG.getSetCC(dl, VT, N1.getOperand(0), 2488 DAG.getConstant(0, N1.getValueType()), Cond); 2489 } else if (N1.getNode()->hasOneUse()) { 2490 assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!"); 2491 // X == (Z-X) --> X<<1 == Z 2492 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0, 2493 DAG.getConstant(1, getShiftAmountTy(N0.getValueType()))); 2494 if (!DCI.isCalledByLegalizer()) 2495 DCI.AddToWorklist(SH.getNode()); 2496 return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond); 2497 } 2498 } 2499 } 2500 2501 // Simplify x&y == y to x&y != 0 if y has exactly one bit set. 2502 // Note that where y is variable and is known to have at most 2503 // one bit set (for example, if it is z&1) we cannot do this; 2504 // the expressions are not equivalent when y==0. 2505 if (N0.getOpcode() == ISD::AND) 2506 if (N0.getOperand(0) == N1 || N0.getOperand(1) == N1) { 2507 if (ValueHasExactlyOneBitSet(N1, DAG)) { 2508 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true); 2509 SDValue Zero = DAG.getConstant(0, N1.getValueType()); 2510 return DAG.getSetCC(dl, VT, N0, Zero, Cond); 2511 } 2512 } 2513 if (N1.getOpcode() == ISD::AND) 2514 if (N1.getOperand(0) == N0 || N1.getOperand(1) == N0) { 2515 if (ValueHasExactlyOneBitSet(N0, DAG)) { 2516 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true); 2517 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 2518 return DAG.getSetCC(dl, VT, N1, Zero, Cond); 2519 } 2520 } 2521 } 2522 2523 // Fold away ALL boolean setcc's. 
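  // (On i1 every comparison reduces to a tiny boolean identity; e.g.
  // X <u Y can only hold when X == 0 and Y == 1, which is exactly ~X & Y.)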
2524 SDValue Temp; 2525 if (N0.getValueType() == MVT::i1 && foldBooleans) { 2526 switch (Cond) { 2527 default: llvm_unreachable("Unknown integer setcc!"); 2528 case ISD::SETEQ: // X == Y -> ~(X^Y) 2529 Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1); 2530 N0 = DAG.getNOT(dl, Temp, MVT::i1); 2531 if (!DCI.isCalledByLegalizer()) 2532 DCI.AddToWorklist(Temp.getNode()); 2533 break; 2534 case ISD::SETNE: // X != Y --> (X^Y) 2535 N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1); 2536 break; 2537 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 2538 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 2539 Temp = DAG.getNOT(dl, N0, MVT::i1); 2540 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp); 2541 if (!DCI.isCalledByLegalizer()) 2542 DCI.AddToWorklist(Temp.getNode()); 2543 break; 2544 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 2545 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 2546 Temp = DAG.getNOT(dl, N1, MVT::i1); 2547 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp); 2548 if (!DCI.isCalledByLegalizer()) 2549 DCI.AddToWorklist(Temp.getNode()); 2550 break; 2551 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 2552 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 2553 Temp = DAG.getNOT(dl, N0, MVT::i1); 2554 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp); 2555 if (!DCI.isCalledByLegalizer()) 2556 DCI.AddToWorklist(Temp.getNode()); 2557 break; 2558 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 2559 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 2560 Temp = DAG.getNOT(dl, N1, MVT::i1); 2561 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp); 2562 break; 2563 } 2564 if (VT != MVT::i1) { 2565 if (!DCI.isCalledByLegalizer()) 2566 DCI.AddToWorklist(N0.getNode()); 2567 // FIXME: If running after legalize, we probably can't do this. 2568 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0); 2569 } 2570 return N0; 2571 } 2572 2573 // Could not fold it. 2574 return SDValue(); 2575} 2576 2577/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 2578/// node is a GlobalAddress + offset. 2579bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA, 2580 int64_t &Offset) const { 2581 if (isa<GlobalAddressSDNode>(N)) { 2582 GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N); 2583 GA = GASD->getGlobal(); 2584 Offset += GASD->getOffset(); 2585 return true; 2586 } 2587 2588 if (N->getOpcode() == ISD::ADD) { 2589 SDValue N1 = N->getOperand(0); 2590 SDValue N2 = N->getOperand(1); 2591 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 2592 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 2593 if (V) { 2594 Offset += V->getSExtValue(); 2595 return true; 2596 } 2597 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 2598 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 2599 if (V) { 2600 Offset += V->getSExtValue(); 2601 return true; 2602 } 2603 } 2604 } 2605 2606 return false; 2607} 2608 2609 2610SDValue TargetLowering:: 2611PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { 2612 // Default implementation: no optimization. 2613 return SDValue(); 2614} 2615 2616//===----------------------------------------------------------------------===// 2617// Inline Assembler Implementation Methods 2618//===----------------------------------------------------------------------===// 2619 2620 2621TargetLowering::ConstraintType 2622TargetLowering::getConstraintType(const std::string &Constraint) const { 2623 // FIXME: lots more standard ones to handle. 
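  // e.g. "r" names a register class, "m"/"o"/"V" name memory, immediates
  // and the other one-letter codes fall into C_Other, and a braced name
  // such as the hypothetical "{ax}" denotes one specific register.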
2624 if (Constraint.size() == 1) { 2625 switch (Constraint[0]) { 2626 default: break; 2627 case 'r': return C_RegisterClass; 2628 case 'm': // memory 2629 case 'o': // offsetable 2630 case 'V': // not offsetable 2631 return C_Memory; 2632 case 'i': // Simple Integer or Relocatable Constant 2633 case 'n': // Simple Integer 2634 case 'E': // Floating Point Constant 2635 case 'F': // Floating Point Constant 2636 case 's': // Relocatable Constant 2637 case 'p': // Address. 2638 case 'X': // Allow ANY value. 2639 case 'I': // Target registers. 2640 case 'J': 2641 case 'K': 2642 case 'L': 2643 case 'M': 2644 case 'N': 2645 case 'O': 2646 case 'P': 2647 case '<': 2648 case '>': 2649 return C_Other; 2650 } 2651 } 2652 2653 if (Constraint.size() > 1 && Constraint[0] == '{' && 2654 Constraint[Constraint.size()-1] == '}') 2655 return C_Register; 2656 return C_Unknown; 2657} 2658 2659/// LowerXConstraint - try to replace an X constraint, which matches anything, 2660/// with another that has more specific requirements based on the type of the 2661/// corresponding operand. 2662const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{ 2663 if (ConstraintVT.isInteger()) 2664 return "r"; 2665 if (ConstraintVT.isFloatingPoint()) 2666 return "f"; // works for many targets 2667 return 0; 2668} 2669 2670/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 2671/// vector. If it is invalid, don't add anything to Ops. 2672void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 2673 std::string &Constraint, 2674 std::vector<SDValue> &Ops, 2675 SelectionDAG &DAG) const { 2676 2677 if (Constraint.length() > 1) return; 2678 2679 char ConstraintLetter = Constraint[0]; 2680 switch (ConstraintLetter) { 2681 default: break; 2682 case 'X': // Allows any operand; labels (basic block) use this. 2683 if (Op.getOpcode() == ISD::BasicBlock) { 2684 Ops.push_back(Op); 2685 return; 2686 } 2687 // fall through 2688 case 'i': // Simple Integer or Relocatable Constant 2689 case 'n': // Simple Integer 2690 case 's': { // Relocatable Constant 2691 // These operands are interested in values of the form (GV+C), where C may 2692 // be folded in as an offset of GV, or it may be explicitly added. Also, it 2693 // is possible and fine if either GV or C are missing. 2694 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2695 GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op); 2696 2697 // If we have "(add GV, C)", pull out GV/C 2698 if (Op.getOpcode() == ISD::ADD) { 2699 C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2700 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0)); 2701 if (C == 0 || GA == 0) { 2702 C = dyn_cast<ConstantSDNode>(Op.getOperand(0)); 2703 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1)); 2704 } 2705 if (C == 0 || GA == 0) 2706 C = 0, GA = 0; 2707 } 2708 2709 // If we find a valid operand, map to the TargetXXX version so that the 2710 // value itself doesn't get selected. 2711 if (GA) { // Either &GV or &GV+C 2712 if (ConstraintLetter != 'n') { 2713 int64_t Offs = GA->getOffset(); 2714 if (C) Offs += C->getZExtValue(); 2715 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), 2716 C ? C->getDebugLoc() : DebugLoc(), 2717 Op.getValueType(), Offs)); 2718 return; 2719 } 2720 } 2721 if (C) { // just C, no GV. 2722 // Simple constants are not allowed for 's'. 2723 if (ConstraintLetter != 's') { 2724 // gcc prints these as sign extended. 
// Sign extend value to 64 bits
2725           // now; without this it would get ZExt'd later in
2726           // ScheduleDAGSDNodes::EmitNode, which is very generic.
2727           Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
2728                                               MVT::i64));
2729           return;
2730         }
2731       }
2732       break;
2733     }
2734   }
2735 }
2736
2737 std::vector<unsigned> TargetLowering::
2738 getRegClassForInlineAsmConstraint(const std::string &Constraint,
2739                                   EVT VT) const {
2740   return std::vector<unsigned>();
2741 }
2742
2743
2744 std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
2745 getRegForInlineAsmConstraint(const std::string &Constraint,
2746                              EVT VT) const {
2747   if (Constraint[0] != '{')
2748     return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
2749   assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
2750
2751   // Remove the braces from around the name.
2752   StringRef RegName(Constraint.data()+1, Constraint.size()-2);
2753
2754   // Figure out which register class contains this reg.
2755   const TargetRegisterInfo *RI = TM.getRegisterInfo();
2756   for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
2757        E = RI->regclass_end(); RCI != E; ++RCI) {
2758     const TargetRegisterClass *RC = *RCI;
2759
2760     // If none of the value types for this register class are valid, we
2761     // can't use it.  For example, 64-bit reg classes on 32-bit targets.
2762     bool isLegal = false;
2763     for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
2764          I != E; ++I) {
2765       if (isTypeLegal(*I)) {
2766         isLegal = true;
2767         break;
2768       }
2769     }
2770
2771     if (!isLegal) continue;
2772
2773     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
2774          I != E; ++I) {
2775       if (RegName.equals_lower(RI->getName(*I)))
2776         return std::make_pair(*I, RC);
2777     }
2778   }
2779
2780   return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
2781 }
2782
2783 //===----------------------------------------------------------------------===//
2784 // Constraint Selection.
2785
2786 /// isMatchingInputConstraint - Return true if this is an input operand that is
2787 /// a matching constraint like "4".
2788 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
2789   assert(!ConstraintCode.empty() && "No known constraint!");
2790   return isdigit(ConstraintCode[0]);
2791 }
2792
2793 /// getMatchedOperand - If this is an input matching constraint, this method
2794 /// returns the output operand it matches.
2795 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
2796   assert(!ConstraintCode.empty() && "No known constraint!");
2797   return atoi(ConstraintCode.c_str());
2798 }
2799
2800
2801 /// ParseConstraints - Split up the constraint string from the inline
2802 /// assembly value into the specific constraints and their prefixes,
2803 /// and also tie in the associated operand values.
2804 /// If this returns an empty vector, and if the constraint string itself
2805 /// isn't empty, there was an error parsing.
2806 TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
2807     ImmutableCallSite CS) const {
2808   /// ConstraintOperands - Information about all of the constraints.
2809   AsmOperandInfoVector ConstraintOperands;
2810   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
2811   unsigned maCount = 0; // Largest number of multiple alternative constraints.
2812
2813   // Do a prepass over the constraints, canonicalizing them, and building up the
2814   // ConstraintOperands list.
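  // e.g. for an illustrative call i32 asm "...", "=r,r"(i32 %x), the "=r"
  // entry becomes an isOutput operand whose ConstraintVT is taken from the
  // call's result type, and the "r" entry becomes an isInput operand
  // consuming %x.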
2815 InlineAsm::ConstraintInfoVector 2816 ConstraintInfos = IA->ParseConstraints(); 2817 2818 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 2819 unsigned ResNo = 0; // ResNo - The result number of the next output. 2820 2821 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) { 2822 ConstraintOperands.push_back(AsmOperandInfo(ConstraintInfos[i])); 2823 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 2824 2825 // Update multiple alternative constraint count. 2826 if (OpInfo.multipleAlternatives.size() > maCount) 2827 maCount = OpInfo.multipleAlternatives.size(); 2828 2829 OpInfo.ConstraintVT = MVT::Other; 2830 2831 // Compute the value type for each operand. 2832 switch (OpInfo.Type) { 2833 case InlineAsm::isOutput: 2834 // Indirect outputs just consume an argument. 2835 if (OpInfo.isIndirect) { 2836 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 2837 break; 2838 } 2839 2840 // The return value of the call is this value. As such, there is no 2841 // corresponding argument. 2842 assert(!CS.getType()->isVoidTy() && 2843 "Bad inline asm!"); 2844 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) { 2845 OpInfo.ConstraintVT = getValueType(STy->getElementType(ResNo)); 2846 } else { 2847 assert(ResNo == 0 && "Asm only has one result!"); 2848 OpInfo.ConstraintVT = getValueType(CS.getType()); 2849 } 2850 ++ResNo; 2851 break; 2852 case InlineAsm::isInput: 2853 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 2854 break; 2855 case InlineAsm::isClobber: 2856 // Nothing to do. 2857 break; 2858 } 2859 2860 if (OpInfo.CallOperandVal) { 2861 const llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 2862 if (OpInfo.isIndirect) { 2863 const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 2864 if (!PtrTy) 2865 report_fatal_error("Indirect operand for inline asm not a pointer!"); 2866 OpTy = PtrTy->getElementType(); 2867 } 2868 2869 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 2870 if (const StructType *STy = dyn_cast<StructType>(OpTy)) 2871 if (STy->getNumElements() == 1) 2872 OpTy = STy->getElementType(0); 2873 2874 // If OpTy is not a single value, it may be a struct/union that we 2875 // can tile with integers. 2876 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 2877 unsigned BitSize = TD->getTypeSizeInBits(OpTy); 2878 switch (BitSize) { 2879 default: break; 2880 case 1: 2881 case 8: 2882 case 16: 2883 case 32: 2884 case 64: 2885 case 128: 2886 OpInfo.ConstraintVT = 2887 EVT::getEVT(IntegerType::get(OpTy->getContext(), BitSize), true); 2888 break; 2889 } 2890 } else if (dyn_cast<PointerType>(OpTy)) { 2891 OpInfo.ConstraintVT = MVT::getIntegerVT(8*TD->getPointerSize()); 2892 } else { 2893 OpInfo.ConstraintVT = EVT::getEVT(OpTy, true); 2894 } 2895 } 2896 } 2897 2898 // If we have multiple alternative constraints, select the best alternative. 2899 if (ConstraintInfos.size()) { 2900 if (maCount) { 2901 unsigned bestMAIndex = 0; 2902 int bestWeight = -1; 2903 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 2904 int weight = -1; 2905 unsigned maIndex; 2906 // Compute the sums of the weights for each alternative, keeping track 2907 // of the best (highest weight) one so far. 
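      // e.g. with a hypothetical two-alternative constraint string "r,m" on
      // each operand, alternative 0 scores every operand as a register match
      // and alternative 1 as a memory match; the larger total wins, and a -1
      // weight anywhere disqualifies that whole alternative.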
2908 for (maIndex = 0; maIndex < maCount; ++maIndex) { 2909 int weightSum = 0; 2910 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 2911 cIndex != eIndex; ++cIndex) { 2912 AsmOperandInfo& OpInfo = ConstraintOperands[cIndex]; 2913 if (OpInfo.Type == InlineAsm::isClobber) 2914 continue; 2915 2916 // If this is an output operand with a matching input operand, 2917 // look up the matching input. If their types mismatch, e.g. one 2918 // is an integer, the other is floating point, or their sizes are 2919 // different, flag it as an maCantMatch. 2920 if (OpInfo.hasMatchingInput()) { 2921 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 2922 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 2923 if ((OpInfo.ConstraintVT.isInteger() != 2924 Input.ConstraintVT.isInteger()) || 2925 (OpInfo.ConstraintVT.getSizeInBits() != 2926 Input.ConstraintVT.getSizeInBits())) { 2927 weightSum = -1; // Can't match. 2928 break; 2929 } 2930 } 2931 } 2932 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 2933 if (weight == -1) { 2934 weightSum = -1; 2935 break; 2936 } 2937 weightSum += weight; 2938 } 2939 // Update best. 2940 if (weightSum > bestWeight) { 2941 bestWeight = weightSum; 2942 bestMAIndex = maIndex; 2943 } 2944 } 2945 2946 // Now select chosen alternative in each constraint. 2947 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 2948 cIndex != eIndex; ++cIndex) { 2949 AsmOperandInfo& cInfo = ConstraintOperands[cIndex]; 2950 if (cInfo.Type == InlineAsm::isClobber) 2951 continue; 2952 cInfo.selectAlternative(bestMAIndex); 2953 } 2954 } 2955 } 2956 2957 // Check and hook up tied operands, choose constraint code to use. 2958 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 2959 cIndex != eIndex; ++cIndex) { 2960 AsmOperandInfo& OpInfo = ConstraintOperands[cIndex]; 2961 2962 // If this is an output operand with a matching input operand, look up the 2963 // matching input. If their types mismatch, e.g. one is an integer, the 2964 // other is floating point, or their sizes are different, flag it as an 2965 // error. 2966 if (OpInfo.hasMatchingInput()) { 2967 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 2968 2969 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 2970 if ((OpInfo.ConstraintVT.isInteger() != 2971 Input.ConstraintVT.isInteger()) || 2972 (OpInfo.ConstraintVT.getSizeInBits() != 2973 Input.ConstraintVT.getSizeInBits())) { 2974 report_fatal_error("Unsupported asm: input constraint" 2975 " with a matching output constraint of" 2976 " incompatible type!"); 2977 } 2978 } 2979 2980 } 2981 } 2982 2983 return ConstraintOperands; 2984} 2985 2986 2987/// getConstraintGenerality - Return an integer indicating how general CT 2988/// is. 2989static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 2990 switch (CT) { 2991 default: llvm_unreachable("Unknown constraint type!"); 2992 case TargetLowering::C_Other: 2993 case TargetLowering::C_Unknown: 2994 return 0; 2995 case TargetLowering::C_Register: 2996 return 1; 2997 case TargetLowering::C_RegisterClass: 2998 return 2; 2999 case TargetLowering::C_Memory: 3000 return 3; 3001 } 3002} 3003 3004/// Examine constraint type and operand type and determine a weight value. 3005/// This object must already have been set up with the operand type 3006/// and the current alternative constraint selected. 
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
  TargetLowering::getMultipleConstraintMatchWeight(
    AsmOperandInfo &info, int maIndex) const {
  InlineAsm::ConstraintCodeVector *rCodes;
  if (maIndex >= (int)info.multipleAlternatives.size())
    rCodes = &info.Codes;
  else
    rCodes = &info.multipleAlternatives[maIndex].Codes;
  ConstraintWeight BestWeight = CW_Invalid;

  // Loop over the options, keeping track of the best (highest-weight) one.
  for (unsigned i = 0, e = rCodes->size(); i != e; ++i) {
    ConstraintWeight weight =
      getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str());
    if (weight > BestWeight)
      BestWeight = weight;
  }

  return BestWeight;
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
  TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  // Look at the constraint type.
  switch (*constraint) {
  case 'i': // immediate integer.
  case 'n': // immediate integer with a known value.
    if (isa<ConstantInt>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 's': // non-explicit integral immediate.
    if (isa<GlobalValue>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 'E': // immediate float if host format.
  case 'F': // immediate float.
    if (isa<ConstantFP>(CallOperandVal))
      weight = CW_Constant;
    break;
  case '<': // memory operand with autodecrement.
  case '>': // memory operand with autoincrement.
  case 'm': // memory operand.
  case 'o': // offsettable memory operand.
  case 'V': // non-offsettable memory operand.
    weight = CW_Memory;
    break;
  case 'r': // general register.
  case 'g': // general register, memory operand or immediate integer.
            // Note: Clang converts "g" to "imr".
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;
  case 'X': // any operand.
  default:
    weight = CW_Default;
    break;
  }
  return weight;
}
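
// For example (a sketch, assuming the generic weight ordering CW_Constant >
// CW_Memory > CW_Register > CW_Default declared in TargetLowering.h): for a
// ConstantInt operand with the codes "ir", 'i' scores CW_Constant and 'r'
// scores CW_Register, so the weight for that alternative is CW_Constant.
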
/// ChooseConstraint - If there are multiple different constraints that we
/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
///    Other         -> immediates and magic values
///    Register      -> one specific register
///    RegisterClass -> a group of regs
///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it.  The problem here
/// is that if we have something that could either be in a register or in
/// memory, then using the register could cause selection of *other*
/// operands to fail: they might only succeed if we pick memory.  Because of
/// this the heuristic we use is:
///
///  1) If there is an 'other' constraint, and if the operand is valid for
///     that constraint, use it.  This makes us take advantage of 'i'
///     constraints when available.
///  2) Otherwise, pick the most general constraint present.  This prefers
///     'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering &TLI,
                             SDValue Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 &&
         "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
      TLI.getConstraintType(OpInfo.Codes[i]);

    // If this is an 'other' constraint, see if the operand is valid for it.
    // For example, on X86 we might have an 'rI' constraint.  If the operand
    // is an integer in the range [0..31] we want to use I (saving a load
    // of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other && Op.getNode()) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      std::vector<SDValue> ResultOps;
      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i],
                                       ResultOps, *DAG);
      if (!ResultOps.empty()) {
        BestType = CType;
        BestIdx = i;
        break;
      }
    }

    // Things with matching constraints can only be registers, per gcc
    // documentation.  This mainly affects "g" constraints.
    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
      continue;

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}

/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                            SDValue Op,
                                            SelectionDAG *DAG) const {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
  } else {
    ChooseConstraint(OpInfo, *this, Op, DAG);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels).  For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *v = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
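    // For example (a sketch): with the default LowerXConstraint hook in
    // this file, an integer ConstraintVT typically resolves to "r" and a
    // floating-point one to "f", so 'X' on an i32 value ends up as 'r'.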
    if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  // The default implementation accepts only a conservative, RISC-like r+r
  // and r+i addressing mode.

  // Allow only a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:  // Don't allow n*r for any other n.
    return false;
  }

  return true;
}

/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                  std::vector<SDNode*>* Created) const {
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  APInt d = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
  APInt::ms magics = d.magic();

  // Multiply the numerator (operand 0) by the magic value.
  // FIXME: We should support doing a MUL in a wider type.
  SDValue Q;
  if (isOperationLegalOrCustom(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
                            N->getOperand(0),
                            DAG.getConstant(magics.m, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhs or equivalent.

  // If d > 0 and m < 0, add the numerator.
  if (d.isStrictlyPositive() && magics.m.isNegative()) {
    Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.getNode());
  }
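
  // Worked example (a sketch; magic values from the standard signed tables
  // in Hacker's Delight): for an i32 divide by 7, d.magic() yields
  // m = 0x92492493 (negative) and s = 2, so d > 0 and m < 0 triggers the
  // ADD above; the code below then shifts right arithmetically by 2 and
  // adds the sign bit, yielding the exact quotient.
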
  // If d < 0 and m > 0, subtract the numerator.
  if (d.isNegative() && magics.m.isStrictlyPositive()) {
    Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // Shift right algebraic if the shift value is nonzero.
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, dl, VT, Q,
                    DAG.getConstant(magics.s,
                                    getShiftAmountTy(Q.getValueType())));
    if (Created)
      Created->push_back(Q.getNode());
  }
  // Extract the sign bit and add it to the quotient.
  SDValue T =
    DAG.getNode(ISD::SRL, dl, VT, Q,
                DAG.getConstant(VT.getSizeInBits()-1,
                                getShiftAmountTy(Q.getValueType())));
  if (Created)
    Created->push_back(T.getNode());
  return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}

/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                  std::vector<SDNode*>* Created) const {
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT))
    return SDValue();

  // FIXME: We should use a narrower constant when the upper
  // bits are known to be zero.
  const APInt &N1C = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
  APInt::mu magics = N1C.magicu();

  SDValue Q = N->getOperand(0);

  // If the divisor is even, we can avoid using the expensive fixup by
  // shifting the dividend upfront.
  if (magics.a != 0 && !N1C[0]) {
    unsigned Shift = N1C.countTrailingZeros();
    Q = DAG.getNode(ISD::SRL, dl, VT, Q,
                    DAG.getConstant(Shift,
                                    getShiftAmountTy(Q.getValueType())));
    if (Created)
      Created->push_back(Q.getNode());

    // Get the magic number for the shifted divisor.
    magics = N1C.lshr(Shift).magicu(Shift);
    assert(magics.a == 0 && "Should use cheap fixup now");
  }

  // Multiply the numerator (operand 0) by the magic value.
  // FIXME: We should support doing a MUL in a wider type.
  if (isOperationLegalOrCustom(ISD::MULHU, VT))
    Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, VT));
  else if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
    Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q,
                            DAG.getConstant(magics.m, VT)).getNode(), 1);
  else
    return SDValue();       // No mulhu or equivalent.
  if (Created)
    Created->push_back(Q.getNode());

  if (magics.a == 0) {
    assert(magics.s < N1C.getBitWidth() &&
           "We shouldn't generate an undefined shift!");
    return DAG.getNode(ISD::SRL, dl, VT, Q,
                       DAG.getConstant(magics.s,
                                       getShiftAmountTy(Q.getValueType())));
  } else {
    SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
    if (Created)
      Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ,
                      DAG.getConstant(1,
                                      getShiftAmountTy(NPQ.getValueType())));
    if (Created)
      Created->push_back(NPQ.getNode());
    NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
    if (Created)
      Created->push_back(NPQ.getNode());
    return DAG.getNode(ISD::SRL, dl, VT, NPQ,
                       DAG.getConstant(magics.s-1,
                                       getShiftAmountTy(NPQ.getValueType())));
  }
}
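
// Worked example for BuildUDIV (a sketch; magic values from the standard
// unsigned tables in Hacker's Delight): for an i32 udiv by 7, magicu()
// yields m = 0x24924925 with a != 0 and s = 3, so the fixup path produces
//   Q = mulhu(N0, 0x24924925);  NPQ = srl(N0 - Q, 1);
//   result = srl(NPQ + Q, s-1) = srl(NPQ + Q, 2);
// For an even divisor such as 14, the dividend is first shifted right by
// one and the cheaper magic number of the shifted divisor is used instead,
// avoiding the fixup entirely.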