X86InstrInfo.cpp revision e8b4a4a9d173d67e35e4b1d32e20140381db6bde
//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <limits>

#define GET_INSTRINFO_CTOR
#include "X86GenInstrInfo.inc"

using namespace llvm;

static cl::opt<bool>
NoFusing("disable-spill-fusing",
         cl::desc("Disable fusing of spill code into instructions"));
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
                  cl::desc("Print instructions that the allocator wants to"
                           " fuse, but the X86 backend currently can't"),
                  cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
                 cl::desc("Re-materialize load from stub in PIC mode"),
                 cl::init(false), cl::Hidden);

enum {
  // Select which memory operand is being unfolded.
  // (stored in bits 0 - 3)
  TB_INDEX_0    = 0,
  TB_INDEX_1    = 1,
  TB_INDEX_2    = 2,
  TB_INDEX_3    = 3,
  TB_INDEX_MASK = 0xf,

  // Do not insert the reverse map (MemOp -> RegOp) into the table.
  // This may be needed because there is a many -> one mapping.
  TB_NO_REVERSE   = 1 << 4,

  // Do not insert the forward map (RegOp -> MemOp) into the table.
  // This is needed for Native Client, which prohibits branch
  // instructions from using a memory operand.
  TB_NO_FORWARD   = 1 << 5,

  TB_FOLDED_LOAD  = 1 << 6,
  TB_FOLDED_STORE = 1 << 7,

  // Minimum alignment required for load/store.
  // Used for RegOp->MemOp conversion.
  // (stored in bits 8 - 15)
  TB_ALIGN_SHIFT = 8,
  TB_ALIGN_NONE  =    0 << TB_ALIGN_SHIFT,
  TB_ALIGN_16    =   16 << TB_ALIGN_SHIFT,
  TB_ALIGN_32    =   32 << TB_ALIGN_SHIFT,
  TB_ALIGN_MASK  = 0xff << TB_ALIGN_SHIFT
};

struct X86OpTblEntry {
  uint16_t RegOp;
  uint16_t MemOp;
  uint16_t Flags;
};

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : X86GenInstrInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                     ? X86::ADJCALLSTACKDOWN64
                     : X86::ADJCALLSTACKDOWN32),
                    (tm.getSubtarget<X86Subtarget>().is64Bit()
                     ? X86::ADJCALLSTACKUP64
                     : X86::ADJCALLSTACKUP32)),
    TM(tm), RI(tm, *this) {

  static const X86OpTblEntry OpTbl2Addr[] = {
    { X86::ADC32ri,     X86::ADC32mi,    0 },
    { X86::ADC32ri8,    X86::ADC32mi8,   0 },
    { X86::ADC32rr,     X86::ADC32mr,    0 },
    { X86::ADC64ri32,   X86::ADC64mi32,  0 },
    { X86::ADC64ri8,    X86::ADC64mi8,   0 },
    { X86::ADC64rr,     X86::ADC64mr,    0 },
    { X86::ADD16ri,     X86::ADD16mi,    0 },
    { X86::ADD16ri8,    X86::ADD16mi8,   0 },
    { X86::ADD16ri_DB,  X86::ADD16mi,    TB_NO_REVERSE },
    { X86::ADD16ri8_DB, X86::ADD16mi8,   TB_NO_REVERSE },
    { X86::ADD16rr,     X86::ADD16mr,    0 },
    { X86::ADD16rr_DB,  X86::ADD16mr,    TB_NO_REVERSE },
    { X86::ADD32ri,     X86::ADD32mi,    0 },
    { X86::ADD32ri8,    X86::ADD32mi8,   0 },
    { X86::ADD32ri_DB,  X86::ADD32mi,    TB_NO_REVERSE },
    { X86::ADD32ri8_DB, X86::ADD32mi8,   TB_NO_REVERSE },
    { X86::ADD32rr,     X86::ADD32mr,    0 },
    { X86::ADD32rr_DB,  X86::ADD32mr,    TB_NO_REVERSE },
    { X86::ADD64ri32,   X86::ADD64mi32,  0 },
    { X86::ADD64ri8,    X86::ADD64mi8,   0 },
    { X86::ADD64ri32_DB,X86::ADD64mi32,  TB_NO_REVERSE },
    { X86::ADD64ri8_DB, X86::ADD64mi8,   TB_NO_REVERSE },
    { X86::ADD64rr,     X86::ADD64mr,    0 },
    { X86::ADD64rr_DB,  X86::ADD64mr,    TB_NO_REVERSE },
    { X86::ADD8ri,      X86::ADD8mi,     0 },
    { X86::ADD8rr,      X86::ADD8mr,     0 },
    { X86::AND16ri,     X86::AND16mi,    0 },
    { X86::AND16ri8,    X86::AND16mi8,   0 },
    { X86::AND16rr,     X86::AND16mr,    0 },
    { X86::AND32ri,     X86::AND32mi,    0 },
    { X86::AND32ri8,    X86::AND32mi8,   0 },
    { X86::AND32rr,     X86::AND32mr,    0 },
    { X86::AND64ri32,   X86::AND64mi32,  0 },
    { X86::AND64ri8,    X86::AND64mi8,   0 },
    { X86::AND64rr,     X86::AND64mr,    0 },
    { X86::AND8ri,      X86::AND8mi,     0 },
    { X86::AND8rr,      X86::AND8mr,     0 },
    { X86::DEC16r,      X86::DEC16m,     0 },
    { X86::DEC32r,      X86::DEC32m,     0 },
    { X86::DEC64_16r,   X86::DEC64_16m,  0 },
    { X86::DEC64_32r,   X86::DEC64_32m,  0 },
    { X86::DEC64r,      X86::DEC64m,     0 },
    { X86::DEC8r,       X86::DEC8m,      0 },
    { X86::INC16r,      X86::INC16m,     0 },
    { X86::INC32r,      X86::INC32m,     0 },
    { X86::INC64_16r,   X86::INC64_16m,  0 },
    { X86::INC64_32r,   X86::INC64_32m,  0 },
    { X86::INC64r,      X86::INC64m,     0 },
    { X86::INC8r,       X86::INC8m,      0 },
    { X86::NEG16r,      X86::NEG16m,     0 },
    { X86::NEG32r,      X86::NEG32m,     0 },
    { X86::NEG64r,      X86::NEG64m,     0 },
    { X86::NEG8r,       X86::NEG8m,      0 },
    { X86::NOT16r,      X86::NOT16m,     0 },
    { X86::NOT32r,      X86::NOT32m,     0 },
    { X86::NOT64r,      X86::NOT64m,     0 },
    { X86::NOT8r,       X86::NOT8m,      0 },
    { X86::OR16ri,      X86::OR16mi,     0 },
    { X86::OR16ri8,     X86::OR16mi8,    0 },
    { X86::OR16rr,      X86::OR16mr,     0 },
    { X86::OR32ri,      X86::OR32mi,     0 },
    { X86::OR32ri8,     X86::OR32mi8,    0 },
    { X86::OR32rr,      X86::OR32mr,     0 },
    { X86::OR64ri32,    X86::OR64mi32,   0 },
    { X86::OR64ri8,     X86::OR64mi8,    0 },
    { X86::OR64rr,      X86::OR64mr,     0 },
    { X86::OR8ri,       X86::OR8mi,      0 },
    { X86::OR8rr,       X86::OR8mr,      0 },
    { X86::ROL16r1,     X86::ROL16m1,    0 },
    { X86::ROL16rCL,    X86::ROL16mCL,   0 },
    { X86::ROL16ri,     X86::ROL16mi,    0 },
    { X86::ROL32r1,     X86::ROL32m1,    0 },
    { X86::ROL32rCL,    X86::ROL32mCL,   0 },
    { X86::ROL32ri,     X86::ROL32mi,    0 },
    { X86::ROL64r1,     X86::ROL64m1,    0 },
    { X86::ROL64rCL,    X86::ROL64mCL,   0 },
    { X86::ROL64ri,     X86::ROL64mi,    0 },
    { X86::ROL8r1,      X86::ROL8m1,     0 },
    { X86::ROL8rCL,     X86::ROL8mCL,    0 },
    { X86::ROL8ri,      X86::ROL8mi,     0 },
    { X86::ROR16r1,     X86::ROR16m1,    0 },
    { X86::ROR16rCL,    X86::ROR16mCL,   0 },
    { X86::ROR16ri,     X86::ROR16mi,    0 },
    { X86::ROR32r1,     X86::ROR32m1,    0 },
    {
X86::ROR32rCL, X86::ROR32mCL, 0 }, 188 { X86::ROR32ri, X86::ROR32mi, 0 }, 189 { X86::ROR64r1, X86::ROR64m1, 0 }, 190 { X86::ROR64rCL, X86::ROR64mCL, 0 }, 191 { X86::ROR64ri, X86::ROR64mi, 0 }, 192 { X86::ROR8r1, X86::ROR8m1, 0 }, 193 { X86::ROR8rCL, X86::ROR8mCL, 0 }, 194 { X86::ROR8ri, X86::ROR8mi, 0 }, 195 { X86::SAR16r1, X86::SAR16m1, 0 }, 196 { X86::SAR16rCL, X86::SAR16mCL, 0 }, 197 { X86::SAR16ri, X86::SAR16mi, 0 }, 198 { X86::SAR32r1, X86::SAR32m1, 0 }, 199 { X86::SAR32rCL, X86::SAR32mCL, 0 }, 200 { X86::SAR32ri, X86::SAR32mi, 0 }, 201 { X86::SAR64r1, X86::SAR64m1, 0 }, 202 { X86::SAR64rCL, X86::SAR64mCL, 0 }, 203 { X86::SAR64ri, X86::SAR64mi, 0 }, 204 { X86::SAR8r1, X86::SAR8m1, 0 }, 205 { X86::SAR8rCL, X86::SAR8mCL, 0 }, 206 { X86::SAR8ri, X86::SAR8mi, 0 }, 207 { X86::SBB32ri, X86::SBB32mi, 0 }, 208 { X86::SBB32ri8, X86::SBB32mi8, 0 }, 209 { X86::SBB32rr, X86::SBB32mr, 0 }, 210 { X86::SBB64ri32, X86::SBB64mi32, 0 }, 211 { X86::SBB64ri8, X86::SBB64mi8, 0 }, 212 { X86::SBB64rr, X86::SBB64mr, 0 }, 213 { X86::SHL16rCL, X86::SHL16mCL, 0 }, 214 { X86::SHL16ri, X86::SHL16mi, 0 }, 215 { X86::SHL32rCL, X86::SHL32mCL, 0 }, 216 { X86::SHL32ri, X86::SHL32mi, 0 }, 217 { X86::SHL64rCL, X86::SHL64mCL, 0 }, 218 { X86::SHL64ri, X86::SHL64mi, 0 }, 219 { X86::SHL8rCL, X86::SHL8mCL, 0 }, 220 { X86::SHL8ri, X86::SHL8mi, 0 }, 221 { X86::SHLD16rrCL, X86::SHLD16mrCL, 0 }, 222 { X86::SHLD16rri8, X86::SHLD16mri8, 0 }, 223 { X86::SHLD32rrCL, X86::SHLD32mrCL, 0 }, 224 { X86::SHLD32rri8, X86::SHLD32mri8, 0 }, 225 { X86::SHLD64rrCL, X86::SHLD64mrCL, 0 }, 226 { X86::SHLD64rri8, X86::SHLD64mri8, 0 }, 227 { X86::SHR16r1, X86::SHR16m1, 0 }, 228 { X86::SHR16rCL, X86::SHR16mCL, 0 }, 229 { X86::SHR16ri, X86::SHR16mi, 0 }, 230 { X86::SHR32r1, X86::SHR32m1, 0 }, 231 { X86::SHR32rCL, X86::SHR32mCL, 0 }, 232 { X86::SHR32ri, X86::SHR32mi, 0 }, 233 { X86::SHR64r1, X86::SHR64m1, 0 }, 234 { X86::SHR64rCL, X86::SHR64mCL, 0 }, 235 { X86::SHR64ri, X86::SHR64mi, 0 }, 236 { X86::SHR8r1, X86::SHR8m1, 0 }, 237 { X86::SHR8rCL, X86::SHR8mCL, 0 }, 238 { X86::SHR8ri, X86::SHR8mi, 0 }, 239 { X86::SHRD16rrCL, X86::SHRD16mrCL, 0 }, 240 { X86::SHRD16rri8, X86::SHRD16mri8, 0 }, 241 { X86::SHRD32rrCL, X86::SHRD32mrCL, 0 }, 242 { X86::SHRD32rri8, X86::SHRD32mri8, 0 }, 243 { X86::SHRD64rrCL, X86::SHRD64mrCL, 0 }, 244 { X86::SHRD64rri8, X86::SHRD64mri8, 0 }, 245 { X86::SUB16ri, X86::SUB16mi, 0 }, 246 { X86::SUB16ri8, X86::SUB16mi8, 0 }, 247 { X86::SUB16rr, X86::SUB16mr, 0 }, 248 { X86::SUB32ri, X86::SUB32mi, 0 }, 249 { X86::SUB32ri8, X86::SUB32mi8, 0 }, 250 { X86::SUB32rr, X86::SUB32mr, 0 }, 251 { X86::SUB64ri32, X86::SUB64mi32, 0 }, 252 { X86::SUB64ri8, X86::SUB64mi8, 0 }, 253 { X86::SUB64rr, X86::SUB64mr, 0 }, 254 { X86::SUB8ri, X86::SUB8mi, 0 }, 255 { X86::SUB8rr, X86::SUB8mr, 0 }, 256 { X86::XOR16ri, X86::XOR16mi, 0 }, 257 { X86::XOR16ri8, X86::XOR16mi8, 0 }, 258 { X86::XOR16rr, X86::XOR16mr, 0 }, 259 { X86::XOR32ri, X86::XOR32mi, 0 }, 260 { X86::XOR32ri8, X86::XOR32mi8, 0 }, 261 { X86::XOR32rr, X86::XOR32mr, 0 }, 262 { X86::XOR64ri32, X86::XOR64mi32, 0 }, 263 { X86::XOR64ri8, X86::XOR64mi8, 0 }, 264 { X86::XOR64rr, X86::XOR64mr, 0 }, 265 { X86::XOR8ri, X86::XOR8mi, 0 }, 266 { X86::XOR8rr, X86::XOR8mr, 0 } 267 }; 268 269 for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) { 270 unsigned RegOp = OpTbl2Addr[i].RegOp; 271 unsigned MemOp = OpTbl2Addr[i].MemOp; 272 unsigned Flags = OpTbl2Addr[i].Flags; 273 AddTableEntry(RegOp2MemOpTable2Addr, MemOp2RegOpTable, 274 RegOp, MemOp, 275 // Index 0, folded load and store, no alignment 
requirement. 276 Flags | TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE); 277 } 278 279 static const X86OpTblEntry OpTbl0[] = { 280 { X86::BT16ri8, X86::BT16mi8, TB_FOLDED_LOAD }, 281 { X86::BT32ri8, X86::BT32mi8, TB_FOLDED_LOAD }, 282 { X86::BT64ri8, X86::BT64mi8, TB_FOLDED_LOAD }, 283 { X86::CALL32r, X86::CALL32m, TB_FOLDED_LOAD }, 284 { X86::CALL64r, X86::CALL64m, TB_FOLDED_LOAD }, 285 { X86::CMP16ri, X86::CMP16mi, TB_FOLDED_LOAD }, 286 { X86::CMP16ri8, X86::CMP16mi8, TB_FOLDED_LOAD }, 287 { X86::CMP16rr, X86::CMP16mr, TB_FOLDED_LOAD }, 288 { X86::CMP32ri, X86::CMP32mi, TB_FOLDED_LOAD }, 289 { X86::CMP32ri8, X86::CMP32mi8, TB_FOLDED_LOAD }, 290 { X86::CMP32rr, X86::CMP32mr, TB_FOLDED_LOAD }, 291 { X86::CMP64ri32, X86::CMP64mi32, TB_FOLDED_LOAD }, 292 { X86::CMP64ri8, X86::CMP64mi8, TB_FOLDED_LOAD }, 293 { X86::CMP64rr, X86::CMP64mr, TB_FOLDED_LOAD }, 294 { X86::CMP8ri, X86::CMP8mi, TB_FOLDED_LOAD }, 295 { X86::CMP8rr, X86::CMP8mr, TB_FOLDED_LOAD }, 296 { X86::DIV16r, X86::DIV16m, TB_FOLDED_LOAD }, 297 { X86::DIV32r, X86::DIV32m, TB_FOLDED_LOAD }, 298 { X86::DIV64r, X86::DIV64m, TB_FOLDED_LOAD }, 299 { X86::DIV8r, X86::DIV8m, TB_FOLDED_LOAD }, 300 { X86::EXTRACTPSrr, X86::EXTRACTPSmr, TB_FOLDED_STORE | TB_ALIGN_16 }, 301 { X86::FsMOVAPDrr, X86::MOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE }, 302 { X86::FsMOVAPSrr, X86::MOVSSmr, TB_FOLDED_STORE | TB_NO_REVERSE }, 303 { X86::IDIV16r, X86::IDIV16m, TB_FOLDED_LOAD }, 304 { X86::IDIV32r, X86::IDIV32m, TB_FOLDED_LOAD }, 305 { X86::IDIV64r, X86::IDIV64m, TB_FOLDED_LOAD }, 306 { X86::IDIV8r, X86::IDIV8m, TB_FOLDED_LOAD }, 307 { X86::IMUL16r, X86::IMUL16m, TB_FOLDED_LOAD }, 308 { X86::IMUL32r, X86::IMUL32m, TB_FOLDED_LOAD }, 309 { X86::IMUL64r, X86::IMUL64m, TB_FOLDED_LOAD }, 310 { X86::IMUL8r, X86::IMUL8m, TB_FOLDED_LOAD }, 311 { X86::JMP32r, X86::JMP32m, TB_FOLDED_LOAD }, 312 { X86::JMP64r, X86::JMP64m, TB_FOLDED_LOAD }, 313 { X86::MOV16ri, X86::MOV16mi, TB_FOLDED_STORE }, 314 { X86::MOV16rr, X86::MOV16mr, TB_FOLDED_STORE }, 315 { X86::MOV32ri, X86::MOV32mi, TB_FOLDED_STORE }, 316 { X86::MOV32rr, X86::MOV32mr, TB_FOLDED_STORE }, 317 { X86::MOV64ri32, X86::MOV64mi32, TB_FOLDED_STORE }, 318 { X86::MOV64rr, X86::MOV64mr, TB_FOLDED_STORE }, 319 { X86::MOV8ri, X86::MOV8mi, TB_FOLDED_STORE }, 320 { X86::MOV8rr, X86::MOV8mr, TB_FOLDED_STORE }, 321 { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, TB_FOLDED_STORE }, 322 { X86::MOVAPDrr, X86::MOVAPDmr, TB_FOLDED_STORE | TB_ALIGN_16 }, 323 { X86::MOVAPSrr, X86::MOVAPSmr, TB_FOLDED_STORE | TB_ALIGN_16 }, 324 { X86::MOVDQArr, X86::MOVDQAmr, TB_FOLDED_STORE | TB_ALIGN_16 }, 325 { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, TB_FOLDED_STORE }, 326 { X86::MOVPQIto64rr,X86::MOVPQI2QImr, TB_FOLDED_STORE }, 327 { X86::MOVSDto64rr, X86::MOVSDto64mr, TB_FOLDED_STORE }, 328 { X86::MOVSS2DIrr, X86::MOVSS2DImr, TB_FOLDED_STORE }, 329 { X86::MOVUPDrr, X86::MOVUPDmr, TB_FOLDED_STORE }, 330 { X86::MOVUPSrr, X86::MOVUPSmr, TB_FOLDED_STORE }, 331 { X86::MUL16r, X86::MUL16m, TB_FOLDED_LOAD }, 332 { X86::MUL32r, X86::MUL32m, TB_FOLDED_LOAD }, 333 { X86::MUL64r, X86::MUL64m, TB_FOLDED_LOAD }, 334 { X86::MUL8r, X86::MUL8m, TB_FOLDED_LOAD }, 335 { X86::SETAEr, X86::SETAEm, TB_FOLDED_STORE }, 336 { X86::SETAr, X86::SETAm, TB_FOLDED_STORE }, 337 { X86::SETBEr, X86::SETBEm, TB_FOLDED_STORE }, 338 { X86::SETBr, X86::SETBm, TB_FOLDED_STORE }, 339 { X86::SETEr, X86::SETEm, TB_FOLDED_STORE }, 340 { X86::SETGEr, X86::SETGEm, TB_FOLDED_STORE }, 341 { X86::SETGr, X86::SETGm, TB_FOLDED_STORE }, 342 { X86::SETLEr, X86::SETLEm, TB_FOLDED_STORE }, 343 { 
X86::SETLr, X86::SETLm, TB_FOLDED_STORE }, 344 { X86::SETNEr, X86::SETNEm, TB_FOLDED_STORE }, 345 { X86::SETNOr, X86::SETNOm, TB_FOLDED_STORE }, 346 { X86::SETNPr, X86::SETNPm, TB_FOLDED_STORE }, 347 { X86::SETNSr, X86::SETNSm, TB_FOLDED_STORE }, 348 { X86::SETOr, X86::SETOm, TB_FOLDED_STORE }, 349 { X86::SETPr, X86::SETPm, TB_FOLDED_STORE }, 350 { X86::SETSr, X86::SETSm, TB_FOLDED_STORE }, 351 { X86::TAILJMPr, X86::TAILJMPm, TB_FOLDED_LOAD }, 352 { X86::TAILJMPr64, X86::TAILJMPm64, TB_FOLDED_LOAD }, 353 { X86::TEST16ri, X86::TEST16mi, TB_FOLDED_LOAD }, 354 { X86::TEST32ri, X86::TEST32mi, TB_FOLDED_LOAD }, 355 { X86::TEST64ri32, X86::TEST64mi32, TB_FOLDED_LOAD }, 356 { X86::TEST8ri, X86::TEST8mi, TB_FOLDED_LOAD }, 357 // AVX 128-bit versions of foldable instructions 358 { X86::VEXTRACTPSrr,X86::VEXTRACTPSmr, TB_FOLDED_STORE | TB_ALIGN_16 }, 359 { X86::FsVMOVAPDrr, X86::VMOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE }, 360 { X86::FsVMOVAPSrr, X86::VMOVSSmr, TB_FOLDED_STORE | TB_NO_REVERSE }, 361 { X86::VEXTRACTF128rr, X86::VEXTRACTF128mr, TB_FOLDED_STORE | TB_ALIGN_16 }, 362 { X86::VMOVAPDrr, X86::VMOVAPDmr, TB_FOLDED_STORE | TB_ALIGN_16 }, 363 { X86::VMOVAPSrr, X86::VMOVAPSmr, TB_FOLDED_STORE | TB_ALIGN_16 }, 364 { X86::VMOVDQArr, X86::VMOVDQAmr, TB_FOLDED_STORE | TB_ALIGN_16 }, 365 { X86::VMOVPDI2DIrr,X86::VMOVPDI2DImr, TB_FOLDED_STORE }, 366 { X86::VMOVPQIto64rr, X86::VMOVPQI2QImr,TB_FOLDED_STORE }, 367 { X86::VMOVSDto64rr,X86::VMOVSDto64mr, TB_FOLDED_STORE }, 368 { X86::VMOVSS2DIrr, X86::VMOVSS2DImr, TB_FOLDED_STORE }, 369 { X86::VMOVUPDrr, X86::VMOVUPDmr, TB_FOLDED_STORE }, 370 { X86::VMOVUPSrr, X86::VMOVUPSmr, TB_FOLDED_STORE }, 371 // AVX 256-bit foldable instructions 372 { X86::VEXTRACTI128rr, X86::VEXTRACTI128mr, TB_FOLDED_STORE | TB_ALIGN_16 }, 373 { X86::VMOVAPDYrr, X86::VMOVAPDYmr, TB_FOLDED_STORE | TB_ALIGN_32 }, 374 { X86::VMOVAPSYrr, X86::VMOVAPSYmr, TB_FOLDED_STORE | TB_ALIGN_32 }, 375 { X86::VMOVDQAYrr, X86::VMOVDQAYmr, TB_FOLDED_STORE | TB_ALIGN_32 }, 376 { X86::VMOVUPDYrr, X86::VMOVUPDYmr, TB_FOLDED_STORE }, 377 { X86::VMOVUPSYrr, X86::VMOVUPSYmr, TB_FOLDED_STORE } 378 }; 379 380 for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) { 381 unsigned RegOp = OpTbl0[i].RegOp; 382 unsigned MemOp = OpTbl0[i].MemOp; 383 unsigned Flags = OpTbl0[i].Flags; 384 AddTableEntry(RegOp2MemOpTable0, MemOp2RegOpTable, 385 RegOp, MemOp, TB_INDEX_0 | Flags); 386 } 387 388 static const X86OpTblEntry OpTbl1[] = { 389 { X86::CMP16rr, X86::CMP16rm, 0 }, 390 { X86::CMP32rr, X86::CMP32rm, 0 }, 391 { X86::CMP64rr, X86::CMP64rm, 0 }, 392 { X86::CMP8rr, X86::CMP8rm, 0 }, 393 { X86::CVTSD2SSrr, X86::CVTSD2SSrm, 0 }, 394 { X86::CVTSI2SD64rr, X86::CVTSI2SD64rm, 0 }, 395 { X86::CVTSI2SDrr, X86::CVTSI2SDrm, 0 }, 396 { X86::CVTSI2SS64rr, X86::CVTSI2SS64rm, 0 }, 397 { X86::CVTSI2SSrr, X86::CVTSI2SSrm, 0 }, 398 { X86::CVTSS2SDrr, X86::CVTSS2SDrm, 0 }, 399 { X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm, 0 }, 400 { X86::CVTTSD2SIrr, X86::CVTTSD2SIrm, 0 }, 401 { X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm, 0 }, 402 { X86::CVTTSS2SIrr, X86::CVTTSS2SIrm, 0 }, 403 { X86::FsMOVAPDrr, X86::MOVSDrm, TB_NO_REVERSE }, 404 { X86::FsMOVAPSrr, X86::MOVSSrm, TB_NO_REVERSE }, 405 { X86::IMUL16rri, X86::IMUL16rmi, 0 }, 406 { X86::IMUL16rri8, X86::IMUL16rmi8, 0 }, 407 { X86::IMUL32rri, X86::IMUL32rmi, 0 }, 408 { X86::IMUL32rri8, X86::IMUL32rmi8, 0 }, 409 { X86::IMUL64rri32, X86::IMUL64rmi32, 0 }, 410 { X86::IMUL64rri8, X86::IMUL64rmi8, 0 }, 411 { X86::Int_COMISDrr, X86::Int_COMISDrm, 0 }, 412 { X86::Int_COMISSrr, X86::Int_COMISSrm, 0 
}, 413 { X86::CVTSD2SI64rr, X86::CVTSD2SI64rm, 0 }, 414 { X86::CVTSD2SIrr, X86::CVTSD2SIrm, 0 }, 415 { X86::CVTSS2SI64rr, X86::CVTSS2SI64rm, 0 }, 416 { X86::CVTSS2SIrr, X86::CVTSS2SIrm, 0 }, 417 { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm, 0 }, 418 { X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm, 0 }, 419 { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm, 0 }, 420 { X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm, 0 }, 421 { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm, 0 }, 422 { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm, 0 }, 423 { X86::CVTTPD2DQrr, X86::CVTTPD2DQrm, TB_ALIGN_16 }, 424 { X86::CVTTPS2DQrr, X86::CVTTPS2DQrm, TB_ALIGN_16 }, 425 { X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm, 0 }, 426 { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm, 0 }, 427 { X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm, 0 }, 428 { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm, 0 }, 429 { X86::Int_UCOMISDrr, X86::Int_UCOMISDrm, 0 }, 430 { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm, 0 }, 431 { X86::MOV16rr, X86::MOV16rm, 0 }, 432 { X86::MOV32rr, X86::MOV32rm, 0 }, 433 { X86::MOV64rr, X86::MOV64rm, 0 }, 434 { X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 }, 435 { X86::MOV64toSDrr, X86::MOV64toSDrm, 0 }, 436 { X86::MOV8rr, X86::MOV8rm, 0 }, 437 { X86::MOVAPDrr, X86::MOVAPDrm, TB_ALIGN_16 }, 438 { X86::MOVAPSrr, X86::MOVAPSrm, TB_ALIGN_16 }, 439 { X86::MOVDDUPrr, X86::MOVDDUPrm, 0 }, 440 { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm, 0 }, 441 { X86::MOVDI2SSrr, X86::MOVDI2SSrm, 0 }, 442 { X86::MOVDQArr, X86::MOVDQArm, TB_ALIGN_16 }, 443 { X86::MOVSHDUPrr, X86::MOVSHDUPrm, TB_ALIGN_16 }, 444 { X86::MOVSLDUPrr, X86::MOVSLDUPrm, TB_ALIGN_16 }, 445 { X86::MOVSX16rr8, X86::MOVSX16rm8, 0 }, 446 { X86::MOVSX32rr16, X86::MOVSX32rm16, 0 }, 447 { X86::MOVSX32rr8, X86::MOVSX32rm8, 0 }, 448 { X86::MOVSX64rr16, X86::MOVSX64rm16, 0 }, 449 { X86::MOVSX64rr32, X86::MOVSX64rm32, 0 }, 450 { X86::MOVSX64rr8, X86::MOVSX64rm8, 0 }, 451 { X86::MOVUPDrr, X86::MOVUPDrm, TB_ALIGN_16 }, 452 { X86::MOVUPSrr, X86::MOVUPSrm, 0 }, 453 { X86::MOVZDI2PDIrr, X86::MOVZDI2PDIrm, 0 }, 454 { X86::MOVZQI2PQIrr, X86::MOVZQI2PQIrm, 0 }, 455 { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm, TB_ALIGN_16 }, 456 { X86::MOVZX16rr8, X86::MOVZX16rm8, 0 }, 457 { X86::MOVZX32rr16, X86::MOVZX32rm16, 0 }, 458 { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8, 0 }, 459 { X86::MOVZX32rr8, X86::MOVZX32rm8, 0 }, 460 { X86::MOVZX64rr16, X86::MOVZX64rm16, 0 }, 461 { X86::MOVZX64rr32, X86::MOVZX64rm32, 0 }, 462 { X86::MOVZX64rr8, X86::MOVZX64rm8, 0 }, 463 { X86::PABSBrr128, X86::PABSBrm128, TB_ALIGN_16 }, 464 { X86::PABSDrr128, X86::PABSDrm128, TB_ALIGN_16 }, 465 { X86::PABSWrr128, X86::PABSWrm128, TB_ALIGN_16 }, 466 { X86::PSHUFDri, X86::PSHUFDmi, TB_ALIGN_16 }, 467 { X86::PSHUFHWri, X86::PSHUFHWmi, TB_ALIGN_16 }, 468 { X86::PSHUFLWri, X86::PSHUFLWmi, TB_ALIGN_16 }, 469 { X86::RCPPSr, X86::RCPPSm, TB_ALIGN_16 }, 470 { X86::RCPPSr_Int, X86::RCPPSm_Int, TB_ALIGN_16 }, 471 { X86::RSQRTPSr, X86::RSQRTPSm, TB_ALIGN_16 }, 472 { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int, TB_ALIGN_16 }, 473 { X86::RSQRTSSr, X86::RSQRTSSm, 0 }, 474 { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int, 0 }, 475 { X86::SQRTPDr, X86::SQRTPDm, TB_ALIGN_16 }, 476 { X86::SQRTPDr_Int, X86::SQRTPDm_Int, TB_ALIGN_16 }, 477 { X86::SQRTPSr, X86::SQRTPSm, TB_ALIGN_16 }, 478 { X86::SQRTPSr_Int, X86::SQRTPSm_Int, TB_ALIGN_16 }, 479 { X86::SQRTSDr, X86::SQRTSDm, 0 }, 480 { X86::SQRTSDr_Int, X86::SQRTSDm_Int, 0 }, 481 { X86::SQRTSSr, X86::SQRTSSm, 0 }, 482 { X86::SQRTSSr_Int, X86::SQRTSSm_Int, 0 }, 483 { X86::TEST16rr, X86::TEST16rm, 0 }, 484 { X86::TEST32rr, X86::TEST32rm, 0 
}, 485 { X86::TEST64rr, X86::TEST64rm, 0 }, 486 { X86::TEST8rr, X86::TEST8rm, 0 }, 487 // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0 488 { X86::UCOMISDrr, X86::UCOMISDrm, 0 }, 489 { X86::UCOMISSrr, X86::UCOMISSrm, 0 }, 490 // AVX 128-bit versions of foldable instructions 491 { X86::Int_VCOMISDrr, X86::Int_VCOMISDrm, 0 }, 492 { X86::Int_VCOMISSrr, X86::Int_VCOMISSrm, 0 }, 493 { X86::Int_VUCOMISDrr, X86::Int_VUCOMISDrm, 0 }, 494 { X86::Int_VUCOMISSrr, X86::Int_VUCOMISSrm, 0 }, 495 { X86::VCVTTSD2SI64rr, X86::VCVTTSD2SI64rm, 0 }, 496 { X86::Int_VCVTTSD2SI64rr,X86::Int_VCVTTSD2SI64rm,0 }, 497 { X86::VCVTTSD2SIrr, X86::VCVTTSD2SIrm, 0 }, 498 { X86::Int_VCVTTSD2SIrr,X86::Int_VCVTTSD2SIrm, 0 }, 499 { X86::VCVTTSS2SI64rr, X86::VCVTTSS2SI64rm, 0 }, 500 { X86::Int_VCVTTSS2SI64rr,X86::Int_VCVTTSS2SI64rm,0 }, 501 { X86::VCVTTSS2SIrr, X86::VCVTTSS2SIrm, 0 }, 502 { X86::Int_VCVTTSS2SIrr,X86::Int_VCVTTSS2SIrm, 0 }, 503 { X86::VCVTSD2SI64rr, X86::VCVTSD2SI64rm, 0 }, 504 { X86::VCVTSD2SIrr, X86::VCVTSD2SIrm, 0 }, 505 { X86::VCVTSS2SI64rr, X86::VCVTSS2SI64rm, 0 }, 506 { X86::VCVTSS2SIrr, X86::VCVTSS2SIrm, 0 }, 507 { X86::FsVMOVAPDrr, X86::VMOVSDrm, TB_NO_REVERSE }, 508 { X86::FsVMOVAPSrr, X86::VMOVSSrm, TB_NO_REVERSE }, 509 { X86::VMOV64toPQIrr, X86::VMOVQI2PQIrm, 0 }, 510 { X86::VMOV64toSDrr, X86::VMOV64toSDrm, 0 }, 511 { X86::VMOVAPDrr, X86::VMOVAPDrm, TB_ALIGN_16 }, 512 { X86::VMOVAPSrr, X86::VMOVAPSrm, TB_ALIGN_16 }, 513 { X86::VMOVDDUPrr, X86::VMOVDDUPrm, 0 }, 514 { X86::VMOVDI2PDIrr, X86::VMOVDI2PDIrm, 0 }, 515 { X86::VMOVDI2SSrr, X86::VMOVDI2SSrm, 0 }, 516 { X86::VMOVDQArr, X86::VMOVDQArm, TB_ALIGN_16 }, 517 { X86::VMOVSLDUPrr, X86::VMOVSLDUPrm, TB_ALIGN_16 }, 518 { X86::VMOVSHDUPrr, X86::VMOVSHDUPrm, TB_ALIGN_16 }, 519 { X86::VMOVUPDrr, X86::VMOVUPDrm, TB_ALIGN_16 }, 520 { X86::VMOVUPSrr, X86::VMOVUPSrm, 0 }, 521 { X86::VMOVZDI2PDIrr, X86::VMOVZDI2PDIrm, 0 }, 522 { X86::VMOVZQI2PQIrr, X86::VMOVZQI2PQIrm, 0 }, 523 { X86::VMOVZPQILo2PQIrr,X86::VMOVZPQILo2PQIrm, TB_ALIGN_16 }, 524 { X86::VPABSBrr128, X86::VPABSBrm128, TB_ALIGN_16 }, 525 { X86::VPABSDrr128, X86::VPABSDrm128, TB_ALIGN_16 }, 526 { X86::VPABSWrr128, X86::VPABSWrm128, TB_ALIGN_16 }, 527 { X86::VPERMILPDri, X86::VPERMILPDmi, TB_ALIGN_16 }, 528 { X86::VPERMILPSri, X86::VPERMILPSmi, TB_ALIGN_16 }, 529 { X86::VPSHUFDri, X86::VPSHUFDmi, TB_ALIGN_16 }, 530 { X86::VPSHUFHWri, X86::VPSHUFHWmi, TB_ALIGN_16 }, 531 { X86::VPSHUFLWri, X86::VPSHUFLWmi, TB_ALIGN_16 }, 532 { X86::VRCPPSr, X86::VRCPPSm, TB_ALIGN_16 }, 533 { X86::VRCPPSr_Int, X86::VRCPPSm_Int, TB_ALIGN_16 }, 534 { X86::VRSQRTPSr, X86::VRSQRTPSm, TB_ALIGN_16 }, 535 { X86::VRSQRTPSr_Int, X86::VRSQRTPSm_Int, TB_ALIGN_16 }, 536 { X86::VSQRTPDr, X86::VSQRTPDm, TB_ALIGN_16 }, 537 { X86::VSQRTPDr_Int, X86::VSQRTPDm_Int, TB_ALIGN_16 }, 538 { X86::VSQRTPSr, X86::VSQRTPSm, TB_ALIGN_16 }, 539 { X86::VSQRTPSr_Int, X86::VSQRTPSm_Int, TB_ALIGN_16 }, 540 { X86::VUCOMISDrr, X86::VUCOMISDrm, 0 }, 541 { X86::VUCOMISSrr, X86::VUCOMISSrm, 0 }, 542 { X86::VBROADCASTSSrr, X86::VBROADCASTSSrm, TB_NO_REVERSE }, 543 544 // AVX 256-bit foldable instructions 545 { X86::VMOVAPDYrr, X86::VMOVAPDYrm, TB_ALIGN_32 }, 546 { X86::VMOVAPSYrr, X86::VMOVAPSYrm, TB_ALIGN_32 }, 547 { X86::VMOVDQAYrr, X86::VMOVDQAYrm, TB_ALIGN_32 }, 548 { X86::VMOVUPDYrr, X86::VMOVUPDYrm, 0 }, 549 { X86::VMOVUPSYrr, X86::VMOVUPSYrm, 0 }, 550 { X86::VPERMILPDYri, X86::VPERMILPDYmi, TB_ALIGN_32 }, 551 { X86::VPERMILPSYri, X86::VPERMILPSYmi, TB_ALIGN_32 }, 552 553 // AVX2 foldable instructions 554 { X86::VPABSBrr256, X86::VPABSBrm256, TB_ALIGN_32 }, 
555 { X86::VPABSDrr256, X86::VPABSDrm256, TB_ALIGN_32 }, 556 { X86::VPABSWrr256, X86::VPABSWrm256, TB_ALIGN_32 }, 557 { X86::VPSHUFDYri, X86::VPSHUFDYmi, TB_ALIGN_32 }, 558 { X86::VPSHUFHWYri, X86::VPSHUFHWYmi, TB_ALIGN_32 }, 559 { X86::VPSHUFLWYri, X86::VPSHUFLWYmi, TB_ALIGN_32 }, 560 { X86::VRCPPSYr, X86::VRCPPSYm, TB_ALIGN_32 }, 561 { X86::VRCPPSYr_Int, X86::VRCPPSYm_Int, TB_ALIGN_32 }, 562 { X86::VRSQRTPSYr, X86::VRSQRTPSYm, TB_ALIGN_32 }, 563 { X86::VRSQRTPSYr_Int, X86::VRSQRTPSYm_Int, TB_ALIGN_32 }, 564 { X86::VSQRTPDYr, X86::VSQRTPDYm, TB_ALIGN_32 }, 565 { X86::VSQRTPDYr_Int, X86::VSQRTPDYm_Int, TB_ALIGN_32 }, 566 { X86::VSQRTPSYr, X86::VSQRTPSYm, TB_ALIGN_32 }, 567 { X86::VSQRTPSYr_Int, X86::VSQRTPSYm_Int, TB_ALIGN_32 }, 568 { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE }, 569 { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE }, 570 }; 571 572 for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) { 573 unsigned RegOp = OpTbl1[i].RegOp; 574 unsigned MemOp = OpTbl1[i].MemOp; 575 unsigned Flags = OpTbl1[i].Flags; 576 AddTableEntry(RegOp2MemOpTable1, MemOp2RegOpTable, 577 RegOp, MemOp, 578 // Index 1, folded load 579 Flags | TB_INDEX_1 | TB_FOLDED_LOAD); 580 } 581 582 static const X86OpTblEntry OpTbl2[] = { 583 { X86::ADC32rr, X86::ADC32rm, 0 }, 584 { X86::ADC64rr, X86::ADC64rm, 0 }, 585 { X86::ADD16rr, X86::ADD16rm, 0 }, 586 { X86::ADD16rr_DB, X86::ADD16rm, TB_NO_REVERSE }, 587 { X86::ADD32rr, X86::ADD32rm, 0 }, 588 { X86::ADD32rr_DB, X86::ADD32rm, TB_NO_REVERSE }, 589 { X86::ADD64rr, X86::ADD64rm, 0 }, 590 { X86::ADD64rr_DB, X86::ADD64rm, TB_NO_REVERSE }, 591 { X86::ADD8rr, X86::ADD8rm, 0 }, 592 { X86::ADDPDrr, X86::ADDPDrm, TB_ALIGN_16 }, 593 { X86::ADDPSrr, X86::ADDPSrm, TB_ALIGN_16 }, 594 { X86::ADDSDrr, X86::ADDSDrm, 0 }, 595 { X86::ADDSSrr, X86::ADDSSrm, 0 }, 596 { X86::ADDSUBPDrr, X86::ADDSUBPDrm, TB_ALIGN_16 }, 597 { X86::ADDSUBPSrr, X86::ADDSUBPSrm, TB_ALIGN_16 }, 598 { X86::AND16rr, X86::AND16rm, 0 }, 599 { X86::AND32rr, X86::AND32rm, 0 }, 600 { X86::AND64rr, X86::AND64rm, 0 }, 601 { X86::AND8rr, X86::AND8rm, 0 }, 602 { X86::ANDNPDrr, X86::ANDNPDrm, TB_ALIGN_16 }, 603 { X86::ANDNPSrr, X86::ANDNPSrm, TB_ALIGN_16 }, 604 { X86::ANDPDrr, X86::ANDPDrm, TB_ALIGN_16 }, 605 { X86::ANDPSrr, X86::ANDPSrm, TB_ALIGN_16 }, 606 { X86::BLENDPDrri, X86::BLENDPDrmi, TB_ALIGN_16 }, 607 { X86::BLENDPSrri, X86::BLENDPSrmi, TB_ALIGN_16 }, 608 { X86::BLENDVPDrr0, X86::BLENDVPDrm0, TB_ALIGN_16 }, 609 { X86::BLENDVPSrr0, X86::BLENDVPSrm0, TB_ALIGN_16 }, 610 { X86::CMOVA16rr, X86::CMOVA16rm, 0 }, 611 { X86::CMOVA32rr, X86::CMOVA32rm, 0 }, 612 { X86::CMOVA64rr, X86::CMOVA64rm, 0 }, 613 { X86::CMOVAE16rr, X86::CMOVAE16rm, 0 }, 614 { X86::CMOVAE32rr, X86::CMOVAE32rm, 0 }, 615 { X86::CMOVAE64rr, X86::CMOVAE64rm, 0 }, 616 { X86::CMOVB16rr, X86::CMOVB16rm, 0 }, 617 { X86::CMOVB32rr, X86::CMOVB32rm, 0 }, 618 { X86::CMOVB64rr, X86::CMOVB64rm, 0 }, 619 { X86::CMOVBE16rr, X86::CMOVBE16rm, 0 }, 620 { X86::CMOVBE32rr, X86::CMOVBE32rm, 0 }, 621 { X86::CMOVBE64rr, X86::CMOVBE64rm, 0 }, 622 { X86::CMOVE16rr, X86::CMOVE16rm, 0 }, 623 { X86::CMOVE32rr, X86::CMOVE32rm, 0 }, 624 { X86::CMOVE64rr, X86::CMOVE64rm, 0 }, 625 { X86::CMOVG16rr, X86::CMOVG16rm, 0 }, 626 { X86::CMOVG32rr, X86::CMOVG32rm, 0 }, 627 { X86::CMOVG64rr, X86::CMOVG64rm, 0 }, 628 { X86::CMOVGE16rr, X86::CMOVGE16rm, 0 }, 629 { X86::CMOVGE32rr, X86::CMOVGE32rm, 0 }, 630 { X86::CMOVGE64rr, X86::CMOVGE64rm, 0 }, 631 { X86::CMOVL16rr, X86::CMOVL16rm, 0 }, 632 { X86::CMOVL32rr, X86::CMOVL32rm, 0 }, 633 { X86::CMOVL64rr, 
X86::CMOVL64rm, 0 }, 634 { X86::CMOVLE16rr, X86::CMOVLE16rm, 0 }, 635 { X86::CMOVLE32rr, X86::CMOVLE32rm, 0 }, 636 { X86::CMOVLE64rr, X86::CMOVLE64rm, 0 }, 637 { X86::CMOVNE16rr, X86::CMOVNE16rm, 0 }, 638 { X86::CMOVNE32rr, X86::CMOVNE32rm, 0 }, 639 { X86::CMOVNE64rr, X86::CMOVNE64rm, 0 }, 640 { X86::CMOVNO16rr, X86::CMOVNO16rm, 0 }, 641 { X86::CMOVNO32rr, X86::CMOVNO32rm, 0 }, 642 { X86::CMOVNO64rr, X86::CMOVNO64rm, 0 }, 643 { X86::CMOVNP16rr, X86::CMOVNP16rm, 0 }, 644 { X86::CMOVNP32rr, X86::CMOVNP32rm, 0 }, 645 { X86::CMOVNP64rr, X86::CMOVNP64rm, 0 }, 646 { X86::CMOVNS16rr, X86::CMOVNS16rm, 0 }, 647 { X86::CMOVNS32rr, X86::CMOVNS32rm, 0 }, 648 { X86::CMOVNS64rr, X86::CMOVNS64rm, 0 }, 649 { X86::CMOVO16rr, X86::CMOVO16rm, 0 }, 650 { X86::CMOVO32rr, X86::CMOVO32rm, 0 }, 651 { X86::CMOVO64rr, X86::CMOVO64rm, 0 }, 652 { X86::CMOVP16rr, X86::CMOVP16rm, 0 }, 653 { X86::CMOVP32rr, X86::CMOVP32rm, 0 }, 654 { X86::CMOVP64rr, X86::CMOVP64rm, 0 }, 655 { X86::CMOVS16rr, X86::CMOVS16rm, 0 }, 656 { X86::CMOVS32rr, X86::CMOVS32rm, 0 }, 657 { X86::CMOVS64rr, X86::CMOVS64rm, 0 }, 658 { X86::CMPPDrri, X86::CMPPDrmi, TB_ALIGN_16 }, 659 { X86::CMPPSrri, X86::CMPPSrmi, TB_ALIGN_16 }, 660 { X86::CMPSDrr, X86::CMPSDrm, 0 }, 661 { X86::CMPSSrr, X86::CMPSSrm, 0 }, 662 { X86::DIVPDrr, X86::DIVPDrm, TB_ALIGN_16 }, 663 { X86::DIVPSrr, X86::DIVPSrm, TB_ALIGN_16 }, 664 { X86::DIVSDrr, X86::DIVSDrm, 0 }, 665 { X86::DIVSSrr, X86::DIVSSrm, 0 }, 666 { X86::FsANDNPDrr, X86::FsANDNPDrm, TB_ALIGN_16 }, 667 { X86::FsANDNPSrr, X86::FsANDNPSrm, TB_ALIGN_16 }, 668 { X86::FsANDPDrr, X86::FsANDPDrm, TB_ALIGN_16 }, 669 { X86::FsANDPSrr, X86::FsANDPSrm, TB_ALIGN_16 }, 670 { X86::FsORPDrr, X86::FsORPDrm, TB_ALIGN_16 }, 671 { X86::FsORPSrr, X86::FsORPSrm, TB_ALIGN_16 }, 672 { X86::FsXORPDrr, X86::FsXORPDrm, TB_ALIGN_16 }, 673 { X86::FsXORPSrr, X86::FsXORPSrm, TB_ALIGN_16 }, 674 { X86::HADDPDrr, X86::HADDPDrm, TB_ALIGN_16 }, 675 { X86::HADDPSrr, X86::HADDPSrm, TB_ALIGN_16 }, 676 { X86::HSUBPDrr, X86::HSUBPDrm, TB_ALIGN_16 }, 677 { X86::HSUBPSrr, X86::HSUBPSrm, TB_ALIGN_16 }, 678 { X86::IMUL16rr, X86::IMUL16rm, 0 }, 679 { X86::IMUL32rr, X86::IMUL32rm, 0 }, 680 { X86::IMUL64rr, X86::IMUL64rm, 0 }, 681 { X86::Int_CMPSDrr, X86::Int_CMPSDrm, 0 }, 682 { X86::Int_CMPSSrr, X86::Int_CMPSSrm, 0 }, 683 { X86::MAXPDrr, X86::MAXPDrm, TB_ALIGN_16 }, 684 { X86::MAXPDrr_Int, X86::MAXPDrm_Int, TB_ALIGN_16 }, 685 { X86::MAXPSrr, X86::MAXPSrm, TB_ALIGN_16 }, 686 { X86::MAXPSrr_Int, X86::MAXPSrm_Int, TB_ALIGN_16 }, 687 { X86::MAXSDrr, X86::MAXSDrm, 0 }, 688 { X86::MAXSDrr_Int, X86::MAXSDrm_Int, 0 }, 689 { X86::MAXSSrr, X86::MAXSSrm, 0 }, 690 { X86::MAXSSrr_Int, X86::MAXSSrm_Int, 0 }, 691 { X86::MINPDrr, X86::MINPDrm, TB_ALIGN_16 }, 692 { X86::MINPDrr_Int, X86::MINPDrm_Int, TB_ALIGN_16 }, 693 { X86::MINPSrr, X86::MINPSrm, TB_ALIGN_16 }, 694 { X86::MINPSrr_Int, X86::MINPSrm_Int, TB_ALIGN_16 }, 695 { X86::MINSDrr, X86::MINSDrm, 0 }, 696 { X86::MINSDrr_Int, X86::MINSDrm_Int, 0 }, 697 { X86::MINSSrr, X86::MINSSrm, 0 }, 698 { X86::MINSSrr_Int, X86::MINSSrm_Int, 0 }, 699 { X86::MPSADBWrri, X86::MPSADBWrmi, TB_ALIGN_16 }, 700 { X86::MULPDrr, X86::MULPDrm, TB_ALIGN_16 }, 701 { X86::MULPSrr, X86::MULPSrm, TB_ALIGN_16 }, 702 { X86::MULSDrr, X86::MULSDrm, 0 }, 703 { X86::MULSSrr, X86::MULSSrm, 0 }, 704 { X86::OR16rr, X86::OR16rm, 0 }, 705 { X86::OR32rr, X86::OR32rm, 0 }, 706 { X86::OR64rr, X86::OR64rm, 0 }, 707 { X86::OR8rr, X86::OR8rm, 0 }, 708 { X86::ORPDrr, X86::ORPDrm, TB_ALIGN_16 }, 709 { X86::ORPSrr, X86::ORPSrm, TB_ALIGN_16 }, 710 { X86::PACKSSDWrr, 
X86::PACKSSDWrm, TB_ALIGN_16 }, 711 { X86::PACKSSWBrr, X86::PACKSSWBrm, TB_ALIGN_16 }, 712 { X86::PACKUSDWrr, X86::PACKUSDWrm, TB_ALIGN_16 }, 713 { X86::PACKUSWBrr, X86::PACKUSWBrm, TB_ALIGN_16 }, 714 { X86::PADDBrr, X86::PADDBrm, TB_ALIGN_16 }, 715 { X86::PADDDrr, X86::PADDDrm, TB_ALIGN_16 }, 716 { X86::PADDQrr, X86::PADDQrm, TB_ALIGN_16 }, 717 { X86::PADDSBrr, X86::PADDSBrm, TB_ALIGN_16 }, 718 { X86::PADDSWrr, X86::PADDSWrm, TB_ALIGN_16 }, 719 { X86::PADDUSBrr, X86::PADDUSBrm, TB_ALIGN_16 }, 720 { X86::PADDUSWrr, X86::PADDUSWrm, TB_ALIGN_16 }, 721 { X86::PADDWrr, X86::PADDWrm, TB_ALIGN_16 }, 722 { X86::PALIGNR128rr, X86::PALIGNR128rm, TB_ALIGN_16 }, 723 { X86::PANDNrr, X86::PANDNrm, TB_ALIGN_16 }, 724 { X86::PANDrr, X86::PANDrm, TB_ALIGN_16 }, 725 { X86::PAVGBrr, X86::PAVGBrm, TB_ALIGN_16 }, 726 { X86::PAVGWrr, X86::PAVGWrm, TB_ALIGN_16 }, 727 { X86::PBLENDWrri, X86::PBLENDWrmi, TB_ALIGN_16 }, 728 { X86::PCMPEQBrr, X86::PCMPEQBrm, TB_ALIGN_16 }, 729 { X86::PCMPEQDrr, X86::PCMPEQDrm, TB_ALIGN_16 }, 730 { X86::PCMPEQQrr, X86::PCMPEQQrm, TB_ALIGN_16 }, 731 { X86::PCMPEQWrr, X86::PCMPEQWrm, TB_ALIGN_16 }, 732 { X86::PCMPGTBrr, X86::PCMPGTBrm, TB_ALIGN_16 }, 733 { X86::PCMPGTDrr, X86::PCMPGTDrm, TB_ALIGN_16 }, 734 { X86::PCMPGTQrr, X86::PCMPGTQrm, TB_ALIGN_16 }, 735 { X86::PCMPGTWrr, X86::PCMPGTWrm, TB_ALIGN_16 }, 736 { X86::PHADDDrr, X86::PHADDDrm, TB_ALIGN_16 }, 737 { X86::PHADDWrr, X86::PHADDWrm, TB_ALIGN_16 }, 738 { X86::PHADDSWrr128, X86::PHADDSWrm128, TB_ALIGN_16 }, 739 { X86::PHSUBDrr, X86::PHSUBDrm, TB_ALIGN_16 }, 740 { X86::PHSUBSWrr128, X86::PHSUBSWrm128, TB_ALIGN_16 }, 741 { X86::PHSUBWrr, X86::PHSUBWrm, TB_ALIGN_16 }, 742 { X86::PINSRWrri, X86::PINSRWrmi, TB_ALIGN_16 }, 743 { X86::PMADDUBSWrr128, X86::PMADDUBSWrm128, TB_ALIGN_16 }, 744 { X86::PMADDWDrr, X86::PMADDWDrm, TB_ALIGN_16 }, 745 { X86::PMAXSWrr, X86::PMAXSWrm, TB_ALIGN_16 }, 746 { X86::PMAXUBrr, X86::PMAXUBrm, TB_ALIGN_16 }, 747 { X86::PMINSWrr, X86::PMINSWrm, TB_ALIGN_16 }, 748 { X86::PMINUBrr, X86::PMINUBrm, TB_ALIGN_16 }, 749 { X86::PMULDQrr, X86::PMULDQrm, TB_ALIGN_16 }, 750 { X86::PMULHRSWrr128, X86::PMULHRSWrm128, TB_ALIGN_16 }, 751 { X86::PMULHUWrr, X86::PMULHUWrm, TB_ALIGN_16 }, 752 { X86::PMULHWrr, X86::PMULHWrm, TB_ALIGN_16 }, 753 { X86::PMULLDrr, X86::PMULLDrm, TB_ALIGN_16 }, 754 { X86::PMULLWrr, X86::PMULLWrm, TB_ALIGN_16 }, 755 { X86::PMULUDQrr, X86::PMULUDQrm, TB_ALIGN_16 }, 756 { X86::PORrr, X86::PORrm, TB_ALIGN_16 }, 757 { X86::PSADBWrr, X86::PSADBWrm, TB_ALIGN_16 }, 758 { X86::PSHUFBrr, X86::PSHUFBrm, TB_ALIGN_16 }, 759 { X86::PSIGNBrr, X86::PSIGNBrm, TB_ALIGN_16 }, 760 { X86::PSIGNWrr, X86::PSIGNWrm, TB_ALIGN_16 }, 761 { X86::PSIGNDrr, X86::PSIGNDrm, TB_ALIGN_16 }, 762 { X86::PSLLDrr, X86::PSLLDrm, TB_ALIGN_16 }, 763 { X86::PSLLQrr, X86::PSLLQrm, TB_ALIGN_16 }, 764 { X86::PSLLWrr, X86::PSLLWrm, TB_ALIGN_16 }, 765 { X86::PSRADrr, X86::PSRADrm, TB_ALIGN_16 }, 766 { X86::PSRAWrr, X86::PSRAWrm, TB_ALIGN_16 }, 767 { X86::PSRLDrr, X86::PSRLDrm, TB_ALIGN_16 }, 768 { X86::PSRLQrr, X86::PSRLQrm, TB_ALIGN_16 }, 769 { X86::PSRLWrr, X86::PSRLWrm, TB_ALIGN_16 }, 770 { X86::PSUBBrr, X86::PSUBBrm, TB_ALIGN_16 }, 771 { X86::PSUBDrr, X86::PSUBDrm, TB_ALIGN_16 }, 772 { X86::PSUBSBrr, X86::PSUBSBrm, TB_ALIGN_16 }, 773 { X86::PSUBSWrr, X86::PSUBSWrm, TB_ALIGN_16 }, 774 { X86::PSUBWrr, X86::PSUBWrm, TB_ALIGN_16 }, 775 { X86::PUNPCKHBWrr, X86::PUNPCKHBWrm, TB_ALIGN_16 }, 776 { X86::PUNPCKHDQrr, X86::PUNPCKHDQrm, TB_ALIGN_16 }, 777 { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm, TB_ALIGN_16 }, 778 { X86::PUNPCKHWDrr, X86::PUNPCKHWDrm, 
TB_ALIGN_16 }, 779 { X86::PUNPCKLBWrr, X86::PUNPCKLBWrm, TB_ALIGN_16 }, 780 { X86::PUNPCKLDQrr, X86::PUNPCKLDQrm, TB_ALIGN_16 }, 781 { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm, TB_ALIGN_16 }, 782 { X86::PUNPCKLWDrr, X86::PUNPCKLWDrm, TB_ALIGN_16 }, 783 { X86::PXORrr, X86::PXORrm, TB_ALIGN_16 }, 784 { X86::SBB32rr, X86::SBB32rm, 0 }, 785 { X86::SBB64rr, X86::SBB64rm, 0 }, 786 { X86::SHUFPDrri, X86::SHUFPDrmi, TB_ALIGN_16 }, 787 { X86::SHUFPSrri, X86::SHUFPSrmi, TB_ALIGN_16 }, 788 { X86::SUB16rr, X86::SUB16rm, 0 }, 789 { X86::SUB32rr, X86::SUB32rm, 0 }, 790 { X86::SUB64rr, X86::SUB64rm, 0 }, 791 { X86::SUB8rr, X86::SUB8rm, 0 }, 792 { X86::SUBPDrr, X86::SUBPDrm, TB_ALIGN_16 }, 793 { X86::SUBPSrr, X86::SUBPSrm, TB_ALIGN_16 }, 794 { X86::SUBSDrr, X86::SUBSDrm, 0 }, 795 { X86::SUBSSrr, X86::SUBSSrm, 0 }, 796 // FIXME: TEST*rr -> swapped operand of TEST*mr. 797 { X86::UNPCKHPDrr, X86::UNPCKHPDrm, TB_ALIGN_16 }, 798 { X86::UNPCKHPSrr, X86::UNPCKHPSrm, TB_ALIGN_16 }, 799 { X86::UNPCKLPDrr, X86::UNPCKLPDrm, TB_ALIGN_16 }, 800 { X86::UNPCKLPSrr, X86::UNPCKLPSrm, TB_ALIGN_16 }, 801 { X86::XOR16rr, X86::XOR16rm, 0 }, 802 { X86::XOR32rr, X86::XOR32rm, 0 }, 803 { X86::XOR64rr, X86::XOR64rm, 0 }, 804 { X86::XOR8rr, X86::XOR8rm, 0 }, 805 { X86::XORPDrr, X86::XORPDrm, TB_ALIGN_16 }, 806 { X86::XORPSrr, X86::XORPSrm, TB_ALIGN_16 }, 807 // AVX 128-bit versions of foldable instructions 808 { X86::VCVTSD2SSrr, X86::VCVTSD2SSrm, 0 }, 809 { X86::Int_VCVTSD2SSrr, X86::Int_VCVTSD2SSrm, 0 }, 810 { X86::VCVTSI2SD64rr, X86::VCVTSI2SD64rm, 0 }, 811 { X86::Int_VCVTSI2SD64rr, X86::Int_VCVTSI2SD64rm, 0 }, 812 { X86::VCVTSI2SDrr, X86::VCVTSI2SDrm, 0 }, 813 { X86::Int_VCVTSI2SDrr, X86::Int_VCVTSI2SDrm, 0 }, 814 { X86::VCVTSI2SS64rr, X86::VCVTSI2SS64rm, 0 }, 815 { X86::Int_VCVTSI2SS64rr, X86::Int_VCVTSI2SS64rm, 0 }, 816 { X86::VCVTSI2SSrr, X86::VCVTSI2SSrm, 0 }, 817 { X86::Int_VCVTSI2SSrr, X86::Int_VCVTSI2SSrm, 0 }, 818 { X86::VCVTSS2SDrr, X86::VCVTSS2SDrm, 0 }, 819 { X86::Int_VCVTSS2SDrr, X86::Int_VCVTSS2SDrm, 0 }, 820 { X86::VCVTTPD2DQrr, X86::VCVTTPD2DQXrm, TB_ALIGN_16 }, 821 { X86::VCVTTPS2DQrr, X86::VCVTTPS2DQrm, TB_ALIGN_16 }, 822 { X86::VRSQRTSSr, X86::VRSQRTSSm, 0 }, 823 { X86::VSQRTSDr, X86::VSQRTSDm, 0 }, 824 { X86::VSQRTSSr, X86::VSQRTSSm, 0 }, 825 { X86::VADDPDrr, X86::VADDPDrm, TB_ALIGN_16 }, 826 { X86::VADDPSrr, X86::VADDPSrm, TB_ALIGN_16 }, 827 { X86::VADDSDrr, X86::VADDSDrm, 0 }, 828 { X86::VADDSSrr, X86::VADDSSrm, 0 }, 829 { X86::VADDSUBPDrr, X86::VADDSUBPDrm, TB_ALIGN_16 }, 830 { X86::VADDSUBPSrr, X86::VADDSUBPSrm, TB_ALIGN_16 }, 831 { X86::VANDNPDrr, X86::VANDNPDrm, TB_ALIGN_16 }, 832 { X86::VANDNPSrr, X86::VANDNPSrm, TB_ALIGN_16 }, 833 { X86::VANDPDrr, X86::VANDPDrm, TB_ALIGN_16 }, 834 { X86::VANDPSrr, X86::VANDPSrm, TB_ALIGN_16 }, 835 { X86::VBLENDPDrri, X86::VBLENDPDrmi, TB_ALIGN_16 }, 836 { X86::VBLENDPSrri, X86::VBLENDPSrmi, TB_ALIGN_16 }, 837 { X86::VBLENDVPDrr, X86::VBLENDVPDrm, TB_ALIGN_16 }, 838 { X86::VBLENDVPSrr, X86::VBLENDVPSrm, TB_ALIGN_16 }, 839 { X86::VCMPPDrri, X86::VCMPPDrmi, TB_ALIGN_16 }, 840 { X86::VCMPPSrri, X86::VCMPPSrmi, TB_ALIGN_16 }, 841 { X86::VCMPSDrr, X86::VCMPSDrm, 0 }, 842 { X86::VCMPSSrr, X86::VCMPSSrm, 0 }, 843 { X86::VDIVPDrr, X86::VDIVPDrm, TB_ALIGN_16 }, 844 { X86::VDIVPSrr, X86::VDIVPSrm, TB_ALIGN_16 }, 845 { X86::VDIVSDrr, X86::VDIVSDrm, 0 }, 846 { X86::VDIVSSrr, X86::VDIVSSrm, 0 }, 847 { X86::VFsANDNPDrr, X86::VFsANDNPDrm, TB_ALIGN_16 }, 848 { X86::VFsANDNPSrr, X86::VFsANDNPSrm, TB_ALIGN_16 }, 849 { X86::VFsANDPDrr, X86::VFsANDPDrm, TB_ALIGN_16 }, 850 { X86::VFsANDPSrr, 
X86::VFsANDPSrm, TB_ALIGN_16 }, 851 { X86::VFsORPDrr, X86::VFsORPDrm, TB_ALIGN_16 }, 852 { X86::VFsORPSrr, X86::VFsORPSrm, TB_ALIGN_16 }, 853 { X86::VFsXORPDrr, X86::VFsXORPDrm, TB_ALIGN_16 }, 854 { X86::VFsXORPSrr, X86::VFsXORPSrm, TB_ALIGN_16 }, 855 { X86::VHADDPDrr, X86::VHADDPDrm, TB_ALIGN_16 }, 856 { X86::VHADDPSrr, X86::VHADDPSrm, TB_ALIGN_16 }, 857 { X86::VHSUBPDrr, X86::VHSUBPDrm, TB_ALIGN_16 }, 858 { X86::VHSUBPSrr, X86::VHSUBPSrm, TB_ALIGN_16 }, 859 { X86::Int_VCMPSDrr, X86::Int_VCMPSDrm, 0 }, 860 { X86::Int_VCMPSSrr, X86::Int_VCMPSSrm, 0 }, 861 { X86::VMAXPDrr, X86::VMAXPDrm, TB_ALIGN_16 }, 862 { X86::VMAXPDrr_Int, X86::VMAXPDrm_Int, TB_ALIGN_16 }, 863 { X86::VMAXPSrr, X86::VMAXPSrm, TB_ALIGN_16 }, 864 { X86::VMAXPSrr_Int, X86::VMAXPSrm_Int, TB_ALIGN_16 }, 865 { X86::VMAXSDrr, X86::VMAXSDrm, 0 }, 866 { X86::VMAXSDrr_Int, X86::VMAXSDrm_Int, 0 }, 867 { X86::VMAXSSrr, X86::VMAXSSrm, 0 }, 868 { X86::VMAXSSrr_Int, X86::VMAXSSrm_Int, 0 }, 869 { X86::VMINPDrr, X86::VMINPDrm, TB_ALIGN_16 }, 870 { X86::VMINPDrr_Int, X86::VMINPDrm_Int, TB_ALIGN_16 }, 871 { X86::VMINPSrr, X86::VMINPSrm, TB_ALIGN_16 }, 872 { X86::VMINPSrr_Int, X86::VMINPSrm_Int, TB_ALIGN_16 }, 873 { X86::VMINSDrr, X86::VMINSDrm, 0 }, 874 { X86::VMINSDrr_Int, X86::VMINSDrm_Int, 0 }, 875 { X86::VMINSSrr, X86::VMINSSrm, 0 }, 876 { X86::VMINSSrr_Int, X86::VMINSSrm_Int, 0 }, 877 { X86::VMPSADBWrri, X86::VMPSADBWrmi, TB_ALIGN_16 }, 878 { X86::VMULPDrr, X86::VMULPDrm, TB_ALIGN_16 }, 879 { X86::VMULPSrr, X86::VMULPSrm, TB_ALIGN_16 }, 880 { X86::VMULSDrr, X86::VMULSDrm, 0 }, 881 { X86::VMULSSrr, X86::VMULSSrm, 0 }, 882 { X86::VORPDrr, X86::VORPDrm, TB_ALIGN_16 }, 883 { X86::VORPSrr, X86::VORPSrm, TB_ALIGN_16 }, 884 { X86::VPACKSSDWrr, X86::VPACKSSDWrm, TB_ALIGN_16 }, 885 { X86::VPACKSSWBrr, X86::VPACKSSWBrm, TB_ALIGN_16 }, 886 { X86::VPACKUSDWrr, X86::VPACKUSDWrm, TB_ALIGN_16 }, 887 { X86::VPACKUSWBrr, X86::VPACKUSWBrm, TB_ALIGN_16 }, 888 { X86::VPADDBrr, X86::VPADDBrm, TB_ALIGN_16 }, 889 { X86::VPADDDrr, X86::VPADDDrm, TB_ALIGN_16 }, 890 { X86::VPADDQrr, X86::VPADDQrm, TB_ALIGN_16 }, 891 { X86::VPADDSBrr, X86::VPADDSBrm, TB_ALIGN_16 }, 892 { X86::VPADDSWrr, X86::VPADDSWrm, TB_ALIGN_16 }, 893 { X86::VPADDUSBrr, X86::VPADDUSBrm, TB_ALIGN_16 }, 894 { X86::VPADDUSWrr, X86::VPADDUSWrm, TB_ALIGN_16 }, 895 { X86::VPADDWrr, X86::VPADDWrm, TB_ALIGN_16 }, 896 { X86::VPALIGNR128rr, X86::VPALIGNR128rm, TB_ALIGN_16 }, 897 { X86::VPANDNrr, X86::VPANDNrm, TB_ALIGN_16 }, 898 { X86::VPANDrr, X86::VPANDrm, TB_ALIGN_16 }, 899 { X86::VPAVGBrr, X86::VPAVGBrm, TB_ALIGN_16 }, 900 { X86::VPAVGWrr, X86::VPAVGWrm, TB_ALIGN_16 }, 901 { X86::VPBLENDWrri, X86::VPBLENDWrmi, TB_ALIGN_16 }, 902 { X86::VPCMPEQBrr, X86::VPCMPEQBrm, TB_ALIGN_16 }, 903 { X86::VPCMPEQDrr, X86::VPCMPEQDrm, TB_ALIGN_16 }, 904 { X86::VPCMPEQQrr, X86::VPCMPEQQrm, TB_ALIGN_16 }, 905 { X86::VPCMPEQWrr, X86::VPCMPEQWrm, TB_ALIGN_16 }, 906 { X86::VPCMPGTBrr, X86::VPCMPGTBrm, TB_ALIGN_16 }, 907 { X86::VPCMPGTDrr, X86::VPCMPGTDrm, TB_ALIGN_16 }, 908 { X86::VPCMPGTQrr, X86::VPCMPGTQrm, TB_ALIGN_16 }, 909 { X86::VPCMPGTWrr, X86::VPCMPGTWrm, TB_ALIGN_16 }, 910 { X86::VPHADDDrr, X86::VPHADDDrm, TB_ALIGN_16 }, 911 { X86::VPHADDSWrr128, X86::VPHADDSWrm128, TB_ALIGN_16 }, 912 { X86::VPHADDWrr, X86::VPHADDWrm, TB_ALIGN_16 }, 913 { X86::VPHSUBDrr, X86::VPHSUBDrm, TB_ALIGN_16 }, 914 { X86::VPHSUBSWrr128, X86::VPHSUBSWrm128, TB_ALIGN_16 }, 915 { X86::VPHSUBWrr, X86::VPHSUBWrm, TB_ALIGN_16 }, 916 { X86::VPERMILPDrr, X86::VPERMILPDrm, TB_ALIGN_16 }, 917 { X86::VPERMILPSrr, X86::VPERMILPSrm, TB_ALIGN_16 }, 
918 { X86::VPINSRWrri, X86::VPINSRWrmi, TB_ALIGN_16 }, 919 { X86::VPMADDUBSWrr128, X86::VPMADDUBSWrm128, TB_ALIGN_16 }, 920 { X86::VPMADDWDrr, X86::VPMADDWDrm, TB_ALIGN_16 }, 921 { X86::VPMAXSWrr, X86::VPMAXSWrm, TB_ALIGN_16 }, 922 { X86::VPMAXUBrr, X86::VPMAXUBrm, TB_ALIGN_16 }, 923 { X86::VPMINSWrr, X86::VPMINSWrm, TB_ALIGN_16 }, 924 { X86::VPMINUBrr, X86::VPMINUBrm, TB_ALIGN_16 }, 925 { X86::VPMULDQrr, X86::VPMULDQrm, TB_ALIGN_16 }, 926 { X86::VPMULHRSWrr128, X86::VPMULHRSWrm128, TB_ALIGN_16 }, 927 { X86::VPMULHUWrr, X86::VPMULHUWrm, TB_ALIGN_16 }, 928 { X86::VPMULHWrr, X86::VPMULHWrm, TB_ALIGN_16 }, 929 { X86::VPMULLDrr, X86::VPMULLDrm, TB_ALIGN_16 }, 930 { X86::VPMULLWrr, X86::VPMULLWrm, TB_ALIGN_16 }, 931 { X86::VPMULUDQrr, X86::VPMULUDQrm, TB_ALIGN_16 }, 932 { X86::VPORrr, X86::VPORrm, TB_ALIGN_16 }, 933 { X86::VPSADBWrr, X86::VPSADBWrm, TB_ALIGN_16 }, 934 { X86::VPSHUFBrr, X86::VPSHUFBrm, TB_ALIGN_16 }, 935 { X86::VPSIGNBrr, X86::VPSIGNBrm, TB_ALIGN_16 }, 936 { X86::VPSIGNWrr, X86::VPSIGNWrm, TB_ALIGN_16 }, 937 { X86::VPSIGNDrr, X86::VPSIGNDrm, TB_ALIGN_16 }, 938 { X86::VPSLLDrr, X86::VPSLLDrm, TB_ALIGN_16 }, 939 { X86::VPSLLQrr, X86::VPSLLQrm, TB_ALIGN_16 }, 940 { X86::VPSLLWrr, X86::VPSLLWrm, TB_ALIGN_16 }, 941 { X86::VPSRADrr, X86::VPSRADrm, TB_ALIGN_16 }, 942 { X86::VPSRAWrr, X86::VPSRAWrm, TB_ALIGN_16 }, 943 { X86::VPSRLDrr, X86::VPSRLDrm, TB_ALIGN_16 }, 944 { X86::VPSRLQrr, X86::VPSRLQrm, TB_ALIGN_16 }, 945 { X86::VPSRLWrr, X86::VPSRLWrm, TB_ALIGN_16 }, 946 { X86::VPSUBBrr, X86::VPSUBBrm, TB_ALIGN_16 }, 947 { X86::VPSUBDrr, X86::VPSUBDrm, TB_ALIGN_16 }, 948 { X86::VPSUBSBrr, X86::VPSUBSBrm, TB_ALIGN_16 }, 949 { X86::VPSUBSWrr, X86::VPSUBSWrm, TB_ALIGN_16 }, 950 { X86::VPSUBWrr, X86::VPSUBWrm, TB_ALIGN_16 }, 951 { X86::VPUNPCKHBWrr, X86::VPUNPCKHBWrm, TB_ALIGN_16 }, 952 { X86::VPUNPCKHDQrr, X86::VPUNPCKHDQrm, TB_ALIGN_16 }, 953 { X86::VPUNPCKHQDQrr, X86::VPUNPCKHQDQrm, TB_ALIGN_16 }, 954 { X86::VPUNPCKHWDrr, X86::VPUNPCKHWDrm, TB_ALIGN_16 }, 955 { X86::VPUNPCKLBWrr, X86::VPUNPCKLBWrm, TB_ALIGN_16 }, 956 { X86::VPUNPCKLDQrr, X86::VPUNPCKLDQrm, TB_ALIGN_16 }, 957 { X86::VPUNPCKLQDQrr, X86::VPUNPCKLQDQrm, TB_ALIGN_16 }, 958 { X86::VPUNPCKLWDrr, X86::VPUNPCKLWDrm, TB_ALIGN_16 }, 959 { X86::VPXORrr, X86::VPXORrm, TB_ALIGN_16 }, 960 { X86::VSHUFPDrri, X86::VSHUFPDrmi, TB_ALIGN_16 }, 961 { X86::VSHUFPSrri, X86::VSHUFPSrmi, TB_ALIGN_16 }, 962 { X86::VSUBPDrr, X86::VSUBPDrm, TB_ALIGN_16 }, 963 { X86::VSUBPSrr, X86::VSUBPSrm, TB_ALIGN_16 }, 964 { X86::VSUBSDrr, X86::VSUBSDrm, 0 }, 965 { X86::VSUBSSrr, X86::VSUBSSrm, 0 }, 966 { X86::VUNPCKHPDrr, X86::VUNPCKHPDrm, TB_ALIGN_16 }, 967 { X86::VUNPCKHPSrr, X86::VUNPCKHPSrm, TB_ALIGN_16 }, 968 { X86::VUNPCKLPDrr, X86::VUNPCKLPDrm, TB_ALIGN_16 }, 969 { X86::VUNPCKLPSrr, X86::VUNPCKLPSrm, TB_ALIGN_16 }, 970 { X86::VXORPDrr, X86::VXORPDrm, TB_ALIGN_16 }, 971 { X86::VXORPSrr, X86::VXORPSrm, TB_ALIGN_16 }, 972 // AVX 256-bit foldable instructions 973 { X86::VADDPDYrr, X86::VADDPDYrm, TB_ALIGN_32 }, 974 { X86::VADDPSYrr, X86::VADDPSYrm, TB_ALIGN_32 }, 975 { X86::VADDSUBPDYrr, X86::VADDSUBPDYrm, TB_ALIGN_32 }, 976 { X86::VADDSUBPSYrr, X86::VADDSUBPSYrm, TB_ALIGN_32 }, 977 { X86::VANDNPDYrr, X86::VANDNPDYrm, TB_ALIGN_32 }, 978 { X86::VANDNPSYrr, X86::VANDNPSYrm, TB_ALIGN_32 }, 979 { X86::VANDPDYrr, X86::VANDPDYrm, TB_ALIGN_32 }, 980 { X86::VANDPSYrr, X86::VANDPSYrm, TB_ALIGN_32 }, 981 { X86::VBLENDPDYrri, X86::VBLENDPDYrmi, TB_ALIGN_32 }, 982 { X86::VBLENDPSYrri, X86::VBLENDPSYrmi, TB_ALIGN_32 }, 983 { X86::VBLENDVPDYrr, X86::VBLENDVPDYrm, TB_ALIGN_32 
}, 984 { X86::VBLENDVPSYrr, X86::VBLENDVPSYrm, TB_ALIGN_32 }, 985 { X86::VCMPPDYrri, X86::VCMPPDYrmi, TB_ALIGN_32 }, 986 { X86::VCMPPSYrri, X86::VCMPPSYrmi, TB_ALIGN_32 }, 987 { X86::VDIVPDYrr, X86::VDIVPDYrm, TB_ALIGN_32 }, 988 { X86::VDIVPSYrr, X86::VDIVPSYrm, TB_ALIGN_32 }, 989 { X86::VHADDPDYrr, X86::VHADDPDYrm, TB_ALIGN_32 }, 990 { X86::VHADDPSYrr, X86::VHADDPSYrm, TB_ALIGN_32 }, 991 { X86::VHSUBPDYrr, X86::VHSUBPDYrm, TB_ALIGN_32 }, 992 { X86::VHSUBPSYrr, X86::VHSUBPSYrm, TB_ALIGN_32 }, 993 { X86::VINSERTF128rr, X86::VINSERTF128rm, TB_ALIGN_32 }, 994 { X86::VMAXPDYrr, X86::VMAXPDYrm, TB_ALIGN_32 }, 995 { X86::VMAXPDYrr_Int, X86::VMAXPDYrm_Int, TB_ALIGN_32 }, 996 { X86::VMAXPSYrr, X86::VMAXPSYrm, TB_ALIGN_32 }, 997 { X86::VMAXPSYrr_Int, X86::VMAXPSYrm_Int, TB_ALIGN_32 }, 998 { X86::VMINPDYrr, X86::VMINPDYrm, TB_ALIGN_32 }, 999 { X86::VMINPDYrr_Int, X86::VMINPDYrm_Int, TB_ALIGN_32 }, 1000 { X86::VMINPSYrr, X86::VMINPSYrm, TB_ALIGN_32 }, 1001 { X86::VMINPSYrr_Int, X86::VMINPSYrm_Int, TB_ALIGN_32 }, 1002 { X86::VMULPDYrr, X86::VMULPDYrm, TB_ALIGN_32 }, 1003 { X86::VMULPSYrr, X86::VMULPSYrm, TB_ALIGN_32 }, 1004 { X86::VORPDYrr, X86::VORPDYrm, TB_ALIGN_32 }, 1005 { X86::VORPSYrr, X86::VORPSYrm, TB_ALIGN_32 }, 1006 { X86::VPERM2F128rr, X86::VPERM2F128rm, TB_ALIGN_32 }, 1007 { X86::VPERMILPDYrr, X86::VPERMILPDYrm, TB_ALIGN_32 }, 1008 { X86::VPERMILPSYrr, X86::VPERMILPSYrm, TB_ALIGN_32 }, 1009 { X86::VSHUFPDYrri, X86::VSHUFPDYrmi, TB_ALIGN_32 }, 1010 { X86::VSHUFPSYrri, X86::VSHUFPSYrmi, TB_ALIGN_32 }, 1011 { X86::VSUBPDYrr, X86::VSUBPDYrm, TB_ALIGN_32 }, 1012 { X86::VSUBPSYrr, X86::VSUBPSYrm, TB_ALIGN_32 }, 1013 { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrm, TB_ALIGN_32 }, 1014 { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrm, TB_ALIGN_32 }, 1015 { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrm, TB_ALIGN_32 }, 1016 { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrm, TB_ALIGN_32 }, 1017 { X86::VXORPDYrr, X86::VXORPDYrm, TB_ALIGN_32 }, 1018 { X86::VXORPSYrr, X86::VXORPSYrm, TB_ALIGN_32 }, 1019 // AVX2 foldable instructions 1020 { X86::VINSERTI128rr, X86::VINSERTI128rm, TB_ALIGN_16 }, 1021 { X86::VPACKSSDWYrr, X86::VPACKSSDWYrm, TB_ALIGN_32 }, 1022 { X86::VPACKSSWBYrr, X86::VPACKSSWBYrm, TB_ALIGN_32 }, 1023 { X86::VPACKUSDWYrr, X86::VPACKUSDWYrm, TB_ALIGN_32 }, 1024 { X86::VPACKUSWBYrr, X86::VPACKUSWBYrm, TB_ALIGN_32 }, 1025 { X86::VPADDBYrr, X86::VPADDBYrm, TB_ALIGN_32 }, 1026 { X86::VPADDDYrr, X86::VPADDDYrm, TB_ALIGN_32 }, 1027 { X86::VPADDQYrr, X86::VPADDQYrm, TB_ALIGN_32 }, 1028 { X86::VPADDSBYrr, X86::VPADDSBYrm, TB_ALIGN_32 }, 1029 { X86::VPADDSWYrr, X86::VPADDSWYrm, TB_ALIGN_32 }, 1030 { X86::VPADDUSBYrr, X86::VPADDUSBYrm, TB_ALIGN_32 }, 1031 { X86::VPADDUSWYrr, X86::VPADDUSWYrm, TB_ALIGN_32 }, 1032 { X86::VPADDWYrr, X86::VPADDWYrm, TB_ALIGN_32 }, 1033 { X86::VPALIGNR256rr, X86::VPALIGNR256rm, TB_ALIGN_32 }, 1034 { X86::VPANDNYrr, X86::VPANDNYrm, TB_ALIGN_32 }, 1035 { X86::VPANDYrr, X86::VPANDYrm, TB_ALIGN_32 }, 1036 { X86::VPAVGBYrr, X86::VPAVGBYrm, TB_ALIGN_32 }, 1037 { X86::VPAVGWYrr, X86::VPAVGWYrm, TB_ALIGN_32 }, 1038 { X86::VPBLENDDrri, X86::VPBLENDDrmi, TB_ALIGN_32 }, 1039 { X86::VPBLENDDYrri, X86::VPBLENDDYrmi, TB_ALIGN_32 }, 1040 { X86::VPBLENDWYrri, X86::VPBLENDWYrmi, TB_ALIGN_32 }, 1041 { X86::VPCMPEQBYrr, X86::VPCMPEQBYrm, TB_ALIGN_32 }, 1042 { X86::VPCMPEQDYrr, X86::VPCMPEQDYrm, TB_ALIGN_32 }, 1043 { X86::VPCMPEQQYrr, X86::VPCMPEQQYrm, TB_ALIGN_32 }, 1044 { X86::VPCMPEQWYrr, X86::VPCMPEQWYrm, TB_ALIGN_32 }, 1045 { X86::VPCMPGTBYrr, X86::VPCMPGTBYrm, TB_ALIGN_32 }, 1046 { X86::VPCMPGTDYrr, X86::VPCMPGTDYrm, 
TB_ALIGN_32 }, 1047 { X86::VPCMPGTQYrr, X86::VPCMPGTQYrm, TB_ALIGN_32 }, 1048 { X86::VPCMPGTWYrr, X86::VPCMPGTWYrm, TB_ALIGN_32 }, 1049 { X86::VPERM2I128rr, X86::VPERM2I128rm, TB_ALIGN_32 }, 1050 { X86::VPERMDYrr, X86::VPERMDYrm, TB_ALIGN_32 }, 1051 { X86::VPERMPDYri, X86::VPERMPDYmi, TB_ALIGN_32 }, 1052 { X86::VPERMPSYrr, X86::VPERMPSYrm, TB_ALIGN_32 }, 1053 { X86::VPERMQYri, X86::VPERMQYmi, TB_ALIGN_32 }, 1054 { X86::VPHADDDYrr, X86::VPHADDDYrm, TB_ALIGN_32 }, 1055 { X86::VPHADDSWrr256, X86::VPHADDSWrm256, TB_ALIGN_32 }, 1056 { X86::VPHADDWYrr, X86::VPHADDWYrm, TB_ALIGN_32 }, 1057 { X86::VPHSUBDYrr, X86::VPHSUBDYrm, TB_ALIGN_32 }, 1058 { X86::VPHSUBSWrr256, X86::VPHSUBSWrm256, TB_ALIGN_32 }, 1059 { X86::VPHSUBWYrr, X86::VPHSUBWYrm, TB_ALIGN_32 }, 1060 { X86::VPMADDUBSWrr256, X86::VPMADDUBSWrm256, TB_ALIGN_32 }, 1061 { X86::VPMADDWDYrr, X86::VPMADDWDYrm, TB_ALIGN_32 }, 1062 { X86::VPMAXSWYrr, X86::VPMAXSWYrm, TB_ALIGN_32 }, 1063 { X86::VPMAXUBYrr, X86::VPMAXUBYrm, TB_ALIGN_32 }, 1064 { X86::VPMINSWYrr, X86::VPMINSWYrm, TB_ALIGN_32 }, 1065 { X86::VPMINUBYrr, X86::VPMINUBYrm, TB_ALIGN_32 }, 1066 { X86::VMPSADBWYrri, X86::VMPSADBWYrmi, TB_ALIGN_32 }, 1067 { X86::VPMULDQYrr, X86::VPMULDQYrm, TB_ALIGN_32 }, 1068 { X86::VPMULHRSWrr256, X86::VPMULHRSWrm256, TB_ALIGN_32 }, 1069 { X86::VPMULHUWYrr, X86::VPMULHUWYrm, TB_ALIGN_32 }, 1070 { X86::VPMULHWYrr, X86::VPMULHWYrm, TB_ALIGN_32 }, 1071 { X86::VPMULLDYrr, X86::VPMULLDYrm, TB_ALIGN_32 }, 1072 { X86::VPMULLWYrr, X86::VPMULLWYrm, TB_ALIGN_32 }, 1073 { X86::VPMULUDQYrr, X86::VPMULUDQYrm, TB_ALIGN_32 }, 1074 { X86::VPORYrr, X86::VPORYrm, TB_ALIGN_32 }, 1075 { X86::VPSADBWYrr, X86::VPSADBWYrm, TB_ALIGN_32 }, 1076 { X86::VPSHUFBYrr, X86::VPSHUFBYrm, TB_ALIGN_32 }, 1077 { X86::VPSIGNBYrr, X86::VPSIGNBYrm, TB_ALIGN_32 }, 1078 { X86::VPSIGNWYrr, X86::VPSIGNWYrm, TB_ALIGN_32 }, 1079 { X86::VPSIGNDYrr, X86::VPSIGNDYrm, TB_ALIGN_32 }, 1080 { X86::VPSLLDYrr, X86::VPSLLDYrm, TB_ALIGN_16 }, 1081 { X86::VPSLLQYrr, X86::VPSLLQYrm, TB_ALIGN_16 }, 1082 { X86::VPSLLWYrr, X86::VPSLLWYrm, TB_ALIGN_16 }, 1083 { X86::VPSLLVDrr, X86::VPSLLVDrm, TB_ALIGN_16 }, 1084 { X86::VPSLLVDYrr, X86::VPSLLVDYrm, TB_ALIGN_32 }, 1085 { X86::VPSLLVQrr, X86::VPSLLVQrm, TB_ALIGN_16 }, 1086 { X86::VPSLLVQYrr, X86::VPSLLVQYrm, TB_ALIGN_32 }, 1087 { X86::VPSRADYrr, X86::VPSRADYrm, TB_ALIGN_16 }, 1088 { X86::VPSRAWYrr, X86::VPSRAWYrm, TB_ALIGN_16 }, 1089 { X86::VPSRAVDrr, X86::VPSRAVDrm, TB_ALIGN_16 }, 1090 { X86::VPSRAVDYrr, X86::VPSRAVDYrm, TB_ALIGN_32 }, 1091 { X86::VPSRLDYrr, X86::VPSRLDYrm, TB_ALIGN_16 }, 1092 { X86::VPSRLQYrr, X86::VPSRLQYrm, TB_ALIGN_16 }, 1093 { X86::VPSRLWYrr, X86::VPSRLWYrm, TB_ALIGN_16 }, 1094 { X86::VPSRLVDrr, X86::VPSRLVDrm, TB_ALIGN_16 }, 1095 { X86::VPSRLVDYrr, X86::VPSRLVDYrm, TB_ALIGN_32 }, 1096 { X86::VPSRLVQrr, X86::VPSRLVQrm, TB_ALIGN_16 }, 1097 { X86::VPSRLVQYrr, X86::VPSRLVQYrm, TB_ALIGN_32 }, 1098 { X86::VPSUBBYrr, X86::VPSUBBYrm, TB_ALIGN_32 }, 1099 { X86::VPSUBDYrr, X86::VPSUBDYrm, TB_ALIGN_32 }, 1100 { X86::VPSUBSBYrr, X86::VPSUBSBYrm, TB_ALIGN_32 }, 1101 { X86::VPSUBSWYrr, X86::VPSUBSWYrm, TB_ALIGN_32 }, 1102 { X86::VPSUBWYrr, X86::VPSUBWYrm, TB_ALIGN_32 }, 1103 { X86::VPUNPCKHBWYrr, X86::VPUNPCKHBWYrm, TB_ALIGN_32 }, 1104 { X86::VPUNPCKHDQYrr, X86::VPUNPCKHDQYrm, TB_ALIGN_32 }, 1105 { X86::VPUNPCKHQDQYrr, X86::VPUNPCKHQDQYrm, TB_ALIGN_16 }, 1106 { X86::VPUNPCKHWDYrr, X86::VPUNPCKHWDYrm, TB_ALIGN_32 }, 1107 { X86::VPUNPCKLBWYrr, X86::VPUNPCKLBWYrm, TB_ALIGN_32 }, 1108 { X86::VPUNPCKLDQYrr, X86::VPUNPCKLDQYrm, TB_ALIGN_32 }, 1109 { 
X86::VPUNPCKLQDQYrr, X86::VPUNPCKLQDQYrm, TB_ALIGN_32 }, 1110 { X86::VPUNPCKLWDYrr, X86::VPUNPCKLWDYrm, TB_ALIGN_32 }, 1111 { X86::VPXORYrr, X86::VPXORYrm, TB_ALIGN_32 }, 1112 // FIXME: add AVX 256-bit foldable instructions 1113 }; 1114 1115 for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) { 1116 unsigned RegOp = OpTbl2[i].RegOp; 1117 unsigned MemOp = OpTbl2[i].MemOp; 1118 unsigned Flags = OpTbl2[i].Flags; 1119 AddTableEntry(RegOp2MemOpTable2, MemOp2RegOpTable, 1120 RegOp, MemOp, 1121 // Index 2, folded load 1122 Flags | TB_INDEX_2 | TB_FOLDED_LOAD); 1123 } 1124 1125 static const X86OpTblEntry OpTbl3[] = { 1126 // FMA foldable instructions 1127 { X86::VFMADDSSr231r, X86::VFMADDSSr231m, 0 }, 1128 { X86::VFMADDSDr231r, X86::VFMADDSDr231m, 0 }, 1129 { X86::VFMADDSSr132r, X86::VFMADDSSr132m, 0 }, 1130 { X86::VFMADDSDr132r, X86::VFMADDSDr132m, 0 }, 1131 { X86::VFMADDSSr213r, X86::VFMADDSSr213m, 0 }, 1132 { X86::VFMADDSDr213r, X86::VFMADDSDr213m, 0 }, 1133 { X86::VFMADDSSr132r_Int, X86::VFMADDSSr132m_Int, 0 }, 1134 { X86::VFMADDSDr132r_Int, X86::VFMADDSDr132m_Int, 0 }, 1135 1136 { X86::VFMADDPSr231r, X86::VFMADDPSr231m, TB_ALIGN_16 }, 1137 { X86::VFMADDPDr231r, X86::VFMADDPDr231m, TB_ALIGN_16 }, 1138 { X86::VFMADDPSr132r, X86::VFMADDPSr132m, TB_ALIGN_16 }, 1139 { X86::VFMADDPDr132r, X86::VFMADDPDr132m, TB_ALIGN_16 }, 1140 { X86::VFMADDPSr213r, X86::VFMADDPSr213m, TB_ALIGN_16 }, 1141 { X86::VFMADDPDr213r, X86::VFMADDPDr213m, TB_ALIGN_16 }, 1142 { X86::VFMADDPSr231rY, X86::VFMADDPSr231mY, TB_ALIGN_32 }, 1143 { X86::VFMADDPDr231rY, X86::VFMADDPDr231mY, TB_ALIGN_32 }, 1144 { X86::VFMADDPSr132rY, X86::VFMADDPSr132mY, TB_ALIGN_32 }, 1145 { X86::VFMADDPDr132rY, X86::VFMADDPDr132mY, TB_ALIGN_32 }, 1146 { X86::VFMADDPSr213rY, X86::VFMADDPSr213mY, TB_ALIGN_32 }, 1147 { X86::VFMADDPDr213rY, X86::VFMADDPDr213mY, TB_ALIGN_32 }, 1148 { X86::VFMADDPSr132r_Int, X86::VFMADDPSr132m_Int, TB_ALIGN_16 }, 1149 { X86::VFMADDPDr132r_Int, X86::VFMADDPDr132m_Int, TB_ALIGN_16 }, 1150 { X86::VFMADDPSr132rY_Int, X86::VFMADDPSr132mY_Int, TB_ALIGN_32 }, 1151 { X86::VFMADDPDr132rY_Int, X86::VFMADDPDr132mY_Int, TB_ALIGN_32 }, 1152 1153 { X86::VFNMADDSSr231r, X86::VFNMADDSSr231m, 0 }, 1154 { X86::VFNMADDSDr231r, X86::VFNMADDSDr231m, 0 }, 1155 { X86::VFNMADDSSr132r, X86::VFNMADDSSr132m, 0 }, 1156 { X86::VFNMADDSDr132r, X86::VFNMADDSDr132m, 0 }, 1157 { X86::VFNMADDSSr213r, X86::VFNMADDSSr213m, 0 }, 1158 { X86::VFNMADDSDr213r, X86::VFNMADDSDr213m, 0 }, 1159 { X86::VFNMADDSSr132r_Int, X86::VFNMADDSSr132m_Int, 0 }, 1160 { X86::VFNMADDSDr132r_Int, X86::VFNMADDSDr132m_Int, 0 }, 1161 1162 { X86::VFNMADDPSr231r, X86::VFNMADDPSr231m, TB_ALIGN_16 }, 1163 { X86::VFNMADDPDr231r, X86::VFNMADDPDr231m, TB_ALIGN_16 }, 1164 { X86::VFNMADDPSr132r, X86::VFNMADDPSr132m, TB_ALIGN_16 }, 1165 { X86::VFNMADDPDr132r, X86::VFNMADDPDr132m, TB_ALIGN_16 }, 1166 { X86::VFNMADDPSr213r, X86::VFNMADDPSr213m, TB_ALIGN_16 }, 1167 { X86::VFNMADDPDr213r, X86::VFNMADDPDr213m, TB_ALIGN_16 }, 1168 { X86::VFNMADDPSr231rY, X86::VFNMADDPSr231mY, TB_ALIGN_32 }, 1169 { X86::VFNMADDPDr231rY, X86::VFNMADDPDr231mY, TB_ALIGN_32 }, 1170 { X86::VFNMADDPSr132rY, X86::VFNMADDPSr132mY, TB_ALIGN_32 }, 1171 { X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_32 }, 1172 { X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_32 }, 1173 { X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_32 }, 1174 { X86::VFNMADDPSr132r_Int, X86::VFNMADDPSr132m_Int, TB_ALIGN_16 }, 1175 { X86::VFNMADDPDr132r_Int, X86::VFNMADDPDr132m_Int, TB_ALIGN_16 }, 1176 { X86::VFNMADDPSr132rY_Int, 
X86::VFNMADDPSr132mY_Int, TB_ALIGN_32 }, 1177 { X86::VFNMADDPDr132rY_Int, X86::VFNMADDPDr132mY_Int, TB_ALIGN_32 }, 1178 1179 { X86::VFMSUBSSr231r, X86::VFMSUBSSr231m, 0 }, 1180 { X86::VFMSUBSDr231r, X86::VFMSUBSDr231m, 0 }, 1181 { X86::VFMSUBSSr132r, X86::VFMSUBSSr132m, 0 }, 1182 { X86::VFMSUBSDr132r, X86::VFMSUBSDr132m, 0 }, 1183 { X86::VFMSUBSSr213r, X86::VFMSUBSSr213m, 0 }, 1184 { X86::VFMSUBSDr213r, X86::VFMSUBSDr213m, 0 }, 1185 { X86::VFMSUBSSr132r_Int, X86::VFMSUBSSr132m_Int, 0 }, 1186 { X86::VFMSUBSDr132r_Int, X86::VFMSUBSDr132m_Int, 0 }, 1187 1188 { X86::VFMSUBPSr231r, X86::VFMSUBPSr231m, TB_ALIGN_16 }, 1189 { X86::VFMSUBPDr231r, X86::VFMSUBPDr231m, TB_ALIGN_16 }, 1190 { X86::VFMSUBPSr132r, X86::VFMSUBPSr132m, TB_ALIGN_16 }, 1191 { X86::VFMSUBPDr132r, X86::VFMSUBPDr132m, TB_ALIGN_16 }, 1192 { X86::VFMSUBPSr213r, X86::VFMSUBPSr213m, TB_ALIGN_16 }, 1193 { X86::VFMSUBPDr213r, X86::VFMSUBPDr213m, TB_ALIGN_16 }, 1194 { X86::VFMSUBPSr231rY, X86::VFMSUBPSr231mY, TB_ALIGN_32 }, 1195 { X86::VFMSUBPDr231rY, X86::VFMSUBPDr231mY, TB_ALIGN_32 }, 1196 { X86::VFMSUBPSr132rY, X86::VFMSUBPSr132mY, TB_ALIGN_32 }, 1197 { X86::VFMSUBPDr132rY, X86::VFMSUBPDr132mY, TB_ALIGN_32 }, 1198 { X86::VFMSUBPSr213rY, X86::VFMSUBPSr213mY, TB_ALIGN_32 }, 1199 { X86::VFMSUBPDr213rY, X86::VFMSUBPDr213mY, TB_ALIGN_32 }, 1200 { X86::VFMSUBPSr132r_Int, X86::VFMSUBPSr132m_Int, TB_ALIGN_16 }, 1201 { X86::VFMSUBPDr132r_Int, X86::VFMSUBPDr132m_Int, TB_ALIGN_16 }, 1202 { X86::VFMSUBPSr132rY_Int, X86::VFMSUBPSr132mY_Int, TB_ALIGN_32 }, 1203 { X86::VFMSUBPDr132rY_Int, X86::VFMSUBPDr132mY_Int, TB_ALIGN_32 }, 1204 1205 { X86::VFNMSUBSSr231r, X86::VFNMSUBSSr231m, 0 }, 1206 { X86::VFNMSUBSDr231r, X86::VFNMSUBSDr231m, 0 }, 1207 { X86::VFNMSUBSSr132r, X86::VFNMSUBSSr132m, 0 }, 1208 { X86::VFNMSUBSDr132r, X86::VFNMSUBSDr132m, 0 }, 1209 { X86::VFNMSUBSSr213r, X86::VFNMSUBSSr213m, 0 }, 1210 { X86::VFNMSUBSDr213r, X86::VFNMSUBSDr213m, 0 }, 1211 { X86::VFNMSUBSSr132r_Int, X86::VFNMSUBSSr132m_Int, 0 }, 1212 { X86::VFNMSUBSDr132r_Int, X86::VFNMSUBSDr132m_Int, 0 }, 1213 1214 { X86::VFNMSUBPSr231r, X86::VFNMSUBPSr231m, TB_ALIGN_16 }, 1215 { X86::VFNMSUBPDr231r, X86::VFNMSUBPDr231m, TB_ALIGN_16 }, 1216 { X86::VFNMSUBPSr132r, X86::VFNMSUBPSr132m, TB_ALIGN_16 }, 1217 { X86::VFNMSUBPDr132r, X86::VFNMSUBPDr132m, TB_ALIGN_16 }, 1218 { X86::VFNMSUBPSr213r, X86::VFNMSUBPSr213m, TB_ALIGN_16 }, 1219 { X86::VFNMSUBPDr213r, X86::VFNMSUBPDr213m, TB_ALIGN_16 }, 1220 { X86::VFNMSUBPSr231rY, X86::VFNMSUBPSr231mY, TB_ALIGN_32 }, 1221 { X86::VFNMSUBPDr231rY, X86::VFNMSUBPDr231mY, TB_ALIGN_32 }, 1222 { X86::VFNMSUBPSr132rY, X86::VFNMSUBPSr132mY, TB_ALIGN_32 }, 1223 { X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr132mY, TB_ALIGN_32 }, 1224 { X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr213mY, TB_ALIGN_32 }, 1225 { X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr213mY, TB_ALIGN_32 }, 1226 { X86::VFNMSUBPSr132r_Int, X86::VFNMSUBPSr132m_Int, TB_ALIGN_16 }, 1227 { X86::VFNMSUBPDr132r_Int, X86::VFNMSUBPDr132m_Int, TB_ALIGN_16 }, 1228 { X86::VFNMSUBPSr132rY_Int, X86::VFNMSUBPSr132mY_Int, TB_ALIGN_32 }, 1229 { X86::VFNMSUBPDr132rY_Int, X86::VFNMSUBPDr132mY_Int, TB_ALIGN_32 }, 1230 1231 { X86::VFMADDSUBPSr231r, X86::VFMADDSUBPSr231m, TB_ALIGN_16 }, 1232 { X86::VFMADDSUBPDr231r, X86::VFMADDSUBPDr231m, TB_ALIGN_16 }, 1233 { X86::VFMADDSUBPSr132r, X86::VFMADDSUBPSr132m, TB_ALIGN_16 }, 1234 { X86::VFMADDSUBPDr132r, X86::VFMADDSUBPDr132m, TB_ALIGN_16 }, 1235 { X86::VFMADDSUBPSr213r, X86::VFMADDSUBPSr213m, TB_ALIGN_16 }, 1236 { X86::VFMADDSUBPDr213r, X86::VFMADDSUBPDr213m, TB_ALIGN_16 }, 1237 { 
X86::VFMADDSUBPSr231rY, X86::VFMADDSUBPSr231mY, TB_ALIGN_32 }, 1238 { X86::VFMADDSUBPDr231rY, X86::VFMADDSUBPDr231mY, TB_ALIGN_32 }, 1239 { X86::VFMADDSUBPSr132rY, X86::VFMADDSUBPSr132mY, TB_ALIGN_32 }, 1240 { X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr132mY, TB_ALIGN_32 }, 1241 { X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr213mY, TB_ALIGN_32 }, 1242 { X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr213mY, TB_ALIGN_32 }, 1243 { X86::VFMADDSUBPSr132r_Int, X86::VFMADDSUBPSr132m_Int, TB_ALIGN_16 }, 1244 { X86::VFMADDSUBPDr132r_Int, X86::VFMADDSUBPDr132m_Int, TB_ALIGN_16 }, 1245 { X86::VFMADDSUBPSr132rY_Int, X86::VFMADDSUBPSr132mY_Int, TB_ALIGN_32 }, 1246 { X86::VFMADDSUBPDr132rY_Int, X86::VFMADDSUBPDr132mY_Int, TB_ALIGN_32 }, 1247 1248 { X86::VFMSUBADDPSr231r, X86::VFMSUBADDPSr231m, TB_ALIGN_16 }, 1249 { X86::VFMSUBADDPDr231r, X86::VFMSUBADDPDr231m, TB_ALIGN_16 }, 1250 { X86::VFMSUBADDPSr132r, X86::VFMSUBADDPSr132m, TB_ALIGN_16 }, 1251 { X86::VFMSUBADDPDr132r, X86::VFMSUBADDPDr132m, TB_ALIGN_16 }, 1252 { X86::VFMSUBADDPSr213r, X86::VFMSUBADDPSr213m, TB_ALIGN_16 }, 1253 { X86::VFMSUBADDPDr213r, X86::VFMSUBADDPDr213m, TB_ALIGN_16 }, 1254 { X86::VFMSUBADDPSr231rY, X86::VFMSUBADDPSr231mY, TB_ALIGN_32 }, 1255 { X86::VFMSUBADDPDr231rY, X86::VFMSUBADDPDr231mY, TB_ALIGN_32 }, 1256 { X86::VFMSUBADDPSr132rY, X86::VFMSUBADDPSr132mY, TB_ALIGN_32 }, 1257 { X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr132mY, TB_ALIGN_32 }, 1258 { X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr213mY, TB_ALIGN_32 }, 1259 { X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr213mY, TB_ALIGN_32 }, 1260 { X86::VFMSUBADDPSr132r_Int, X86::VFMSUBADDPSr132m_Int, TB_ALIGN_16 }, 1261 { X86::VFMSUBADDPDr132r_Int, X86::VFMSUBADDPDr132m_Int, TB_ALIGN_16 }, 1262 { X86::VFMSUBADDPSr132rY_Int, X86::VFMSUBADDPSr132mY_Int, TB_ALIGN_32 }, 1263 { X86::VFMSUBADDPDr132rY_Int, X86::VFMSUBADDPDr132mY_Int, TB_ALIGN_32 }, 1264 }; 1265 1266 for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) { 1267 unsigned RegOp = OpTbl3[i].RegOp; 1268 unsigned MemOp = OpTbl3[i].MemOp; 1269 unsigned Flags = OpTbl3[i].Flags; 1270 AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable, 1271 RegOp, MemOp, 1272 // Index 3, folded load 1273 Flags | TB_INDEX_3 | TB_FOLDED_LOAD); 1274 } 1275 1276} 1277 1278void 1279X86InstrInfo::AddTableEntry(RegOp2MemOpTableType &R2MTable, 1280 MemOp2RegOpTableType &M2RTable, 1281 unsigned RegOp, unsigned MemOp, unsigned Flags) { 1282 if ((Flags & TB_NO_FORWARD) == 0) { 1283 assert(!R2MTable.count(RegOp) && "Duplicate entry!"); 1284 R2MTable[RegOp] = std::make_pair(MemOp, Flags); 1285 } 1286 if ((Flags & TB_NO_REVERSE) == 0) { 1287 assert(!M2RTable.count(MemOp) && 1288 "Duplicated entries in unfolding maps?"); 1289 M2RTable[MemOp] = std::make_pair(RegOp, Flags); 1290 } 1291} 1292 1293bool 1294X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, 1295 unsigned &SrcReg, unsigned &DstReg, 1296 unsigned &SubIdx) const { 1297 switch (MI.getOpcode()) { 1298 default: break; 1299 case X86::MOVSX16rr8: 1300 case X86::MOVZX16rr8: 1301 case X86::MOVSX32rr8: 1302 case X86::MOVZX32rr8: 1303 case X86::MOVSX64rr8: 1304 case X86::MOVZX64rr8: 1305 if (!TM.getSubtarget<X86Subtarget>().is64Bit()) 1306 // It's not always legal to reference the low 8-bit of the larger 1307 // register in 32-bit mode. 1308 return false; 1309 case X86::MOVSX32rr16: 1310 case X86::MOVZX32rr16: 1311 case X86::MOVSX64rr16: 1312 case X86::MOVZX64rr16: 1313 case X86::MOVSX64rr32: 1314 case X86::MOVZX64rr32: { 1315 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg()) 1316 // Be conservative. 
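      // (A sub-register index on either operand would have to be composed
      // with the index returned below, so don't try to handle that here.)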
1317 return false; 1318 SrcReg = MI.getOperand(1).getReg(); 1319 DstReg = MI.getOperand(0).getReg(); 1320 switch (MI.getOpcode()) { 1321 default: 1322 llvm_unreachable(0); 1323 case X86::MOVSX16rr8: 1324 case X86::MOVZX16rr8: 1325 case X86::MOVSX32rr8: 1326 case X86::MOVZX32rr8: 1327 case X86::MOVSX64rr8: 1328 case X86::MOVZX64rr8: 1329 SubIdx = X86::sub_8bit; 1330 break; 1331 case X86::MOVSX32rr16: 1332 case X86::MOVZX32rr16: 1333 case X86::MOVSX64rr16: 1334 case X86::MOVZX64rr16: 1335 SubIdx = X86::sub_16bit; 1336 break; 1337 case X86::MOVSX64rr32: 1338 case X86::MOVZX64rr32: 1339 SubIdx = X86::sub_32bit; 1340 break; 1341 } 1342 return true; 1343 } 1344 } 1345 return false; 1346} 1347 1348/// isFrameOperand - Return true and the FrameIndex if the specified 1349/// operand and follow operands form a reference to the stack frame. 1350bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op, 1351 int &FrameIndex) const { 1352 if (MI->getOperand(Op).isFI() && MI->getOperand(Op+1).isImm() && 1353 MI->getOperand(Op+2).isReg() && MI->getOperand(Op+3).isImm() && 1354 MI->getOperand(Op+1).getImm() == 1 && 1355 MI->getOperand(Op+2).getReg() == 0 && 1356 MI->getOperand(Op+3).getImm() == 0) { 1357 FrameIndex = MI->getOperand(Op).getIndex(); 1358 return true; 1359 } 1360 return false; 1361} 1362 1363static bool isFrameLoadOpcode(int Opcode) { 1364 switch (Opcode) { 1365 default: 1366 return false; 1367 case X86::MOV8rm: 1368 case X86::MOV16rm: 1369 case X86::MOV32rm: 1370 case X86::MOV64rm: 1371 case X86::LD_Fp64m: 1372 case X86::MOVSSrm: 1373 case X86::MOVSDrm: 1374 case X86::MOVAPSrm: 1375 case X86::MOVAPDrm: 1376 case X86::MOVDQArm: 1377 case X86::VMOVSSrm: 1378 case X86::VMOVSDrm: 1379 case X86::VMOVAPSrm: 1380 case X86::VMOVAPDrm: 1381 case X86::VMOVDQArm: 1382 case X86::VMOVAPSYrm: 1383 case X86::VMOVAPDYrm: 1384 case X86::VMOVDQAYrm: 1385 case X86::MMX_MOVD64rm: 1386 case X86::MMX_MOVQ64rm: 1387 return true; 1388 } 1389} 1390 1391static bool isFrameStoreOpcode(int Opcode) { 1392 switch (Opcode) { 1393 default: break; 1394 case X86::MOV8mr: 1395 case X86::MOV16mr: 1396 case X86::MOV32mr: 1397 case X86::MOV64mr: 1398 case X86::ST_FpP64m: 1399 case X86::MOVSSmr: 1400 case X86::MOVSDmr: 1401 case X86::MOVAPSmr: 1402 case X86::MOVAPDmr: 1403 case X86::MOVDQAmr: 1404 case X86::VMOVSSmr: 1405 case X86::VMOVSDmr: 1406 case X86::VMOVAPSmr: 1407 case X86::VMOVAPDmr: 1408 case X86::VMOVDQAmr: 1409 case X86::VMOVAPSYmr: 1410 case X86::VMOVAPDYmr: 1411 case X86::VMOVDQAYmr: 1412 case X86::MMX_MOVD64mr: 1413 case X86::MMX_MOVQ64mr: 1414 case X86::MMX_MOVNTQmr: 1415 return true; 1416 } 1417 return false; 1418} 1419 1420unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI, 1421 int &FrameIndex) const { 1422 if (isFrameLoadOpcode(MI->getOpcode())) 1423 if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex)) 1424 return MI->getOperand(0).getReg(); 1425 return 0; 1426} 1427 1428unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI, 1429 int &FrameIndex) const { 1430 if (isFrameLoadOpcode(MI->getOpcode())) { 1431 unsigned Reg; 1432 if ((Reg = isLoadFromStackSlot(MI, FrameIndex))) 1433 return Reg; 1434 // Check for post-frame index elimination operations 1435 const MachineMemOperand *Dummy; 1436 return hasLoadFromStackSlot(MI, Dummy, FrameIndex); 1437 } 1438 return 0; 1439} 1440 1441unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI, 1442 int &FrameIndex) const { 1443 if (isFrameStoreOpcode(MI->getOpcode())) 1444 if 
(MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
1445        isFrameOperand(MI, 0, FrameIndex))
1446      return MI->getOperand(X86::AddrNumOperands).getReg();
1447  return 0;
1448}
1449
1450unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
1451                                                int &FrameIndex) const {
1452  if (isFrameStoreOpcode(MI->getOpcode())) {
1453    unsigned Reg;
1454    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
1455      return Reg;
1456    // Check for post-frame index elimination operations
1457    const MachineMemOperand *Dummy;
1458    return hasStoreToStackSlot(MI, Dummy, FrameIndex);
1459  }
1460  return 0;
1461}
1462
1463/// regIsPICBase - Return true if the register is a PIC base (i.e., defined by
1464/// X86::MOVPC32r).
1465static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
1466  bool isPICBase = false;
1467  for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
1468         E = MRI.def_end(); I != E; ++I) {
1469    MachineInstr *DefMI = I.getOperand().getParent();
1470    if (DefMI->getOpcode() != X86::MOVPC32r)
1471      return false;
1472    assert(!isPICBase && "More than one PIC base?");
1473    isPICBase = true;
1474  }
1475  return isPICBase;
1476}
1477
1478bool
1479X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
1480                                                AliasAnalysis *AA) const {
1481  switch (MI->getOpcode()) {
1482  default: break;
1483  case X86::MOV8rm:
1484  case X86::MOV16rm:
1485  case X86::MOV32rm:
1486  case X86::MOV64rm:
1487  case X86::LD_Fp64m:
1488  case X86::MOVSSrm:
1489  case X86::MOVSDrm:
1490  case X86::MOVAPSrm:
1491  case X86::MOVUPSrm:
1492  case X86::MOVAPDrm:
1493  case X86::MOVDQArm:
1494  case X86::VMOVSSrm:
1495  case X86::VMOVSDrm:
1496  case X86::VMOVAPSrm:
1497  case X86::VMOVUPSrm:
1498  case X86::VMOVAPDrm:
1499  case X86::VMOVDQArm:
1500  case X86::VMOVAPSYrm:
1501  case X86::VMOVUPSYrm:
1502  case X86::VMOVAPDYrm:
1503  case X86::VMOVDQAYrm:
1504  case X86::MMX_MOVD64rm:
1505  case X86::MMX_MOVQ64rm:
1506  case X86::FsVMOVAPSrm:
1507  case X86::FsVMOVAPDrm:
1508  case X86::FsMOVAPSrm:
1509  case X86::FsMOVAPDrm: {
1510    // Loads from constant pools are trivially rematerializable.
1511    if (MI->getOperand(1).isReg() &&
1512        MI->getOperand(2).isImm() &&
1513        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
1514        MI->isInvariantLoad(AA)) {
1515      unsigned BaseReg = MI->getOperand(1).getReg();
1516      if (BaseReg == 0 || BaseReg == X86::RIP)
1517        return true;
1518      // Allow re-materialization of PIC load.
1519      if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
1520        return false;
1521      const MachineFunction &MF = *MI->getParent()->getParent();
1522      const MachineRegisterInfo &MRI = MF.getRegInfo();
1523      bool isPICBase = false;
1524      for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
1525             E = MRI.def_end(); I != E; ++I) {
1526        MachineInstr *DefMI = I.getOperand().getParent();
1527        if (DefMI->getOpcode() != X86::MOVPC32r)
1528          return false;
1529        assert(!isPICBase && "More than one PIC base?");
1530        isPICBase = true;
1531      }
1532      return isPICBase;
1533    }
1534    return false;
1535  }
1536
1537  case X86::LEA32r:
1538  case X86::LEA64r: {
1539    if (MI->getOperand(2).isImm() &&
1540        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
1541        !MI->getOperand(4).isReg()) {
1542      // lea fi#, lea GV, etc. are all rematerializable.
1543      if (!MI->getOperand(1).isReg())
1544        return true;
1545      unsigned BaseReg = MI->getOperand(1).getReg();
1546      if (BaseReg == 0)
1547        return true;
1548      // Allow re-materialization of lea PICBase + x.
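      // The LEA is only rematerializable if its base register is the
      // function's PIC base, i.e. a register defined solely by
      // X86::MOVPC32r; regIsPICBase below checks exactly that.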
1549      const MachineFunction &MF = *MI->getParent()->getParent();
1550      const MachineRegisterInfo &MRI = MF.getRegInfo();
1551      return regIsPICBase(BaseReg, MRI);
1552    }
1553    return false;
1554  }
1555  }
1556
1557  // All other instructions marked M_REMATERIALIZABLE are always trivially
1558  // rematerializable.
1559  return true;
1560}
1561
1562/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
1563/// that would clobber the EFLAGS condition register. Note the result may be
1564/// conservative. If it cannot definitely determine the safety after visiting
1565/// a few instructions in each direction it assumes it's not safe.
1566static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
1567                                  MachineBasicBlock::iterator I) {
1568  MachineBasicBlock::iterator E = MBB.end();
1569
1570  // For compile time consideration, if we are not able to determine the
1571  // safety after visiting 4 instructions in each direction, we will assume
1572  // it's not safe.
1573  MachineBasicBlock::iterator Iter = I;
1574  for (unsigned i = 0; Iter != E && i < 4; ++i) {
1575    bool SeenDef = false;
1576    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
1577      MachineOperand &MO = Iter->getOperand(j);
1578      if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
1579        SeenDef = true;
1580      if (!MO.isReg())
1581        continue;
1582      if (MO.getReg() == X86::EFLAGS) {
1583        if (MO.isUse())
1584          return false;
1585        SeenDef = true;
1586      }
1587    }
1588
1589    if (SeenDef)
1590      // This instruction defines EFLAGS, no need to look any further.
1591      return true;
1592    ++Iter;
1593    // Skip over DBG_VALUE.
1594    while (Iter != E && Iter->isDebugValue())
1595      ++Iter;
1596  }
1597
1598  // It is safe to clobber EFLAGS at the end of a block if no successor has it
1599  // live in.
1600  if (Iter == E) {
1601    for (MachineBasicBlock::succ_iterator SI = MBB.succ_begin(),
1602           SE = MBB.succ_end(); SI != SE; ++SI)
1603      if ((*SI)->isLiveIn(X86::EFLAGS))
1604        return false;
1605    return true;
1606  }
1607
1608  MachineBasicBlock::iterator B = MBB.begin();
1609  Iter = I;
1610  for (unsigned i = 0; i < 4; ++i) {
1611    // If we make it to the beginning of the block, it's safe to clobber
1612    // EFLAGS iff EFLAGS is not live-in.
1613    if (Iter == B)
1614      return !MBB.isLiveIn(X86::EFLAGS);
1615
1616    --Iter;
1617    // Skip over DBG_VALUE.
1618    while (Iter != B && Iter->isDebugValue())
1619      --Iter;
1620
1621    bool SawKill = false;
1622    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
1623      MachineOperand &MO = Iter->getOperand(j);
1624      // A register mask may clobber EFLAGS, but we should still look for a
1625      // live EFLAGS def.
1626      if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
1627        SawKill = true;
1628      if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
1629        if (MO.isDef()) return MO.isDead();
1630        if (MO.isKill()) SawKill = true;
1631      }
1632    }
1633
1634    if (SawKill)
1635      // This instruction kills EFLAGS and doesn't redefine it, so
1636      // there's no need to look further.
1637      return true;
1638  }
1639
1640  // Conservative answer.
1641  return false;
1642}
1643
1644void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
1645                                 MachineBasicBlock::iterator I,
1646                                 unsigned DestReg, unsigned SubIdx,
1647                                 const MachineInstr *Orig,
1648                                 const TargetRegisterInfo &TRI) const {
1649  DebugLoc DL = Orig->getDebugLoc();
1650
1651  // MOV32r0 etc. are implemented with xor which clobbers condition code.
1652  // Re-materialize them as movri instructions to avoid side effects.
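  // For example, if EFLAGS is live across the insertion point,
  //   MOV32r0 %reg          (lowered to xorl %reg, %reg - clobbers EFLAGS)
  // is emitted as
  //   MOV32ri %reg, 0       (movl $0, %reg - leaves EFLAGS untouched)
  // instead of simply cloning the original instruction.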
1653  bool Clone = true;
1654  unsigned Opc = Orig->getOpcode();
1655  switch (Opc) {
1656  default: break;
1657  case X86::MOV8r0:
1658  case X86::MOV16r0:
1659  case X86::MOV32r0:
1660  case X86::MOV64r0: {
1661    if (!isSafeToClobberEFLAGS(MBB, I)) {
1662      switch (Opc) {
1663      default: break;
1664      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
1665      case X86::MOV16r0: Opc = X86::MOV16ri; break;
1666      case X86::MOV32r0: Opc = X86::MOV32ri; break;
1667      case X86::MOV64r0: Opc = X86::MOV64ri64i32; break;
1668      }
1669      Clone = false;
1670    }
1671    break;
1672  }
1673  }
1674
1675  if (Clone) {
1676    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
1677    MBB.insert(I, MI);
1678  } else {
1679    BuildMI(MBB, I, DL, get(Opc)).addOperand(Orig->getOperand(0)).addImm(0);
1680  }
1681
1682  MachineInstr *NewMI = prior(I);
1683  NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
1684}
1685
1686/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
1687/// is not marked dead.
1688static bool hasLiveCondCodeDef(MachineInstr *MI) {
1689  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1690    MachineOperand &MO = MI->getOperand(i);
1691    if (MO.isReg() && MO.isDef() &&
1692        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
1693      return true;
1694    }
1695  }
1696  return false;
1697}
1698
1699/// convertToThreeAddressWithLEA - Helper for convertToThreeAddress when
1700/// 16-bit LEA is disabled: use 32-bit LEA to form 3-address code by promoting
1701/// to a 32-bit superregister and then truncating back down to a 16-bit
1702/// subregister.
1703MachineInstr *
1704X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
1705                                           MachineFunction::iterator &MFI,
1706                                           MachineBasicBlock::iterator &MBBI,
1707                                           LiveVariables *LV) const {
1708  MachineInstr *MI = MBBI;
1709  unsigned Dest = MI->getOperand(0).getReg();
1710  unsigned Src = MI->getOperand(1).getReg();
1711  bool isDead = MI->getOperand(0).isDead();
1712  bool isKill = MI->getOperand(1).isKill();
1713
1714  unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
1715    ? X86::LEA64_32r : X86::LEA32r;
1716  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
1717  unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1718  unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
1719
1720  // Build and insert into an implicit UNDEF value. This is OK because
1721  // we'll be shifting and then extracting the lower 16-bits.
1722  // This has the potential to cause a partial register stall. e.g.
1723  //   movw    (%rbp,%rcx,2), %dx
1724  //   leal    -65(%rdx), %esi
1725  // But testing has shown this *does* help performance in 64-bit mode (at
1726  // least on modern x86 machines).
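  // The sequence built below is roughly:
  //   %leaInReg            = IMPLICIT_DEF
  //   %leaInReg:sub_16bit  = COPY %Src
  //   %leaOutReg           = LEA32r / LEA64_32r ... %leaInReg ...
  //   %Dest                = COPY %leaOutReg:sub_16bit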
1727 BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg); 1728 MachineInstr *InsMI = 1729 BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY)) 1730 .addReg(leaInReg, RegState::Define, X86::sub_16bit) 1731 .addReg(Src, getKillRegState(isKill)); 1732 1733 MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(), 1734 get(Opc), leaOutReg); 1735 switch (MIOpc) { 1736 default: 1737 llvm_unreachable(0); 1738 case X86::SHL16ri: { 1739 unsigned ShAmt = MI->getOperand(2).getImm(); 1740 MIB.addReg(0).addImm(1 << ShAmt) 1741 .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0); 1742 break; 1743 } 1744 case X86::INC16r: 1745 case X86::INC64_16r: 1746 addRegOffset(MIB, leaInReg, true, 1); 1747 break; 1748 case X86::DEC16r: 1749 case X86::DEC64_16r: 1750 addRegOffset(MIB, leaInReg, true, -1); 1751 break; 1752 case X86::ADD16ri: 1753 case X86::ADD16ri8: 1754 case X86::ADD16ri_DB: 1755 case X86::ADD16ri8_DB: 1756 addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm()); 1757 break; 1758 case X86::ADD16rr: 1759 case X86::ADD16rr_DB: { 1760 unsigned Src2 = MI->getOperand(2).getReg(); 1761 bool isKill2 = MI->getOperand(2).isKill(); 1762 unsigned leaInReg2 = 0; 1763 MachineInstr *InsMI2 = 0; 1764 if (Src == Src2) { 1765 // ADD16rr %reg1028<kill>, %reg1028 1766 // just a single insert_subreg. 1767 addRegReg(MIB, leaInReg, true, leaInReg, false); 1768 } else { 1769 leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass); 1770 // Build and insert into an implicit UNDEF value. This is OK because 1771 // well be shifting and then extracting the lower 16-bits. 1772 BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF),leaInReg2); 1773 InsMI2 = 1774 BuildMI(*MFI, &*MIB, MI->getDebugLoc(), get(TargetOpcode::COPY)) 1775 .addReg(leaInReg2, RegState::Define, X86::sub_16bit) 1776 .addReg(Src2, getKillRegState(isKill2)); 1777 addRegReg(MIB, leaInReg, true, leaInReg2, true); 1778 } 1779 if (LV && isKill2 && InsMI2) 1780 LV->replaceKillInstruction(Src2, MI, InsMI2); 1781 break; 1782 } 1783 } 1784 1785 MachineInstr *NewMI = MIB; 1786 MachineInstr *ExtMI = 1787 BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY)) 1788 .addReg(Dest, RegState::Define | getDeadRegState(isDead)) 1789 .addReg(leaOutReg, RegState::Kill, X86::sub_16bit); 1790 1791 if (LV) { 1792 // Update live variables 1793 LV->getVarInfo(leaInReg).Kills.push_back(NewMI); 1794 LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI); 1795 if (isKill) 1796 LV->replaceKillInstruction(Src, MI, InsMI); 1797 if (isDead) 1798 LV->replaceKillInstruction(Dest, MI, ExtMI); 1799 } 1800 1801 return ExtMI; 1802} 1803 1804/// convertToThreeAddress - This method must be implemented by targets that 1805/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target 1806/// may be able to convert a two-address instruction into a true 1807/// three-address instruction on demand. This allows the X86 target (for 1808/// example) to convert ADD and SHL instructions into LEA instructions if they 1809/// would require register copies due to two-addressness. 1810/// 1811/// This method returns a null pointer if the transformation cannot be 1812/// performed, otherwise it returns the new instruction. 
1813/// 1814MachineInstr * 1815X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, 1816 MachineBasicBlock::iterator &MBBI, 1817 LiveVariables *LV) const { 1818 MachineInstr *MI = MBBI; 1819 MachineFunction &MF = *MI->getParent()->getParent(); 1820 // All instructions input are two-addr instructions. Get the known operands. 1821 unsigned Dest = MI->getOperand(0).getReg(); 1822 unsigned Src = MI->getOperand(1).getReg(); 1823 bool isDead = MI->getOperand(0).isDead(); 1824 bool isKill = MI->getOperand(1).isKill(); 1825 1826 MachineInstr *NewMI = NULL; 1827 // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When 1828 // we have better subtarget support, enable the 16-bit LEA generation here. 1829 // 16-bit LEA is also slow on Core2. 1830 bool DisableLEA16 = true; 1831 bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit(); 1832 1833 unsigned MIOpc = MI->getOpcode(); 1834 switch (MIOpc) { 1835 case X86::SHUFPSrri: { 1836 assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!"); 1837 if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0; 1838 1839 unsigned B = MI->getOperand(1).getReg(); 1840 unsigned C = MI->getOperand(2).getReg(); 1841 if (B != C) return 0; 1842 unsigned A = MI->getOperand(0).getReg(); 1843 unsigned M = MI->getOperand(3).getImm(); 1844 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri)) 1845 .addReg(A, RegState::Define | getDeadRegState(isDead)) 1846 .addReg(B, getKillRegState(isKill)).addImm(M); 1847 break; 1848 } 1849 case X86::SHUFPDrri: { 1850 assert(MI->getNumOperands() == 4 && "Unknown shufpd instruction!"); 1851 if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0; 1852 1853 unsigned B = MI->getOperand(1).getReg(); 1854 unsigned C = MI->getOperand(2).getReg(); 1855 if (B != C) return 0; 1856 unsigned A = MI->getOperand(0).getReg(); 1857 unsigned M = MI->getOperand(3).getImm(); 1858 1859 // Convert to PSHUFD mask. 1860 M = ((M & 1) << 1) | ((M & 1) << 3) | ((M & 2) << 4) | ((M & 2) << 6)| 0x44; 1861 1862 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri)) 1863 .addReg(A, RegState::Define | getDeadRegState(isDead)) 1864 .addReg(B, getKillRegState(isKill)).addImm(M); 1865 break; 1866 } 1867 case X86::SHL64ri: { 1868 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 1869 // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses 1870 // the flags produced by a shift yet, so this is safe. 1871 unsigned ShAmt = MI->getOperand(2).getImm(); 1872 if (ShAmt == 0 || ShAmt >= 4) return 0; 1873 1874 // LEA can't handle RSP. 1875 if (TargetRegisterInfo::isVirtualRegister(Src) && 1876 !MF.getRegInfo().constrainRegClass(Src, &X86::GR64_NOSPRegClass)) 1877 return 0; 1878 1879 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) 1880 .addReg(Dest, RegState::Define | getDeadRegState(isDead)) 1881 .addReg(0).addImm(1 << ShAmt) 1882 .addReg(Src, getKillRegState(isKill)) 1883 .addImm(0).addReg(0); 1884 break; 1885 } 1886 case X86::SHL32ri: { 1887 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 1888 // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses 1889 // the flags produced by a shift yet, so this is safe. 1890 unsigned ShAmt = MI->getOperand(2).getImm(); 1891 if (ShAmt == 0 || ShAmt >= 4) return 0; 1892 1893 // LEA can't handle ESP. 1894 if (TargetRegisterInfo::isVirtualRegister(Src) && 1895 !MF.getRegInfo().constrainRegClass(Src, &X86::GR32_NOSPRegClass)) 1896 return 0; 1897 1898 unsigned Opc = is64Bit ? 
X86::LEA64_32r : X86::LEA32r; 1899 NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc)) 1900 .addReg(Dest, RegState::Define | getDeadRegState(isDead)) 1901 .addReg(0).addImm(1 << ShAmt) 1902 .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0); 1903 break; 1904 } 1905 case X86::SHL16ri: { 1906 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!"); 1907 // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses 1908 // the flags produced by a shift yet, so this is safe. 1909 unsigned ShAmt = MI->getOperand(2).getImm(); 1910 if (ShAmt == 0 || ShAmt >= 4) return 0; 1911 1912 if (DisableLEA16) 1913 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; 1914 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 1915 .addReg(Dest, RegState::Define | getDeadRegState(isDead)) 1916 .addReg(0).addImm(1 << ShAmt) 1917 .addReg(Src, getKillRegState(isKill)) 1918 .addImm(0).addReg(0); 1919 break; 1920 } 1921 default: { 1922 // The following opcodes also sets the condition code register(s). Only 1923 // convert them to equivalent lea if the condition code register def's 1924 // are dead! 1925 if (hasLiveCondCodeDef(MI)) 1926 return 0; 1927 1928 switch (MIOpc) { 1929 default: return 0; 1930 case X86::INC64r: 1931 case X86::INC32r: 1932 case X86::INC64_32r: { 1933 assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); 1934 unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r 1935 : (is64Bit ? X86::LEA64_32r : X86::LEA32r); 1936 const TargetRegisterClass *RC = MIOpc == X86::INC64r ? 1937 (const TargetRegisterClass*)&X86::GR64_NOSPRegClass : 1938 (const TargetRegisterClass*)&X86::GR32_NOSPRegClass; 1939 1940 // LEA can't handle RSP. 1941 if (TargetRegisterInfo::isVirtualRegister(Src) && 1942 !MF.getRegInfo().constrainRegClass(Src, RC)) 1943 return 0; 1944 1945 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) 1946 .addReg(Dest, RegState::Define | 1947 getDeadRegState(isDead)), 1948 Src, isKill, 1); 1949 break; 1950 } 1951 case X86::INC16r: 1952 case X86::INC64_16r: 1953 if (DisableLEA16) 1954 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; 1955 assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); 1956 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 1957 .addReg(Dest, RegState::Define | 1958 getDeadRegState(isDead)), 1959 Src, isKill, 1); 1960 break; 1961 case X86::DEC64r: 1962 case X86::DEC32r: 1963 case X86::DEC64_32r: { 1964 assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); 1965 unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r 1966 : (is64Bit ? X86::LEA64_32r : X86::LEA32r); 1967 const TargetRegisterClass *RC = MIOpc == X86::DEC64r ? 1968 (const TargetRegisterClass*)&X86::GR64_NOSPRegClass : 1969 (const TargetRegisterClass*)&X86::GR32_NOSPRegClass; 1970 // LEA can't handle RSP. 1971 if (TargetRegisterInfo::isVirtualRegister(Src) && 1972 !MF.getRegInfo().constrainRegClass(Src, RC)) 1973 return 0; 1974 1975 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) 1976 .addReg(Dest, RegState::Define | 1977 getDeadRegState(isDead)), 1978 Src, isKill, -1); 1979 break; 1980 } 1981 case X86::DEC16r: 1982 case X86::DEC64_16r: 1983 if (DisableLEA16) 1984 return is64Bit ? 
convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; 1985 assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); 1986 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 1987 .addReg(Dest, RegState::Define | 1988 getDeadRegState(isDead)), 1989 Src, isKill, -1); 1990 break; 1991 case X86::ADD64rr: 1992 case X86::ADD64rr_DB: 1993 case X86::ADD32rr: 1994 case X86::ADD32rr_DB: { 1995 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 1996 unsigned Opc; 1997 const TargetRegisterClass *RC; 1998 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) { 1999 Opc = X86::LEA64r; 2000 RC = &X86::GR64_NOSPRegClass; 2001 } else { 2002 Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; 2003 RC = &X86::GR32_NOSPRegClass; 2004 } 2005 2006 2007 unsigned Src2 = MI->getOperand(2).getReg(); 2008 bool isKill2 = MI->getOperand(2).isKill(); 2009 2010 // LEA can't handle RSP. 2011 if (TargetRegisterInfo::isVirtualRegister(Src2) && 2012 !MF.getRegInfo().constrainRegClass(Src2, RC)) 2013 return 0; 2014 2015 NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2016 .addReg(Dest, RegState::Define | 2017 getDeadRegState(isDead)), 2018 Src, isKill, Src2, isKill2); 2019 2020 // Preserve undefness of the operands. 2021 bool isUndef = MI->getOperand(1).isUndef(); 2022 bool isUndef2 = MI->getOperand(2).isUndef(); 2023 NewMI->getOperand(1).setIsUndef(isUndef); 2024 NewMI->getOperand(3).setIsUndef(isUndef2); 2025 2026 if (LV && isKill2) 2027 LV->replaceKillInstruction(Src2, MI, NewMI); 2028 break; 2029 } 2030 case X86::ADD16rr: 2031 case X86::ADD16rr_DB: { 2032 if (DisableLEA16) 2033 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; 2034 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2035 unsigned Src2 = MI->getOperand(2).getReg(); 2036 bool isKill2 = MI->getOperand(2).isKill(); 2037 NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2038 .addReg(Dest, RegState::Define | 2039 getDeadRegState(isDead)), 2040 Src, isKill, Src2, isKill2); 2041 if (LV && isKill2) 2042 LV->replaceKillInstruction(Src2, MI, NewMI); 2043 break; 2044 } 2045 case X86::ADD64ri32: 2046 case X86::ADD64ri8: 2047 case X86::ADD64ri32_DB: 2048 case X86::ADD64ri8_DB: 2049 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2050 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) 2051 .addReg(Dest, RegState::Define | 2052 getDeadRegState(isDead)), 2053 Src, isKill, MI->getOperand(2).getImm()); 2054 break; 2055 case X86::ADD32ri: 2056 case X86::ADD32ri8: 2057 case X86::ADD32ri_DB: 2058 case X86::ADD32ri8_DB: { 2059 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2060 unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; 2061 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) 2062 .addReg(Dest, RegState::Define | 2063 getDeadRegState(isDead)), 2064 Src, isKill, MI->getOperand(2).getImm()); 2065 break; 2066 } 2067 case X86::ADD16ri: 2068 case X86::ADD16ri8: 2069 case X86::ADD16ri_DB: 2070 case X86::ADD16ri8_DB: 2071 if (DisableLEA16) 2072 return is64Bit ? 
convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; 2073 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); 2074 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) 2075 .addReg(Dest, RegState::Define | 2076 getDeadRegState(isDead)), 2077 Src, isKill, MI->getOperand(2).getImm()); 2078 break; 2079 } 2080 } 2081 } 2082 2083 if (!NewMI) return 0; 2084 2085 if (LV) { // Update live variables 2086 if (isKill) 2087 LV->replaceKillInstruction(Src, MI, NewMI); 2088 if (isDead) 2089 LV->replaceKillInstruction(Dest, MI, NewMI); 2090 } 2091 2092 MFI->insert(MBBI, NewMI); // Insert the new inst 2093 return NewMI; 2094} 2095 2096/// commuteInstruction - We have a few instructions that must be hacked on to 2097/// commute them. 2098/// 2099MachineInstr * 2100X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { 2101 switch (MI->getOpcode()) { 2102 case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I) 2103 case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I) 2104 case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I) 2105 case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I) 2106 case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I) 2107 case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I) 2108 unsigned Opc; 2109 unsigned Size; 2110 switch (MI->getOpcode()) { 2111 default: llvm_unreachable("Unreachable!"); 2112 case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break; 2113 case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break; 2114 case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break; 2115 case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break; 2116 case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break; 2117 case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break; 2118 } 2119 unsigned Amt = MI->getOperand(3).getImm(); 2120 if (NewMI) { 2121 MachineFunction &MF = *MI->getParent()->getParent(); 2122 MI = MF.CloneMachineInstr(MI); 2123 NewMI = false; 2124 } 2125 MI->setDesc(get(Opc)); 2126 MI->getOperand(3).setImm(Size-Amt); 2127 return TargetInstrInfoImpl::commuteInstruction(MI, NewMI); 2128 } 2129 case X86::CMOVB16rr: 2130 case X86::CMOVB32rr: 2131 case X86::CMOVB64rr: 2132 case X86::CMOVAE16rr: 2133 case X86::CMOVAE32rr: 2134 case X86::CMOVAE64rr: 2135 case X86::CMOVE16rr: 2136 case X86::CMOVE32rr: 2137 case X86::CMOVE64rr: 2138 case X86::CMOVNE16rr: 2139 case X86::CMOVNE32rr: 2140 case X86::CMOVNE64rr: 2141 case X86::CMOVBE16rr: 2142 case X86::CMOVBE32rr: 2143 case X86::CMOVBE64rr: 2144 case X86::CMOVA16rr: 2145 case X86::CMOVA32rr: 2146 case X86::CMOVA64rr: 2147 case X86::CMOVL16rr: 2148 case X86::CMOVL32rr: 2149 case X86::CMOVL64rr: 2150 case X86::CMOVGE16rr: 2151 case X86::CMOVGE32rr: 2152 case X86::CMOVGE64rr: 2153 case X86::CMOVLE16rr: 2154 case X86::CMOVLE32rr: 2155 case X86::CMOVLE64rr: 2156 case X86::CMOVG16rr: 2157 case X86::CMOVG32rr: 2158 case X86::CMOVG64rr: 2159 case X86::CMOVS16rr: 2160 case X86::CMOVS32rr: 2161 case X86::CMOVS64rr: 2162 case X86::CMOVNS16rr: 2163 case X86::CMOVNS32rr: 2164 case X86::CMOVNS64rr: 2165 case X86::CMOVP16rr: 2166 case X86::CMOVP32rr: 2167 case X86::CMOVP64rr: 2168 case X86::CMOVNP16rr: 2169 case X86::CMOVNP32rr: 2170 case X86::CMOVNP64rr: 2171 case X86::CMOVO16rr: 2172 case X86::CMOVO32rr: 2173 case X86::CMOVO64rr: 2174 case X86::CMOVNO16rr: 2175 case X86::CMOVNO32rr: 2176 case X86::CMOVNO64rr: { 2177 unsigned Opc = 
0; 2178 switch (MI->getOpcode()) { 2179 default: break; 2180 case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break; 2181 case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break; 2182 case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break; 2183 case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break; 2184 case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break; 2185 case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break; 2186 case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break; 2187 case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break; 2188 case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break; 2189 case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break; 2190 case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break; 2191 case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break; 2192 case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break; 2193 case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break; 2194 case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break; 2195 case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break; 2196 case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break; 2197 case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break; 2198 case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break; 2199 case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break; 2200 case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break; 2201 case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break; 2202 case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break; 2203 case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break; 2204 case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break; 2205 case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break; 2206 case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break; 2207 case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break; 2208 case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break; 2209 case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break; 2210 case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break; 2211 case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break; 2212 case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break; 2213 case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break; 2214 case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break; 2215 case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break; 2216 case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break; 2217 case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break; 2218 case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break; 2219 case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break; 2220 case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break; 2221 case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break; 2222 case X86::CMOVO16rr: Opc = X86::CMOVNO16rr; break; 2223 case X86::CMOVO32rr: Opc = X86::CMOVNO32rr; break; 2224 case X86::CMOVO64rr: Opc = X86::CMOVNO64rr; break; 2225 case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break; 2226 case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break; 2227 case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break; 2228 } 2229 if (NewMI) { 2230 MachineFunction &MF = *MI->getParent()->getParent(); 2231 MI = MF.CloneMachineInstr(MI); 2232 NewMI = false; 2233 } 2234 MI->setDesc(get(Opc)); 2235 // Fallthrough intended. 
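    // Flipping the condition is only half of the commute; the fall-through to
    // the default case below performs the actual operand swap.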
2236 } 2237 default: 2238 return TargetInstrInfoImpl::commuteInstruction(MI, NewMI); 2239 } 2240} 2241 2242static X86::CondCode getCondFromBranchOpc(unsigned BrOpc) { 2243 switch (BrOpc) { 2244 default: return X86::COND_INVALID; 2245 case X86::JE_4: return X86::COND_E; 2246 case X86::JNE_4: return X86::COND_NE; 2247 case X86::JL_4: return X86::COND_L; 2248 case X86::JLE_4: return X86::COND_LE; 2249 case X86::JG_4: return X86::COND_G; 2250 case X86::JGE_4: return X86::COND_GE; 2251 case X86::JB_4: return X86::COND_B; 2252 case X86::JBE_4: return X86::COND_BE; 2253 case X86::JA_4: return X86::COND_A; 2254 case X86::JAE_4: return X86::COND_AE; 2255 case X86::JS_4: return X86::COND_S; 2256 case X86::JNS_4: return X86::COND_NS; 2257 case X86::JP_4: return X86::COND_P; 2258 case X86::JNP_4: return X86::COND_NP; 2259 case X86::JO_4: return X86::COND_O; 2260 case X86::JNO_4: return X86::COND_NO; 2261 } 2262} 2263 2264/// getCondFromSETOpc - return condition code of a SET opcode. 2265static X86::CondCode getCondFromSETOpc(unsigned Opc) { 2266 switch (Opc) { 2267 default: return X86::COND_INVALID; 2268 case X86::SETAr: case X86::SETAm: return X86::COND_A; 2269 case X86::SETAEr: case X86::SETAEm: return X86::COND_AE; 2270 case X86::SETBr: case X86::SETBm: return X86::COND_B; 2271 case X86::SETBEr: case X86::SETBEm: return X86::COND_BE; 2272 case X86::SETEr: case X86::SETEm: return X86::COND_E; 2273 case X86::SETGr: case X86::SETGm: return X86::COND_G; 2274 case X86::SETGEr: case X86::SETGEm: return X86::COND_GE; 2275 case X86::SETLr: case X86::SETLm: return X86::COND_L; 2276 case X86::SETLEr: case X86::SETLEm: return X86::COND_LE; 2277 case X86::SETNEr: case X86::SETNEm: return X86::COND_NE; 2278 case X86::SETNOr: case X86::SETNOm: return X86::COND_NO; 2279 case X86::SETNPr: case X86::SETNPm: return X86::COND_NP; 2280 case X86::SETNSr: case X86::SETNSm: return X86::COND_NS; 2281 case X86::SETOr: case X86::SETOm: return X86::COND_O; 2282 case X86::SETPr: case X86::SETPm: return X86::COND_P; 2283 case X86::SETSr: case X86::SETSm: return X86::COND_S; 2284 } 2285} 2286 2287/// getCondFromCmovOpc - return condition code of a CMov opcode. 
2288static X86::CondCode getCondFromCMovOpc(unsigned Opc) { 2289 switch (Opc) { 2290 default: return X86::COND_INVALID; 2291 case X86::CMOVA16rm: case X86::CMOVA16rr: case X86::CMOVA32rm: 2292 case X86::CMOVA32rr: case X86::CMOVA64rm: case X86::CMOVA64rr: 2293 return X86::COND_A; 2294 case X86::CMOVAE16rm: case X86::CMOVAE16rr: case X86::CMOVAE32rm: 2295 case X86::CMOVAE32rr: case X86::CMOVAE64rm: case X86::CMOVAE64rr: 2296 return X86::COND_AE; 2297 case X86::CMOVB16rm: case X86::CMOVB16rr: case X86::CMOVB32rm: 2298 case X86::CMOVB32rr: case X86::CMOVB64rm: case X86::CMOVB64rr: 2299 return X86::COND_B; 2300 case X86::CMOVBE16rm: case X86::CMOVBE16rr: case X86::CMOVBE32rm: 2301 case X86::CMOVBE32rr: case X86::CMOVBE64rm: case X86::CMOVBE64rr: 2302 return X86::COND_BE; 2303 case X86::CMOVE16rm: case X86::CMOVE16rr: case X86::CMOVE32rm: 2304 case X86::CMOVE32rr: case X86::CMOVE64rm: case X86::CMOVE64rr: 2305 return X86::COND_E; 2306 case X86::CMOVG16rm: case X86::CMOVG16rr: case X86::CMOVG32rm: 2307 case X86::CMOVG32rr: case X86::CMOVG64rm: case X86::CMOVG64rr: 2308 return X86::COND_G; 2309 case X86::CMOVGE16rm: case X86::CMOVGE16rr: case X86::CMOVGE32rm: 2310 case X86::CMOVGE32rr: case X86::CMOVGE64rm: case X86::CMOVGE64rr: 2311 return X86::COND_GE; 2312 case X86::CMOVL16rm: case X86::CMOVL16rr: case X86::CMOVL32rm: 2313 case X86::CMOVL32rr: case X86::CMOVL64rm: case X86::CMOVL64rr: 2314 return X86::COND_L; 2315 case X86::CMOVLE16rm: case X86::CMOVLE16rr: case X86::CMOVLE32rm: 2316 case X86::CMOVLE32rr: case X86::CMOVLE64rm: case X86::CMOVLE64rr: 2317 return X86::COND_LE; 2318 case X86::CMOVNE16rm: case X86::CMOVNE16rr: case X86::CMOVNE32rm: 2319 case X86::CMOVNE32rr: case X86::CMOVNE64rm: case X86::CMOVNE64rr: 2320 return X86::COND_NE; 2321 case X86::CMOVNO16rm: case X86::CMOVNO16rr: case X86::CMOVNO32rm: 2322 case X86::CMOVNO32rr: case X86::CMOVNO64rm: case X86::CMOVNO64rr: 2323 return X86::COND_NO; 2324 case X86::CMOVNP16rm: case X86::CMOVNP16rr: case X86::CMOVNP32rm: 2325 case X86::CMOVNP32rr: case X86::CMOVNP64rm: case X86::CMOVNP64rr: 2326 return X86::COND_NP; 2327 case X86::CMOVNS16rm: case X86::CMOVNS16rr: case X86::CMOVNS32rm: 2328 case X86::CMOVNS32rr: case X86::CMOVNS64rm: case X86::CMOVNS64rr: 2329 return X86::COND_NS; 2330 case X86::CMOVO16rm: case X86::CMOVO16rr: case X86::CMOVO32rm: 2331 case X86::CMOVO32rr: case X86::CMOVO64rm: case X86::CMOVO64rr: 2332 return X86::COND_O; 2333 case X86::CMOVP16rm: case X86::CMOVP16rr: case X86::CMOVP32rm: 2334 case X86::CMOVP32rr: case X86::CMOVP64rm: case X86::CMOVP64rr: 2335 return X86::COND_P; 2336 case X86::CMOVS16rm: case X86::CMOVS16rr: case X86::CMOVS32rm: 2337 case X86::CMOVS32rr: case X86::CMOVS64rm: case X86::CMOVS64rr: 2338 return X86::COND_S; 2339 } 2340} 2341 2342unsigned X86::GetCondBranchFromCond(X86::CondCode CC) { 2343 switch (CC) { 2344 default: llvm_unreachable("Illegal condition code!"); 2345 case X86::COND_E: return X86::JE_4; 2346 case X86::COND_NE: return X86::JNE_4; 2347 case X86::COND_L: return X86::JL_4; 2348 case X86::COND_LE: return X86::JLE_4; 2349 case X86::COND_G: return X86::JG_4; 2350 case X86::COND_GE: return X86::JGE_4; 2351 case X86::COND_B: return X86::JB_4; 2352 case X86::COND_BE: return X86::JBE_4; 2353 case X86::COND_A: return X86::JA_4; 2354 case X86::COND_AE: return X86::JAE_4; 2355 case X86::COND_S: return X86::JS_4; 2356 case X86::COND_NS: return X86::JNS_4; 2357 case X86::COND_P: return X86::JP_4; 2358 case X86::COND_NP: return X86::JNP_4; 2359 case X86::COND_O: return X86::JO_4; 2360 case 
X86::COND_NO: return X86::JNO_4; 2361 } 2362} 2363 2364/// GetOppositeBranchCondition - Return the inverse of the specified condition, 2365/// e.g. turning COND_E to COND_NE. 2366X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { 2367 switch (CC) { 2368 default: llvm_unreachable("Illegal condition code!"); 2369 case X86::COND_E: return X86::COND_NE; 2370 case X86::COND_NE: return X86::COND_E; 2371 case X86::COND_L: return X86::COND_GE; 2372 case X86::COND_LE: return X86::COND_G; 2373 case X86::COND_G: return X86::COND_LE; 2374 case X86::COND_GE: return X86::COND_L; 2375 case X86::COND_B: return X86::COND_AE; 2376 case X86::COND_BE: return X86::COND_A; 2377 case X86::COND_A: return X86::COND_BE; 2378 case X86::COND_AE: return X86::COND_B; 2379 case X86::COND_S: return X86::COND_NS; 2380 case X86::COND_NS: return X86::COND_S; 2381 case X86::COND_P: return X86::COND_NP; 2382 case X86::COND_NP: return X86::COND_P; 2383 case X86::COND_O: return X86::COND_NO; 2384 case X86::COND_NO: return X86::COND_O; 2385 } 2386} 2387 2388/// getSwappedCondition - assume the flags are set by MI(a,b), return 2389/// the condition code if we modify the instructions such that flags are 2390/// set by MI(b,a). 2391static X86::CondCode getSwappedCondition(X86::CondCode CC) { 2392 switch (CC) { 2393 default: return X86::COND_INVALID; 2394 case X86::COND_E: return X86::COND_E; 2395 case X86::COND_NE: return X86::COND_NE; 2396 case X86::COND_L: return X86::COND_G; 2397 case X86::COND_LE: return X86::COND_GE; 2398 case X86::COND_G: return X86::COND_L; 2399 case X86::COND_GE: return X86::COND_LE; 2400 case X86::COND_B: return X86::COND_A; 2401 case X86::COND_BE: return X86::COND_AE; 2402 case X86::COND_A: return X86::COND_B; 2403 case X86::COND_AE: return X86::COND_BE; 2404 } 2405} 2406 2407/// getSETFromCond - Return a set opcode for the given condition and 2408/// whether it has memory operand. 2409static unsigned getSETFromCond(X86::CondCode CC, 2410 bool HasMemoryOperand) { 2411 static const unsigned Opc[16][2] = { 2412 { X86::SETAr, X86::SETAm }, 2413 { X86::SETAEr, X86::SETAEm }, 2414 { X86::SETBr, X86::SETBm }, 2415 { X86::SETBEr, X86::SETBEm }, 2416 { X86::SETEr, X86::SETEm }, 2417 { X86::SETGr, X86::SETGm }, 2418 { X86::SETGEr, X86::SETGEm }, 2419 { X86::SETLr, X86::SETLm }, 2420 { X86::SETLEr, X86::SETLEm }, 2421 { X86::SETNEr, X86::SETNEm }, 2422 { X86::SETNOr, X86::SETNOm }, 2423 { X86::SETNPr, X86::SETNPm }, 2424 { X86::SETNSr, X86::SETNSm }, 2425 { X86::SETOr, X86::SETOm }, 2426 { X86::SETPr, X86::SETPm }, 2427 { X86::SETSr, X86::SETSm } 2428 }; 2429 2430 assert(CC < 16 && "Can only handle standard cond codes"); 2431 return Opc[CC][HasMemoryOperand ? 1 : 0]; 2432} 2433 2434/// getCMovFromCond - Return a cmov opcode for the given condition, 2435/// register size in bytes, and operand type. 
2436static unsigned getCMovFromCond(X86::CondCode CC, unsigned RegBytes, 2437 bool HasMemoryOperand) { 2438 static const unsigned Opc[32][3] = { 2439 { X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr }, 2440 { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr }, 2441 { X86::CMOVB16rr, X86::CMOVB32rr, X86::CMOVB64rr }, 2442 { X86::CMOVBE16rr, X86::CMOVBE32rr, X86::CMOVBE64rr }, 2443 { X86::CMOVE16rr, X86::CMOVE32rr, X86::CMOVE64rr }, 2444 { X86::CMOVG16rr, X86::CMOVG32rr, X86::CMOVG64rr }, 2445 { X86::CMOVGE16rr, X86::CMOVGE32rr, X86::CMOVGE64rr }, 2446 { X86::CMOVL16rr, X86::CMOVL32rr, X86::CMOVL64rr }, 2447 { X86::CMOVLE16rr, X86::CMOVLE32rr, X86::CMOVLE64rr }, 2448 { X86::CMOVNE16rr, X86::CMOVNE32rr, X86::CMOVNE64rr }, 2449 { X86::CMOVNO16rr, X86::CMOVNO32rr, X86::CMOVNO64rr }, 2450 { X86::CMOVNP16rr, X86::CMOVNP32rr, X86::CMOVNP64rr }, 2451 { X86::CMOVNS16rr, X86::CMOVNS32rr, X86::CMOVNS64rr }, 2452 { X86::CMOVO16rr, X86::CMOVO32rr, X86::CMOVO64rr }, 2453 { X86::CMOVP16rr, X86::CMOVP32rr, X86::CMOVP64rr }, 2454 { X86::CMOVS16rr, X86::CMOVS32rr, X86::CMOVS64rr }, 2455 { X86::CMOVA16rm, X86::CMOVA32rm, X86::CMOVA64rm }, 2456 { X86::CMOVAE16rm, X86::CMOVAE32rm, X86::CMOVAE64rm }, 2457 { X86::CMOVB16rm, X86::CMOVB32rm, X86::CMOVB64rm }, 2458 { X86::CMOVBE16rm, X86::CMOVBE32rm, X86::CMOVBE64rm }, 2459 { X86::CMOVE16rm, X86::CMOVE32rm, X86::CMOVE64rm }, 2460 { X86::CMOVG16rm, X86::CMOVG32rm, X86::CMOVG64rm }, 2461 { X86::CMOVGE16rm, X86::CMOVGE32rm, X86::CMOVGE64rm }, 2462 { X86::CMOVL16rm, X86::CMOVL32rm, X86::CMOVL64rm }, 2463 { X86::CMOVLE16rm, X86::CMOVLE32rm, X86::CMOVLE64rm }, 2464 { X86::CMOVNE16rm, X86::CMOVNE32rm, X86::CMOVNE64rm }, 2465 { X86::CMOVNO16rm, X86::CMOVNO32rm, X86::CMOVNO64rm }, 2466 { X86::CMOVNP16rm, X86::CMOVNP32rm, X86::CMOVNP64rm }, 2467 { X86::CMOVNS16rm, X86::CMOVNS32rm, X86::CMOVNS64rm }, 2468 { X86::CMOVO16rm, X86::CMOVO32rm, X86::CMOVO64rm }, 2469 { X86::CMOVP16rm, X86::CMOVP32rm, X86::CMOVP64rm }, 2470 { X86::CMOVS16rm, X86::CMOVS32rm, X86::CMOVS64rm } 2471 }; 2472 2473 assert(CC < 16 && "Can only handle standard cond codes"); 2474 unsigned Idx = HasMemoryOperand ? 16+CC : CC; 2475 switch(RegBytes) { 2476 default: llvm_unreachable("Illegal register size!"); 2477 case 2: return Opc[Idx][0]; 2478 case 4: return Opc[Idx][1]; 2479 case 8: return Opc[Idx][2]; 2480 } 2481} 2482 2483bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const { 2484 if (!MI->isTerminator()) return false; 2485 2486 // Conditional branch is a special case. 2487 if (MI->isBranch() && !MI->isBarrier()) 2488 return true; 2489 if (!MI->isPredicable()) 2490 return true; 2491 return !isPredicated(MI); 2492} 2493 2494bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, 2495 MachineBasicBlock *&TBB, 2496 MachineBasicBlock *&FBB, 2497 SmallVectorImpl<MachineOperand> &Cond, 2498 bool AllowModify) const { 2499 // Start from the bottom of the block and work up, examining the 2500 // terminator instructions. 2501 MachineBasicBlock::iterator I = MBB.end(); 2502 MachineBasicBlock::iterator UnCondBrIter = MBB.end(); 2503 while (I != MBB.begin()) { 2504 --I; 2505 if (I->isDebugValue()) 2506 continue; 2507 2508 // Working from the bottom, when we see a non-terminator instruction, we're 2509 // done. 2510 if (!isUnpredicatedTerminator(I)) 2511 break; 2512 2513 // A terminator that isn't a branch can't easily be handled by this 2514 // analysis. 2515 if (!I->isBranch()) 2516 return true; 2517 2518 // Handle unconditional branches. 
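    // An unconditional JMP at the bottom either records the branch target in
    // TBB or, when AllowModify is set, is cleaned up below if it merely jumps
    // to the layout successor.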
2519 if (I->getOpcode() == X86::JMP_4) { 2520 UnCondBrIter = I; 2521 2522 if (!AllowModify) { 2523 TBB = I->getOperand(0).getMBB(); 2524 continue; 2525 } 2526 2527 // If the block has any instructions after a JMP, delete them. 2528 while (llvm::next(I) != MBB.end()) 2529 llvm::next(I)->eraseFromParent(); 2530 2531 Cond.clear(); 2532 FBB = 0; 2533 2534 // Delete the JMP if it's equivalent to a fall-through. 2535 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) { 2536 TBB = 0; 2537 I->eraseFromParent(); 2538 I = MBB.end(); 2539 UnCondBrIter = MBB.end(); 2540 continue; 2541 } 2542 2543 // TBB is used to indicate the unconditional destination. 2544 TBB = I->getOperand(0).getMBB(); 2545 continue; 2546 } 2547 2548 // Handle conditional branches. 2549 X86::CondCode BranchCode = getCondFromBranchOpc(I->getOpcode()); 2550 if (BranchCode == X86::COND_INVALID) 2551 return true; // Can't handle indirect branch. 2552 2553 // Working from the bottom, handle the first conditional branch. 2554 if (Cond.empty()) { 2555 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB(); 2556 if (AllowModify && UnCondBrIter != MBB.end() && 2557 MBB.isLayoutSuccessor(TargetBB)) { 2558 // If we can modify the code and it ends in something like: 2559 // 2560 // jCC L1 2561 // jmp L2 2562 // L1: 2563 // ... 2564 // L2: 2565 // 2566 // Then we can change this to: 2567 // 2568 // jnCC L2 2569 // L1: 2570 // ... 2571 // L2: 2572 // 2573 // Which is a bit more efficient. 2574 // We conditionally jump to the fall-through block. 2575 BranchCode = GetOppositeBranchCondition(BranchCode); 2576 unsigned JNCC = GetCondBranchFromCond(BranchCode); 2577 MachineBasicBlock::iterator OldInst = I; 2578 2579 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC)) 2580 .addMBB(UnCondBrIter->getOperand(0).getMBB()); 2581 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_4)) 2582 .addMBB(TargetBB); 2583 2584 OldInst->eraseFromParent(); 2585 UnCondBrIter->eraseFromParent(); 2586 2587 // Restart the analysis. 2588 UnCondBrIter = MBB.end(); 2589 I = MBB.end(); 2590 continue; 2591 } 2592 2593 FBB = TBB; 2594 TBB = I->getOperand(0).getMBB(); 2595 Cond.push_back(MachineOperand::CreateImm(BranchCode)); 2596 continue; 2597 } 2598 2599 // Handle subsequent conditional branches. Only handle the case where all 2600 // conditional branches branch to the same destination and their condition 2601 // opcodes fit one of the special multi-branch idioms. 2602 assert(Cond.size() == 1); 2603 assert(TBB); 2604 2605 // Only handle the case where all conditional branches branch to the same 2606 // destination. 2607 if (TBB != I->getOperand(0).getMBB()) 2608 return true; 2609 2610 // If the conditions are the same, we can leave them alone. 2611 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm(); 2612 if (OldBranchCode == BranchCode) 2613 continue; 2614 2615 // If they differ, see if they fit one of the known patterns. Theoretically, 2616 // we could handle more patterns here, but we shouldn't expect to see them 2617 // if instruction selection has done a reasonable job. 2618 if ((OldBranchCode == X86::COND_NP && 2619 BranchCode == X86::COND_E) || 2620 (OldBranchCode == X86::COND_E && 2621 BranchCode == X86::COND_NP)) 2622 BranchCode = X86::COND_NP_OR_E; 2623 else if ((OldBranchCode == X86::COND_P && 2624 BranchCode == X86::COND_NE) || 2625 (OldBranchCode == X86::COND_NE && 2626 BranchCode == X86::COND_P)) 2627 BranchCode = X86::COND_NE_OR_P; 2628 else 2629 return true; 2630 2631 // Update the MachineOperand. 
2632 Cond[0].setImm(BranchCode); 2633 } 2634 2635 return false; 2636} 2637 2638unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { 2639 MachineBasicBlock::iterator I = MBB.end(); 2640 unsigned Count = 0; 2641 2642 while (I != MBB.begin()) { 2643 --I; 2644 if (I->isDebugValue()) 2645 continue; 2646 if (I->getOpcode() != X86::JMP_4 && 2647 getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID) 2648 break; 2649 // Remove the branch. 2650 I->eraseFromParent(); 2651 I = MBB.end(); 2652 ++Count; 2653 } 2654 2655 return Count; 2656} 2657 2658unsigned 2659X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 2660 MachineBasicBlock *FBB, 2661 const SmallVectorImpl<MachineOperand> &Cond, 2662 DebugLoc DL) const { 2663 // Shouldn't be a fall through. 2664 assert(TBB && "InsertBranch must not be told to insert a fallthrough"); 2665 assert((Cond.size() == 1 || Cond.size() == 0) && 2666 "X86 branch conditions have one component!"); 2667 2668 if (Cond.empty()) { 2669 // Unconditional branch? 2670 assert(!FBB && "Unconditional branch with multiple successors!"); 2671 BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB); 2672 return 1; 2673 } 2674 2675 // Conditional branch. 2676 unsigned Count = 0; 2677 X86::CondCode CC = (X86::CondCode)Cond[0].getImm(); 2678 switch (CC) { 2679 case X86::COND_NP_OR_E: 2680 // Synthesize NP_OR_E with two branches. 2681 BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB); 2682 ++Count; 2683 BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB); 2684 ++Count; 2685 break; 2686 case X86::COND_NE_OR_P: 2687 // Synthesize NE_OR_P with two branches. 2688 BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB); 2689 ++Count; 2690 BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB); 2691 ++Count; 2692 break; 2693 default: { 2694 unsigned Opc = GetCondBranchFromCond(CC); 2695 BuildMI(&MBB, DL, get(Opc)).addMBB(TBB); 2696 ++Count; 2697 } 2698 } 2699 if (FBB) { 2700 // Two-way Conditional branch. Insert the second branch. 2701 BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB); 2702 ++Count; 2703 } 2704 return Count; 2705} 2706 2707bool X86InstrInfo:: 2708canInsertSelect(const MachineBasicBlock &MBB, 2709 const SmallVectorImpl<MachineOperand> &Cond, 2710 unsigned TrueReg, unsigned FalseReg, 2711 int &CondCycles, int &TrueCycles, int &FalseCycles) const { 2712 // Not all subtargets have cmov instructions. 2713 if (!TM.getSubtarget<X86Subtarget>().hasCMov()) 2714 return false; 2715 if (Cond.size() != 1) 2716 return false; 2717 // We cannot do the composite conditions, at least not in SSA form. 2718 if ((X86::CondCode)Cond[0].getImm() > X86::COND_S) 2719 return false; 2720 2721 // Check register classes. 2722 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2723 const TargetRegisterClass *RC = 2724 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); 2725 if (!RC) 2726 return false; 2727 2728 // We have cmov instructions for 16, 32, and 64 bit general purpose registers. 2729 if (X86::GR16RegClass.hasSubClassEq(RC) || 2730 X86::GR32RegClass.hasSubClassEq(RC) || 2731 X86::GR64RegClass.hasSubClassEq(RC)) { 2732 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy 2733 // Bridge. Probably Ivy Bridge as well. 2734 CondCycles = 2; 2735 TrueCycles = 2; 2736 FalseCycles = 2; 2737 return true; 2738 } 2739 2740 // Can't do vectors. 
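  // There is no cmov for vector or x87 register classes, so selects on those
  // cannot be expressed as a single conditional move here.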
2741 return false; 2742} 2743 2744void X86InstrInfo::insertSelect(MachineBasicBlock &MBB, 2745 MachineBasicBlock::iterator I, DebugLoc DL, 2746 unsigned DstReg, 2747 const SmallVectorImpl<MachineOperand> &Cond, 2748 unsigned TrueReg, unsigned FalseReg) const { 2749 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2750 assert(Cond.size() == 1 && "Invalid Cond array"); 2751 unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(), 2752 MRI.getRegClass(DstReg)->getSize(), 2753 false/*HasMemoryOperand*/); 2754 BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg); 2755} 2756 2757/// isHReg - Test if the given register is a physical h register. 2758static bool isHReg(unsigned Reg) { 2759 return X86::GR8_ABCD_HRegClass.contains(Reg); 2760} 2761 2762// Try and copy between VR128/VR64 and GR64 registers. 2763static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, 2764 bool HasAVX) { 2765 // SrcReg(VR128) -> DestReg(GR64) 2766 // SrcReg(VR64) -> DestReg(GR64) 2767 // SrcReg(GR64) -> DestReg(VR128) 2768 // SrcReg(GR64) -> DestReg(VR64) 2769 2770 if (X86::GR64RegClass.contains(DestReg)) { 2771 if (X86::VR128RegClass.contains(SrcReg)) { 2772 // Copy from a VR128 register to a GR64 register. 2773 return HasAVX ? X86::VMOVPQIto64rr : X86::MOVPQIto64rr; 2774 } else if (X86::VR64RegClass.contains(SrcReg)) { 2775 // Copy from a VR64 register to a GR64 register. 2776 return X86::MOVSDto64rr; 2777 } 2778 } else if (X86::GR64RegClass.contains(SrcReg)) { 2779 // Copy from a GR64 register to a VR128 register. 2780 if (X86::VR128RegClass.contains(DestReg)) 2781 return HasAVX ? X86::VMOV64toPQIrr : X86::MOV64toPQIrr; 2782 // Copy from a GR64 register to a VR64 register. 2783 else if (X86::VR64RegClass.contains(DestReg)) 2784 return X86::MOV64toSDrr; 2785 } 2786 2787 // SrcReg(FR32) -> DestReg(GR32) 2788 // SrcReg(GR32) -> DestReg(FR32) 2789 2790 if (X86::GR32RegClass.contains(DestReg) && X86::FR32RegClass.contains(SrcReg)) 2791 // Copy from a FR32 register to a GR32 register. 2792 return HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr; 2793 2794 if (X86::FR32RegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg)) 2795 // Copy from a GR32 register to a FR32 register. 2796 return HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr; 2797 2798 return 0; 2799} 2800 2801void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, 2802 MachineBasicBlock::iterator MI, DebugLoc DL, 2803 unsigned DestReg, unsigned SrcReg, 2804 bool KillSrc) const { 2805 // First deal with the normal symmetric copies. 2806 bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX(); 2807 unsigned Opc = 0; 2808 if (X86::GR64RegClass.contains(DestReg, SrcReg)) 2809 Opc = X86::MOV64rr; 2810 else if (X86::GR32RegClass.contains(DestReg, SrcReg)) 2811 Opc = X86::MOV32rr; 2812 else if (X86::GR16RegClass.contains(DestReg, SrcReg)) 2813 Opc = X86::MOV16rr; 2814 else if (X86::GR8RegClass.contains(DestReg, SrcReg)) { 2815 // Copying to or from a physical H register on x86-64 requires a NOREX 2816 // move. Otherwise use a normal move. 2817 if ((isHReg(DestReg) || isHReg(SrcReg)) && 2818 TM.getSubtarget<X86Subtarget>().is64Bit()) { 2819 Opc = X86::MOV8rr_NOREX; 2820 // Both operands must be encodable without an REX prefix. 2821 assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) && 2822 "8-bit H register can not be copied outside GR8_NOREX"); 2823 } else 2824 Opc = X86::MOV8rr; 2825 } else if (X86::VR128RegClass.contains(DestReg, SrcReg)) 2826 Opc = HasAVX ? 
X86::VMOVAPSrr : X86::MOVAPSrr; 2827 else if (X86::VR256RegClass.contains(DestReg, SrcReg)) 2828 Opc = X86::VMOVAPSYrr; 2829 else if (X86::VR64RegClass.contains(DestReg, SrcReg)) 2830 Opc = X86::MMX_MOVQ64rr; 2831 else 2832 Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, HasAVX); 2833 2834 if (Opc) { 2835 BuildMI(MBB, MI, DL, get(Opc), DestReg) 2836 .addReg(SrcReg, getKillRegState(KillSrc)); 2837 return; 2838 } 2839 2840 // Moving EFLAGS to / from another register requires a push and a pop. 2841 if (SrcReg == X86::EFLAGS) { 2842 if (X86::GR64RegClass.contains(DestReg)) { 2843 BuildMI(MBB, MI, DL, get(X86::PUSHF64)); 2844 BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg); 2845 return; 2846 } else if (X86::GR32RegClass.contains(DestReg)) { 2847 BuildMI(MBB, MI, DL, get(X86::PUSHF32)); 2848 BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg); 2849 return; 2850 } 2851 } 2852 if (DestReg == X86::EFLAGS) { 2853 if (X86::GR64RegClass.contains(SrcReg)) { 2854 BuildMI(MBB, MI, DL, get(X86::PUSH64r)) 2855 .addReg(SrcReg, getKillRegState(KillSrc)); 2856 BuildMI(MBB, MI, DL, get(X86::POPF64)); 2857 return; 2858 } else if (X86::GR32RegClass.contains(SrcReg)) { 2859 BuildMI(MBB, MI, DL, get(X86::PUSH32r)) 2860 .addReg(SrcReg, getKillRegState(KillSrc)); 2861 BuildMI(MBB, MI, DL, get(X86::POPF32)); 2862 return; 2863 } 2864 } 2865 2866 DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) 2867 << " to " << RI.getName(DestReg) << '\n'); 2868 llvm_unreachable("Cannot emit physreg copy instruction"); 2869} 2870 2871static unsigned getLoadStoreRegOpcode(unsigned Reg, 2872 const TargetRegisterClass *RC, 2873 bool isStackAligned, 2874 const TargetMachine &TM, 2875 bool load) { 2876 bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX(); 2877 switch (RC->getSize()) { 2878 default: 2879 llvm_unreachable("Unknown spill size"); 2880 case 1: 2881 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass"); 2882 if (TM.getSubtarget<X86Subtarget>().is64Bit()) 2883 // Copying to or from a physical H register on x86-64 requires a NOREX 2884 // move. Otherwise use a normal move. 2885 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC)) 2886 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX; 2887 return load ? X86::MOV8rm : X86::MOV8mr; 2888 case 2: 2889 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass"); 2890 return load ? X86::MOV16rm : X86::MOV16mr; 2891 case 4: 2892 if (X86::GR32RegClass.hasSubClassEq(RC)) 2893 return load ? X86::MOV32rm : X86::MOV32mr; 2894 if (X86::FR32RegClass.hasSubClassEq(RC)) 2895 return load ? 2896 (HasAVX ? X86::VMOVSSrm : X86::MOVSSrm) : 2897 (HasAVX ? X86::VMOVSSmr : X86::MOVSSmr); 2898 if (X86::RFP32RegClass.hasSubClassEq(RC)) 2899 return load ? X86::LD_Fp32m : X86::ST_Fp32m; 2900 llvm_unreachable("Unknown 4-byte regclass"); 2901 case 8: 2902 if (X86::GR64RegClass.hasSubClassEq(RC)) 2903 return load ? X86::MOV64rm : X86::MOV64mr; 2904 if (X86::FR64RegClass.hasSubClassEq(RC)) 2905 return load ? 2906 (HasAVX ? X86::VMOVSDrm : X86::MOVSDrm) : 2907 (HasAVX ? X86::VMOVSDmr : X86::MOVSDmr); 2908 if (X86::VR64RegClass.hasSubClassEq(RC)) 2909 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr; 2910 if (X86::RFP64RegClass.hasSubClassEq(RC)) 2911 return load ? X86::LD_Fp64m : X86::ST_Fp64m; 2912 llvm_unreachable("Unknown 8-byte regclass"); 2913 case 10: 2914 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass"); 2915 return load ? 
X86::LD_Fp80m : X86::ST_FpP80m; 2916 case 16: { 2917 assert(X86::VR128RegClass.hasSubClassEq(RC) && "Unknown 16-byte regclass"); 2918 // If stack is realigned we can use aligned stores. 2919 if (isStackAligned) 2920 return load ? 2921 (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm) : 2922 (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr); 2923 else 2924 return load ? 2925 (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) : 2926 (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr); 2927 } 2928 case 32: 2929 assert(X86::VR256RegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass"); 2930 // If stack is realigned we can use aligned stores. 2931 if (isStackAligned) 2932 return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr; 2933 else 2934 return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr; 2935 } 2936} 2937 2938static unsigned getStoreRegOpcode(unsigned SrcReg, 2939 const TargetRegisterClass *RC, 2940 bool isStackAligned, 2941 TargetMachine &TM) { 2942 return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false); 2943} 2944 2945 2946static unsigned getLoadRegOpcode(unsigned DestReg, 2947 const TargetRegisterClass *RC, 2948 bool isStackAligned, 2949 const TargetMachine &TM) { 2950 return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true); 2951} 2952 2953void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 2954 MachineBasicBlock::iterator MI, 2955 unsigned SrcReg, bool isKill, int FrameIdx, 2956 const TargetRegisterClass *RC, 2957 const TargetRegisterInfo *TRI) const { 2958 const MachineFunction &MF = *MBB.getParent(); 2959 assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() && 2960 "Stack slot too small for store"); 2961 unsigned Alignment = RC->getSize() == 32 ? 32 : 16; 2962 bool isAligned = (TM.getFrameLowering()->getStackAlignment() >= Alignment) || 2963 RI.canRealignStack(MF); 2964 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM); 2965 DebugLoc DL = MBB.findDebugLoc(MI); 2966 addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx) 2967 .addReg(SrcReg, getKillRegState(isKill)); 2968} 2969 2970void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg, 2971 bool isKill, 2972 SmallVectorImpl<MachineOperand> &Addr, 2973 const TargetRegisterClass *RC, 2974 MachineInstr::mmo_iterator MMOBegin, 2975 MachineInstr::mmo_iterator MMOEnd, 2976 SmallVectorImpl<MachineInstr*> &NewMIs) const { 2977 unsigned Alignment = RC->getSize() == 32 ? 32 : 16; 2978 bool isAligned = MMOBegin != MMOEnd && 2979 (*MMOBegin)->getAlignment() >= Alignment; 2980 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM); 2981 DebugLoc DL; 2982 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); 2983 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 2984 MIB.addOperand(Addr[i]); 2985 MIB.addReg(SrcReg, getKillRegState(isKill)); 2986 (*MIB).setMemRefs(MMOBegin, MMOEnd); 2987 NewMIs.push_back(MIB); 2988} 2989 2990 2991void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 2992 MachineBasicBlock::iterator MI, 2993 unsigned DestReg, int FrameIdx, 2994 const TargetRegisterClass *RC, 2995 const TargetRegisterInfo *TRI) const { 2996 const MachineFunction &MF = *MBB.getParent(); 2997 unsigned Alignment = RC->getSize() == 32 ? 
32 : 16; 2998 bool isAligned = (TM.getFrameLowering()->getStackAlignment() >= Alignment) || 2999 RI.canRealignStack(MF); 3000 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM); 3001 DebugLoc DL = MBB.findDebugLoc(MI); 3002 addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx); 3003} 3004 3005void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg, 3006 SmallVectorImpl<MachineOperand> &Addr, 3007 const TargetRegisterClass *RC, 3008 MachineInstr::mmo_iterator MMOBegin, 3009 MachineInstr::mmo_iterator MMOEnd, 3010 SmallVectorImpl<MachineInstr*> &NewMIs) const { 3011 unsigned Alignment = RC->getSize() == 32 ? 32 : 16; 3012 bool isAligned = MMOBegin != MMOEnd && 3013 (*MMOBegin)->getAlignment() >= Alignment; 3014 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM); 3015 DebugLoc DL; 3016 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); 3017 for (unsigned i = 0, e = Addr.size(); i != e; ++i) 3018 MIB.addOperand(Addr[i]); 3019 (*MIB).setMemRefs(MMOBegin, MMOEnd); 3020 NewMIs.push_back(MIB); 3021} 3022 3023bool X86InstrInfo:: 3024analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2, 3025 int &CmpMask, int &CmpValue) const { 3026 switch (MI->getOpcode()) { 3027 default: break; 3028 case X86::CMP64ri32: 3029 case X86::CMP64ri8: 3030 case X86::CMP32ri: 3031 case X86::CMP32ri8: 3032 case X86::CMP16ri: 3033 case X86::CMP16ri8: 3034 case X86::CMP8ri: 3035 SrcReg = MI->getOperand(0).getReg(); 3036 SrcReg2 = 0; 3037 CmpMask = ~0; 3038 CmpValue = MI->getOperand(1).getImm(); 3039 return true; 3040 case X86::CMP64rr: 3041 case X86::CMP32rr: 3042 case X86::CMP16rr: 3043 case X86::CMP8rr: 3044 SrcReg = MI->getOperand(0).getReg(); 3045 SrcReg2 = MI->getOperand(1).getReg(); 3046 CmpMask = ~0; 3047 CmpValue = 0; 3048 return true; 3049 case X86::TEST8rr: 3050 case X86::TEST16rr: 3051 case X86::TEST32rr: 3052 case X86::TEST64rr: 3053 SrcReg = MI->getOperand(0).getReg(); 3054 if (MI->getOperand(1).getReg() != SrcReg) return false; 3055 // Compare against zero. 3056 SrcReg2 = 0; 3057 CmpMask = ~0; 3058 CmpValue = 0; 3059 return true; 3060 } 3061 return false; 3062} 3063 3064/// isRedundantFlagInstr - check whether the first instruction, whose only 3065/// purpose is to update flags, can be made redundant. 3066/// CMPrr can be made redundant by SUBrr if the operands are the same. 3067/// This function can be extended later on. 3068/// SrcReg, SrcRegs: register operands for FlagI. 3069/// ImmValue: immediate for FlagI if it takes an immediate. 
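/// For example, "CMP32rr %reg1, %reg2" is redundant after "SUB32rr %reg1,
/// %reg2" (or after the operand-swapped SUB, in which case the caller must
/// also swap the condition codes of the EFLAGS users), and "CMP32ri8 %reg,
/// imm" is redundant after "SUB32ri8 %reg, imm" with the same immediate.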
3070inline static bool isRedundantFlagInstr(MachineInstr *FlagI, unsigned SrcReg, 3071 unsigned SrcReg2, int ImmValue, 3072 MachineInstr *OI) { 3073 if (((FlagI->getOpcode() == X86::CMP64rr && 3074 OI->getOpcode() == X86::SUB64rr) || 3075 (FlagI->getOpcode() == X86::CMP32rr && 3076 OI->getOpcode() == X86::SUB32rr)|| 3077 (FlagI->getOpcode() == X86::CMP16rr && 3078 OI->getOpcode() == X86::SUB16rr)|| 3079 (FlagI->getOpcode() == X86::CMP8rr && 3080 OI->getOpcode() == X86::SUB8rr)) && 3081 ((OI->getOperand(1).getReg() == SrcReg && 3082 OI->getOperand(2).getReg() == SrcReg2) || 3083 (OI->getOperand(1).getReg() == SrcReg2 && 3084 OI->getOperand(2).getReg() == SrcReg))) 3085 return true; 3086 3087 if (((FlagI->getOpcode() == X86::CMP64ri32 && 3088 OI->getOpcode() == X86::SUB64ri32) || 3089 (FlagI->getOpcode() == X86::CMP64ri8 && 3090 OI->getOpcode() == X86::SUB64ri8) || 3091 (FlagI->getOpcode() == X86::CMP32ri && 3092 OI->getOpcode() == X86::SUB32ri) || 3093 (FlagI->getOpcode() == X86::CMP32ri8 && 3094 OI->getOpcode() == X86::SUB32ri8) || 3095 (FlagI->getOpcode() == X86::CMP16ri && 3096 OI->getOpcode() == X86::SUB16ri) || 3097 (FlagI->getOpcode() == X86::CMP16ri8 && 3098 OI->getOpcode() == X86::SUB16ri8) || 3099 (FlagI->getOpcode() == X86::CMP8ri && 3100 OI->getOpcode() == X86::SUB8ri)) && 3101 OI->getOperand(1).getReg() == SrcReg && 3102 OI->getOperand(2).getImm() == ImmValue) 3103 return true; 3104 return false; 3105} 3106 3107/// isDefConvertible - check whether the definition can be converted 3108/// to remove a comparison against zero. 3109inline static bool isDefConvertible(MachineInstr *MI) { 3110 switch (MI->getOpcode()) { 3111 default: return false; 3112 case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri: 3113 case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8: 3114 case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: 3115 case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm: 3116 case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm: 3117 case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri: 3118 case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8: 3119 case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr: 3120 case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm: 3121 case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm: 3122 case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri: 3123 case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8: 3124 case X86::AND8ri: case X86::AND64rr: case X86::AND32rr: 3125 case X86::AND16rr: case X86::AND8rr: case X86::AND64rm: 3126 case X86::AND32rm: case X86::AND16rm: case X86::AND8rm: 3127 case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri: 3128 case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8: 3129 case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr: 3130 case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm: 3131 case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm: 3132 case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri: 3133 case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8: 3134 case X86::OR8ri: case X86::OR64rr: case X86::OR32rr: 3135 case X86::OR16rr: case X86::OR8rr: case X86::OR64rm: 3136 case X86::OR32rm: case X86::OR16rm: case X86::OR8rm: 3137 return true; 3138 } 3139} 3140 3141/// optimizeCompareInstr - Check if there exists an earlier instruction that 3142/// operates on the same source operands and sets flags in the same way as 3143/// Compare; remove Compare if possible. 
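/// For example, in
///   %reg = SUB32rr %reg, %reg2
///   CMP32ri8 %reg, 0
///   JE_4 <bb>
/// the compare against zero can be erased because the SUB32rr already
/// produces the EFLAGS the branch needs; similarly, a CMP that duplicates an
/// earlier SUB of the same operands can be erased once the condition codes
/// of its EFLAGS users have been adjusted.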
3144bool X86InstrInfo:: 3145optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, 3146 int CmpMask, int CmpValue, 3147 const MachineRegisterInfo *MRI) const { 3148 // Get the unique definition of SrcReg. 3149 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); 3150 if (!MI) return false; 3151 3152 // CmpInstr is the first instruction of the BB. 3153 MachineBasicBlock::iterator I = CmpInstr, Def = MI; 3154 3155 // If we are comparing against zero, check whether we can use MI to update 3156 // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize. 3157 bool IsCmpZero = (SrcReg2 == 0 && CmpValue == 0); 3158 if (IsCmpZero && (MI->getParent() != CmpInstr->getParent() || 3159 !isDefConvertible(MI))) 3160 return false; 3161 3162 // We are searching for an earlier instruction that can make CmpInstr 3163 // redundant and that instruction will be saved in Sub. 3164 MachineInstr *Sub = NULL; 3165 const TargetRegisterInfo *TRI = &getRegisterInfo(); 3166 3167 // We iterate backward, starting from the instruction before CmpInstr and 3168 // stop when reaching the definition of a source register or done with the BB. 3169 // RI points to the instruction before CmpInstr. 3170 // If the definition is in this basic block, RE points to the definition; 3171 // otherwise, RE is the rend of the basic block. 3172 MachineBasicBlock::reverse_iterator 3173 RI = MachineBasicBlock::reverse_iterator(I), 3174 RE = CmpInstr->getParent() == MI->getParent() ? 3175 MachineBasicBlock::reverse_iterator(++Def) /* points to MI */ : 3176 CmpInstr->getParent()->rend(); 3177 MachineInstr *Movr0Inst = 0; 3178 for (; RI != RE; ++RI) { 3179 MachineInstr *Instr = &*RI; 3180 // Check whether CmpInstr can be made redundant by the current instruction. 3181 if (!IsCmpZero && 3182 isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, Instr)) { 3183 Sub = Instr; 3184 break; 3185 } 3186 3187 if (Instr->modifiesRegister(X86::EFLAGS, TRI) || 3188 Instr->readsRegister(X86::EFLAGS, TRI)) { 3189 // This instruction modifies or uses EFLAGS. 3190 3191 // MOV32r0 etc. are implemented with xor which clobbers condition code. 3192 // They are safe to move up, if the definition to EFLAGS is dead and 3193 // earlier instructions do not read or write EFLAGS. 3194 if (!Movr0Inst && (Instr->getOpcode() == X86::MOV8r0 || 3195 Instr->getOpcode() == X86::MOV16r0 || 3196 Instr->getOpcode() == X86::MOV32r0 || 3197 Instr->getOpcode() == X86::MOV64r0) && 3198 Instr->registerDefIsDead(X86::EFLAGS, TRI)) { 3199 Movr0Inst = Instr; 3200 continue; 3201 } 3202 3203 // We can't remove CmpInstr. 3204 return false; 3205 } 3206 } 3207 3208 // Return false if no candidates exist. 3209 if (!IsCmpZero && !Sub) 3210 return false; 3211 3212 bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 && 3213 Sub->getOperand(2).getReg() == SrcReg); 3214 3215 // Scan forward from the instruction after CmpInstr for uses of EFLAGS. 3216 // It is safe to remove CmpInstr if EFLAGS is redefined or killed. 3217 // If we are done with the basic block, we need to check whether EFLAGS is 3218 // live-out. 
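  // OpsToUpdate records the EFLAGS users (Jcc, SETcc, CMOVcc) whose condition
  // codes must be rewritten when the compare's operands are swapped relative
  // to the subtract; they are only patched once the scan below has proven
  // that removing CmpInstr is safe.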
3219 bool IsSafe = false; 3220 SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate; 3221 MachineBasicBlock::iterator E = CmpInstr->getParent()->end(); 3222 for (++I; I != E; ++I) { 3223 const MachineInstr &Instr = *I; 3224 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI); 3225 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI); 3226 // We should check the usage if this instruction uses and updates EFLAGS. 3227 if (!UseEFLAGS && ModifyEFLAGS) { 3228 // It is safe to remove CmpInstr if EFLAGS is updated again. 3229 IsSafe = true; 3230 break; 3231 } 3232 if (!UseEFLAGS && !ModifyEFLAGS) 3233 continue; 3234 3235 // EFLAGS is used by this instruction. 3236 X86::CondCode OldCC; 3237 bool OpcIsSET = false; 3238 if (IsCmpZero || IsSwapped) { 3239 // We decode the condition code from opcode. 3240 if (Instr.isBranch()) 3241 OldCC = getCondFromBranchOpc(Instr.getOpcode()); 3242 else { 3243 OldCC = getCondFromSETOpc(Instr.getOpcode()); 3244 if (OldCC != X86::COND_INVALID) 3245 OpcIsSET = true; 3246 else 3247 OldCC = getCondFromCMovOpc(Instr.getOpcode()); 3248 } 3249 if (OldCC == X86::COND_INVALID) return false; 3250 } 3251 if (IsCmpZero) { 3252 switch (OldCC) { 3253 default: break; 3254 case X86::COND_A: case X86::COND_AE: 3255 case X86::COND_B: case X86::COND_BE: 3256 case X86::COND_G: case X86::COND_GE: 3257 case X86::COND_L: case X86::COND_LE: 3258 case X86::COND_O: case X86::COND_NO: 3259 // CF and OF are used, we can't perform this optimization. 3260 return false; 3261 } 3262 } else if (IsSwapped) { 3263 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs 3264 // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. 3265 // We swap the condition code and synthesize the new opcode. 3266 X86::CondCode NewCC = getSwappedCondition(OldCC); 3267 if (NewCC == X86::COND_INVALID) return false; 3268 3269 // Synthesize the new opcode. 3270 bool HasMemoryOperand = Instr.hasOneMemOperand(); 3271 unsigned NewOpc; 3272 if (Instr.isBranch()) 3273 NewOpc = GetCondBranchFromCond(NewCC); 3274 else if(OpcIsSET) 3275 NewOpc = getSETFromCond(NewCC, HasMemoryOperand); 3276 else { 3277 unsigned DstReg = Instr.getOperand(0).getReg(); 3278 NewOpc = getCMovFromCond(NewCC, MRI->getRegClass(DstReg)->getSize(), 3279 HasMemoryOperand); 3280 } 3281 3282 // Push the MachineInstr to OpsToUpdate. 3283 // If it is safe to remove CmpInstr, the condition code of these 3284 // instructions will be modified. 3285 OpsToUpdate.push_back(std::make_pair(&*I, NewOpc)); 3286 } 3287 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) { 3288 // It is safe to remove CmpInstr if EFLAGS is updated again or killed. 3289 IsSafe = true; 3290 break; 3291 } 3292 } 3293 3294 // If EFLAGS is not killed nor re-defined, we should check whether it is 3295 // live-out. If it is live-out, do not optimize. 3296 if ((IsCmpZero || IsSwapped) && !IsSafe) { 3297 MachineBasicBlock *MBB = CmpInstr->getParent(); 3298 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(), 3299 SE = MBB->succ_end(); SI != SE; ++SI) 3300 if ((*SI)->isLiveIn(X86::EFLAGS)) 3301 return false; 3302 } 3303 3304 // The instruction to be updated is either Sub or MI. 3305 Sub = IsCmpZero ? MI : Sub; 3306 // Move Movr0Inst to the place right before Sub. 3307 if (Movr0Inst) { 3308 Sub->getParent()->remove(Movr0Inst); 3309 Sub->getParent()->insert(MachineBasicBlock::iterator(Sub), Movr0Inst); 3310 } 3311 3312 // Make sure Sub instruction defines EFLAGS. 
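  // The arithmetic instructions handled here (SUB/ADD/OR/XOR/AND) all list
  // EFLAGS as their final implicit operand, which the assert below relies on.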
3313 assert(Sub->getNumOperands() >= 2 && 3314 Sub->getOperand(Sub->getNumOperands()-1).isReg() && 3315 Sub->getOperand(Sub->getNumOperands()-1).getReg() == X86::EFLAGS && 3316 "EFLAGS should be the last operand of SUB, ADD, OR, XOR, AND"); 3317 Sub->getOperand(Sub->getNumOperands()-1).setIsDef(true); 3318 CmpInstr->eraseFromParent(); 3319 3320 // Modify the condition code of instructions in OpsToUpdate. 3321 for (unsigned i = 0, e = OpsToUpdate.size(); i < e; i++) 3322 OpsToUpdate[i].first->setDesc(get(OpsToUpdate[i].second)); 3323 return true; 3324} 3325 3326/// Expand2AddrUndef - Expand a single-def pseudo instruction to a two-addr 3327/// instruction with two undef reads of the register being defined. This is 3328/// used for mapping: 3329/// %xmm4 = V_SET0 3330/// to: 3331/// %xmm4 = PXORrr %xmm4<undef>, %xmm4<undef> 3332/// 3333static bool Expand2AddrUndef(MachineInstr *MI, const MCInstrDesc &Desc) { 3334 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction."); 3335 unsigned Reg = MI->getOperand(0).getReg(); 3336 MI->setDesc(Desc); 3337 3338 // MachineInstr::addOperand() will insert explicit operands before any 3339 // implicit operands. 3340 MachineInstrBuilder(MI).addReg(Reg, RegState::Undef) 3341 .addReg(Reg, RegState::Undef); 3342 // But we don't trust that. 3343 assert(MI->getOperand(1).getReg() == Reg && 3344 MI->getOperand(2).getReg() == Reg && "Misplaced operand"); 3345 return true; 3346} 3347 3348bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { 3349 bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX(); 3350 switch (MI->getOpcode()) { 3351 case X86::V_SET0: 3352 case X86::FsFLD0SS: 3353 case X86::FsFLD0SD: 3354 return Expand2AddrUndef(MI, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr)); 3355 case X86::TEST8ri_NOREX: 3356 MI->setDesc(get(X86::TEST8ri)); 3357 return true; 3358 } 3359 return false; 3360} 3361 3362MachineInstr* 3363X86InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, 3364 int FrameIx, uint64_t Offset, 3365 const MDNode *MDPtr, 3366 DebugLoc DL) const { 3367 X86AddressMode AM; 3368 AM.BaseType = X86AddressMode::FrameIndexBase; 3369 AM.Base.FrameIndex = FrameIx; 3370 MachineInstrBuilder MIB = BuildMI(MF, DL, get(X86::DBG_VALUE)); 3371 addFullAddress(MIB, AM).addImm(Offset).addMetadata(MDPtr); 3372 return &*MIB; 3373} 3374 3375static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, 3376 const SmallVectorImpl<MachineOperand> &MOs, 3377 MachineInstr *MI, 3378 const TargetInstrInfo &TII) { 3379 // Create the base instruction with the memory operand as the first part. 3380 MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), 3381 MI->getDebugLoc(), true); 3382 MachineInstrBuilder MIB(NewMI); 3383 unsigned NumAddrOps = MOs.size(); 3384 for (unsigned i = 0; i != NumAddrOps; ++i) 3385 MIB.addOperand(MOs[i]); 3386 if (NumAddrOps < 4) // FrameIndex only 3387 addOffset(MIB, 0); 3388 3389 // Loop over the rest of the ri operands, converting them over. 
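  // Operands 0 and 1 (the tied def/use pair) were consumed by the address
  // above; only the remaining source operands, e.g. the second source of an
  // ADD, still have to be copied over.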
3390 unsigned NumOps = MI->getDesc().getNumOperands()-2; 3391 for (unsigned i = 0; i != NumOps; ++i) { 3392 MachineOperand &MO = MI->getOperand(i+2); 3393 MIB.addOperand(MO); 3394 } 3395 for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) { 3396 MachineOperand &MO = MI->getOperand(i); 3397 MIB.addOperand(MO); 3398 } 3399 return MIB; 3400} 3401 3402static MachineInstr *FuseInst(MachineFunction &MF, 3403 unsigned Opcode, unsigned OpNo, 3404 const SmallVectorImpl<MachineOperand> &MOs, 3405 MachineInstr *MI, const TargetInstrInfo &TII) { 3406 MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), 3407 MI->getDebugLoc(), true); 3408 MachineInstrBuilder MIB(NewMI); 3409 3410 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 3411 MachineOperand &MO = MI->getOperand(i); 3412 if (i == OpNo) { 3413 assert(MO.isReg() && "Expected to fold into reg operand!"); 3414 unsigned NumAddrOps = MOs.size(); 3415 for (unsigned i = 0; i != NumAddrOps; ++i) 3416 MIB.addOperand(MOs[i]); 3417 if (NumAddrOps < 4) // FrameIndex only 3418 addOffset(MIB, 0); 3419 } else { 3420 MIB.addOperand(MO); 3421 } 3422 } 3423 return MIB; 3424} 3425 3426static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, 3427 const SmallVectorImpl<MachineOperand> &MOs, 3428 MachineInstr *MI) { 3429 MachineFunction &MF = *MI->getParent()->getParent(); 3430 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode)); 3431 3432 unsigned NumAddrOps = MOs.size(); 3433 for (unsigned i = 0; i != NumAddrOps; ++i) 3434 MIB.addOperand(MOs[i]); 3435 if (NumAddrOps < 4) // FrameIndex only 3436 addOffset(MIB, 0); 3437 return MIB.addImm(0); 3438} 3439 3440MachineInstr* 3441X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, 3442 MachineInstr *MI, unsigned i, 3443 const SmallVectorImpl<MachineOperand> &MOs, 3444 unsigned Size, unsigned Align) const { 3445 const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0; 3446 bool isTwoAddrFold = false; 3447 unsigned NumOps = MI->getDesc().getNumOperands(); 3448 bool isTwoAddr = NumOps > 1 && 3449 MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1; 3450 3451 // FIXME: AsmPrinter doesn't know how to handle 3452 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding. 3453 if (MI->getOpcode() == X86::ADD32ri && 3454 MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS) 3455 return NULL; 3456 3457 MachineInstr *NewMI = NULL; 3458 // Folding a memory location into the two-address part of a two-address 3459 // instruction is different than folding it other places. It requires 3460 // replacing the *two* registers with the memory location. 
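  // That is only possible when the tied registers already match (operand 0 ==
  // operand 1), i.e. after the two-address pass has rewritten the instruction
  // into the "reg = op reg, ..." form.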
3461 if (isTwoAddr && NumOps >= 2 && i < 2 && 3462 MI->getOperand(0).isReg() && 3463 MI->getOperand(1).isReg() && 3464 MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) { 3465 OpcodeTablePtr = &RegOp2MemOpTable2Addr; 3466 isTwoAddrFold = true; 3467 } else if (i == 0) { // If operand 0 3468 if (MI->getOpcode() == X86::MOV64r0) 3469 NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI); 3470 else if (MI->getOpcode() == X86::MOV32r0) 3471 NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI); 3472 else if (MI->getOpcode() == X86::MOV16r0) 3473 NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI); 3474 else if (MI->getOpcode() == X86::MOV8r0) 3475 NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI); 3476 if (NewMI) 3477 return NewMI; 3478 3479 OpcodeTablePtr = &RegOp2MemOpTable0; 3480 } else if (i == 1) { 3481 OpcodeTablePtr = &RegOp2MemOpTable1; 3482 } else if (i == 2) { 3483 OpcodeTablePtr = &RegOp2MemOpTable2; 3484 } 3485 3486 // If table selected... 3487 if (OpcodeTablePtr) { 3488 // Find the Opcode to fuse 3489 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I = 3490 OpcodeTablePtr->find(MI->getOpcode()); 3491 if (I != OpcodeTablePtr->end()) { 3492 unsigned Opcode = I->second.first; 3493 unsigned MinAlign = (I->second.second & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT; 3494 if (Align < MinAlign) 3495 return NULL; 3496 bool NarrowToMOV32rm = false; 3497 if (Size) { 3498 unsigned RCSize = getRegClass(MI->getDesc(), i, &RI, MF)->getSize(); 3499 if (Size < RCSize) { 3500 // Check if it's safe to fold the load. If the size of the object is 3501 // narrower than the load width, then it's not. 3502 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4) 3503 return NULL; 3504 // If this is a 64-bit load, but the spill slot is 32, then we can do 3505 // a 32-bit load which is implicitly zero-extended. This likely is due 3506 // to liveintervalanalysis remat'ing a load from stack slot. 3507 if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg()) 3508 return NULL; 3509 Opcode = X86::MOV32rm; 3510 NarrowToMOV32rm = true; 3511 } 3512 } 3513 3514 if (isTwoAddrFold) 3515 NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this); 3516 else 3517 NewMI = FuseInst(MF, Opcode, i, MOs, MI, *this); 3518 3519 if (NarrowToMOV32rm) { 3520 // If this is the special case where we use a MOV32rm to load a 32-bit 3521 // value and zero-extend the top bits. Change the destination register 3522 // to a 32-bit one. 3523 unsigned DstReg = NewMI->getOperand(0).getReg(); 3524 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) 3525 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, 3526 X86::sub_32bit)); 3527 else 3528 NewMI->getOperand(0).setSubReg(X86::sub_32bit); 3529 } 3530 return NewMI; 3531 } 3532 } 3533 3534 // No fusion 3535 if (PrintFailedFusing && !MI->isCopy()) 3536 dbgs() << "We failed to fuse operand " << i << " in " << *MI; 3537 return NULL; 3538} 3539 3540/// hasPartialRegUpdate - Return true for all instructions that only update 3541/// the first 32 or 64-bits of the destination register and leave the rest 3542/// unmodified. This can be used to avoid folding loads if the instructions 3543/// only update part of the destination register, and the non-updated part is 3544/// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these 3545/// instructions breaks the partial register dependency and it can improve 3546/// performance. 
e.g.: 3547/// 3548/// movss (%rdi), %xmm0 3549/// cvtss2sd %xmm0, %xmm0 3550/// 3551/// Instead of 3552/// cvtss2sd (%rdi), %xmm0 3553/// 3554/// FIXME: This should be turned into a TSFlags. 3555/// 3556static bool hasPartialRegUpdate(unsigned Opcode) { 3557 switch (Opcode) { 3558 case X86::CVTSI2SSrr: 3559 case X86::CVTSI2SS64rr: 3560 case X86::CVTSI2SDrr: 3561 case X86::CVTSI2SD64rr: 3562 case X86::CVTSD2SSrr: 3563 case X86::Int_CVTSD2SSrr: 3564 case X86::CVTSS2SDrr: 3565 case X86::Int_CVTSS2SDrr: 3566 case X86::RCPSSr: 3567 case X86::RCPSSr_Int: 3568 case X86::ROUNDSDr: 3569 case X86::ROUNDSDr_Int: 3570 case X86::ROUNDSSr: 3571 case X86::ROUNDSSr_Int: 3572 case X86::RSQRTSSr: 3573 case X86::RSQRTSSr_Int: 3574 case X86::SQRTSSr: 3575 case X86::SQRTSSr_Int: 3576 // AVX encoded versions 3577 case X86::VCVTSD2SSrr: 3578 case X86::Int_VCVTSD2SSrr: 3579 case X86::VCVTSS2SDrr: 3580 case X86::Int_VCVTSS2SDrr: 3581 case X86::VRCPSSr: 3582 case X86::VROUNDSDr: 3583 case X86::VROUNDSDr_Int: 3584 case X86::VROUNDSSr: 3585 case X86::VROUNDSSr_Int: 3586 case X86::VRSQRTSSr: 3587 case X86::VSQRTSSr: 3588 return true; 3589 } 3590 3591 return false; 3592} 3593 3594/// getPartialRegUpdateClearance - Inform the ExeDepsFix pass how many idle 3595/// instructions we would like before a partial register update. 3596unsigned X86InstrInfo:: 3597getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum, 3598 const TargetRegisterInfo *TRI) const { 3599 if (OpNum != 0 || !hasPartialRegUpdate(MI->getOpcode())) 3600 return 0; 3601 3602 // If MI is marked as reading Reg, the partial register update is wanted. 3603 const MachineOperand &MO = MI->getOperand(0); 3604 unsigned Reg = MO.getReg(); 3605 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 3606 if (MO.readsReg() || MI->readsVirtualRegister(Reg)) 3607 return 0; 3608 } else { 3609 if (MI->readsRegister(Reg, TRI)) 3610 return 0; 3611 } 3612 3613 // If any of the preceding 16 instructions are reading Reg, insert a 3614 // dependency breaking instruction. The magic number is based on a few 3615 // Nehalem experiments. 3616 return 16; 3617} 3618 3619void X86InstrInfo:: 3620breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum, 3621 const TargetRegisterInfo *TRI) const { 3622 unsigned Reg = MI->getOperand(OpNum).getReg(); 3623 if (X86::VR128RegClass.contains(Reg)) { 3624 // These instructions are all floating point domain, so xorps is the best 3625 // choice. 3626 bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX(); 3627 unsigned Opc = HasAVX ? X86::VXORPSrr : X86::XORPSrr; 3628 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(Opc), Reg) 3629 .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef); 3630 } else if (X86::VR256RegClass.contains(Reg)) { 3631 // Use vxorps to clear the full ymm register. 3632 // It wants to read and write the xmm sub-register. 
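    // The emitted instruction is roughly "%xmm0<def> = VXORPSrr %xmm0<undef>,
    // %xmm0<undef>, %ymm0<imp-def>"; a VEX-encoded 128-bit operation zeroes
    // the upper lanes, so this clears the whole ymm register.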
3633 unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm); 3634 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(X86::VXORPSrr), XReg) 3635 .addReg(XReg, RegState::Undef).addReg(XReg, RegState::Undef) 3636 .addReg(Reg, RegState::ImplicitDefine); 3637 } else 3638 return; 3639 MI->addRegisterKilled(Reg, TRI, true); 3640} 3641 3642MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, 3643 MachineInstr *MI, 3644 const SmallVectorImpl<unsigned> &Ops, 3645 int FrameIndex) const { 3646 // Check switch flag 3647 if (NoFusing) return NULL; 3648 3649 // Unless optimizing for size, don't fold to avoid partial 3650 // register update stalls 3651 if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) && 3652 hasPartialRegUpdate(MI->getOpcode())) 3653 return 0; 3654 3655 const MachineFrameInfo *MFI = MF.getFrameInfo(); 3656 unsigned Size = MFI->getObjectSize(FrameIndex); 3657 unsigned Alignment = MFI->getObjectAlignment(FrameIndex); 3658 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 3659 unsigned NewOpc = 0; 3660 unsigned RCSize = 0; 3661 switch (MI->getOpcode()) { 3662 default: return NULL; 3663 case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break; 3664 case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break; 3665 case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break; 3666 case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break; 3667 } 3668 // Check if it's safe to fold the load. If the size of the object is 3669 // narrower than the load width, then it's not. 3670 if (Size < RCSize) 3671 return NULL; 3672 // Change to CMPXXri r, 0 first. 3673 MI->setDesc(get(NewOpc)); 3674 MI->getOperand(1).ChangeToImmediate(0); 3675 } else if (Ops.size() != 1) 3676 return NULL; 3677 3678 SmallVector<MachineOperand,4> MOs; 3679 MOs.push_back(MachineOperand::CreateFI(FrameIndex)); 3680 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, Size, Alignment); 3681} 3682 3683MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, 3684 MachineInstr *MI, 3685 const SmallVectorImpl<unsigned> &Ops, 3686 MachineInstr *LoadMI) const { 3687 // Check switch flag 3688 if (NoFusing) return NULL; 3689 3690 // Unless optimizing for size, don't fold to avoid partial 3691 // register update stalls 3692 if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) && 3693 hasPartialRegUpdate(MI->getOpcode())) 3694 return 0; 3695 3696 // Determine the alignment of the load. 3697 unsigned Alignment = 0; 3698 if (LoadMI->hasOneMemOperand()) 3699 Alignment = (*LoadMI->memoperands_begin())->getAlignment(); 3700 else 3701 switch (LoadMI->getOpcode()) { 3702 case X86::AVX_SET0PSY: 3703 case X86::AVX_SET0PDY: 3704 case X86::AVX2_SETALLONES: 3705 case X86::AVX2_SET0: 3706 Alignment = 32; 3707 break; 3708 case X86::V_SET0: 3709 case X86::V_SETALLONES: 3710 case X86::AVX_SETALLONES: 3711 Alignment = 16; 3712 break; 3713 case X86::FsFLD0SD: 3714 Alignment = 8; 3715 break; 3716 case X86::FsFLD0SS: 3717 Alignment = 4; 3718 break; 3719 default: 3720 return 0; 3721 } 3722 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 3723 unsigned NewOpc = 0; 3724 switch (MI->getOpcode()) { 3725 default: return NULL; 3726 case X86::TEST8rr: NewOpc = X86::CMP8ri; break; 3727 case X86::TEST16rr: NewOpc = X86::CMP16ri8; break; 3728 case X86::TEST32rr: NewOpc = X86::CMP32ri8; break; 3729 case X86::TEST64rr: NewOpc = X86::CMP64ri8; break; 3730 } 3731 // Change to CMPXXri r, 0 first. 
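  // e.g. "TEST32rr %reg, %reg" becomes "CMP32ri8 %reg, 0", leaving a single
  // register operand (operand 0) into which the memory reference can then be
  // folded.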
3732 MI->setDesc(get(NewOpc)); 3733 MI->getOperand(1).ChangeToImmediate(0); 3734 } else if (Ops.size() != 1) 3735 return NULL; 3736 3737 // Make sure the subregisters match. 3738 // Otherwise we risk changing the size of the load. 3739 if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg()) 3740 return NULL; 3741 3742 SmallVector<MachineOperand,X86::AddrNumOperands> MOs; 3743 switch (LoadMI->getOpcode()) { 3744 case X86::V_SET0: 3745 case X86::V_SETALLONES: 3746 case X86::AVX_SET0PSY: 3747 case X86::AVX_SET0PDY: 3748 case X86::AVX_SETALLONES: 3749 case X86::AVX2_SETALLONES: 3750 case X86::AVX2_SET0: 3751 case X86::FsFLD0SD: 3752 case X86::FsFLD0SS: { 3753 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure. 3754 // Create a constant-pool entry and operands to load from it. 3755 3756 // Medium and large mode can't fold loads this way. 3757 if (TM.getCodeModel() != CodeModel::Small && 3758 TM.getCodeModel() != CodeModel::Kernel) 3759 return NULL; 3760 3761 // x86-32 PIC requires a PIC base register for constant pools. 3762 unsigned PICBase = 0; 3763 if (TM.getRelocationModel() == Reloc::PIC_) { 3764 if (TM.getSubtarget<X86Subtarget>().is64Bit()) 3765 PICBase = X86::RIP; 3766 else 3767 // FIXME: PICBase = getGlobalBaseReg(&MF); 3768 // This doesn't work for several reasons. 3769 // 1. GlobalBaseReg may have been spilled. 3770 // 2. It may not be live at MI. 3771 return NULL; 3772 } 3773 3774 // Create a constant-pool entry. 3775 MachineConstantPool &MCP = *MF.getConstantPool(); 3776 Type *Ty; 3777 unsigned Opc = LoadMI->getOpcode(); 3778 if (Opc == X86::FsFLD0SS) 3779 Ty = Type::getFloatTy(MF.getFunction()->getContext()); 3780 else if (Opc == X86::FsFLD0SD) 3781 Ty = Type::getDoubleTy(MF.getFunction()->getContext()); 3782 else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY) 3783 Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8); 3784 else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX2_SET0) 3785 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8); 3786 else 3787 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4); 3788 3789 bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX_SETALLONES || 3790 Opc == X86::AVX2_SETALLONES); 3791 const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) : 3792 Constant::getNullValue(Ty); 3793 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); 3794 3795 // Create operands to load from the constant pool entry. 3796 MOs.push_back(MachineOperand::CreateReg(PICBase, false)); 3797 MOs.push_back(MachineOperand::CreateImm(1)); 3798 MOs.push_back(MachineOperand::CreateReg(0, false)); 3799 MOs.push_back(MachineOperand::CreateCPI(CPI, 0)); 3800 MOs.push_back(MachineOperand::CreateReg(0, false)); 3801 break; 3802 } 3803 default: { 3804 // Folding a normal load. Just copy the load's address operands. 
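    // A load's memory reference is always its trailing X86::AddrNumOperands
    // operands: base, scale, index, displacement and segment.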
3805 unsigned NumOps = LoadMI->getDesc().getNumOperands(); 3806 for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i) 3807 MOs.push_back(LoadMI->getOperand(i)); 3808 break; 3809 } 3810 } 3811 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, 0, Alignment); 3812} 3813 3814 3815bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI, 3816 const SmallVectorImpl<unsigned> &Ops) const { 3817 // Check switch flag 3818 if (NoFusing) return 0; 3819 3820 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) { 3821 switch (MI->getOpcode()) { 3822 default: return false; 3823 case X86::TEST8rr: 3824 case X86::TEST16rr: 3825 case X86::TEST32rr: 3826 case X86::TEST64rr: 3827 return true; 3828 case X86::ADD32ri: 3829 // FIXME: AsmPrinter doesn't know how to handle 3830 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding. 3831 if (MI->getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS) 3832 return false; 3833 break; 3834 } 3835 } 3836 3837 if (Ops.size() != 1) 3838 return false; 3839 3840 unsigned OpNum = Ops[0]; 3841 unsigned Opc = MI->getOpcode(); 3842 unsigned NumOps = MI->getDesc().getNumOperands(); 3843 bool isTwoAddr = NumOps > 1 && 3844 MI->getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1; 3845 3846 // Folding a memory location into the two-address part of a two-address 3847 // instruction is different than folding it other places. It requires 3848 // replacing the *two* registers with the memory location. 3849 const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0; 3850 if (isTwoAddr && NumOps >= 2 && OpNum < 2) { 3851 OpcodeTablePtr = &RegOp2MemOpTable2Addr; 3852 } else if (OpNum == 0) { // If operand 0 3853 switch (Opc) { 3854 case X86::MOV8r0: 3855 case X86::MOV16r0: 3856 case X86::MOV32r0: 3857 case X86::MOV64r0: return true; 3858 default: break; 3859 } 3860 OpcodeTablePtr = &RegOp2MemOpTable0; 3861 } else if (OpNum == 1) { 3862 OpcodeTablePtr = &RegOp2MemOpTable1; 3863 } else if (OpNum == 2) { 3864 OpcodeTablePtr = &RegOp2MemOpTable2; 3865 } 3866 3867 if (OpcodeTablePtr && OpcodeTablePtr->count(Opc)) 3868 return true; 3869 return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops); 3870} 3871 3872bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, 3873 unsigned Reg, bool UnfoldLoad, bool UnfoldStore, 3874 SmallVectorImpl<MachineInstr*> &NewMIs) const { 3875 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I = 3876 MemOp2RegOpTable.find(MI->getOpcode()); 3877 if (I == MemOp2RegOpTable.end()) 3878 return false; 3879 unsigned Opc = I->second.first; 3880 unsigned Index = I->second.second & TB_INDEX_MASK; 3881 bool FoldedLoad = I->second.second & TB_FOLDED_LOAD; 3882 bool FoldedStore = I->second.second & TB_FOLDED_STORE; 3883 if (UnfoldLoad && !FoldedLoad) 3884 return false; 3885 UnfoldLoad &= FoldedLoad; 3886 if (UnfoldStore && !FoldedStore) 3887 return false; 3888 UnfoldStore &= FoldedStore; 3889 3890 const MCInstrDesc &MCID = get(Opc); 3891 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); 3892 if (!MI->hasOneMemOperand() && 3893 RC == &X86::VR128RegClass && 3894 !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast()) 3895 // Without memoperands, loadRegFromAddr and storeRegToStackSlot will 3896 // conservatively assume the address is unaligned. That's bad for 3897 // performance. 
3898 return false; 3899 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps; 3900 SmallVector<MachineOperand,2> BeforeOps; 3901 SmallVector<MachineOperand,2> AfterOps; 3902 SmallVector<MachineOperand,4> ImpOps; 3903 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 3904 MachineOperand &Op = MI->getOperand(i); 3905 if (i >= Index && i < Index + X86::AddrNumOperands) 3906 AddrOps.push_back(Op); 3907 else if (Op.isReg() && Op.isImplicit()) 3908 ImpOps.push_back(Op); 3909 else if (i < Index) 3910 BeforeOps.push_back(Op); 3911 else if (i > Index) 3912 AfterOps.push_back(Op); 3913 } 3914 3915 // Emit the load instruction. 3916 if (UnfoldLoad) { 3917 std::pair<MachineInstr::mmo_iterator, 3918 MachineInstr::mmo_iterator> MMOs = 3919 MF.extractLoadMemRefs(MI->memoperands_begin(), 3920 MI->memoperands_end()); 3921 loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs); 3922 if (UnfoldStore) { 3923 // Address operands cannot be marked isKill. 3924 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) { 3925 MachineOperand &MO = NewMIs[0]->getOperand(i); 3926 if (MO.isReg()) 3927 MO.setIsKill(false); 3928 } 3929 } 3930 } 3931 3932 // Emit the data processing instruction. 3933 MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI->getDebugLoc(), true); 3934 MachineInstrBuilder MIB(DataMI); 3935 3936 if (FoldedStore) 3937 MIB.addReg(Reg, RegState::Define); 3938 for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i) 3939 MIB.addOperand(BeforeOps[i]); 3940 if (FoldedLoad) 3941 MIB.addReg(Reg); 3942 for (unsigned i = 0, e = AfterOps.size(); i != e; ++i) 3943 MIB.addOperand(AfterOps[i]); 3944 for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) { 3945 MachineOperand &MO = ImpOps[i]; 3946 MIB.addReg(MO.getReg(), 3947 getDefRegState(MO.isDef()) | 3948 RegState::Implicit | 3949 getKillRegState(MO.isKill()) | 3950 getDeadRegState(MO.isDead()) | 3951 getUndefRegState(MO.isUndef())); 3952 } 3953 // Change CMP32ri r, 0 back to TEST32rr r, r, etc. 3954 unsigned NewOpc = 0; 3955 switch (DataMI->getOpcode()) { 3956 default: break; 3957 case X86::CMP64ri32: 3958 case X86::CMP64ri8: 3959 case X86::CMP32ri: 3960 case X86::CMP32ri8: 3961 case X86::CMP16ri: 3962 case X86::CMP16ri8: 3963 case X86::CMP8ri: { 3964 MachineOperand &MO0 = DataMI->getOperand(0); 3965 MachineOperand &MO1 = DataMI->getOperand(1); 3966 if (MO1.getImm() == 0) { 3967 switch (DataMI->getOpcode()) { 3968 default: break; 3969 case X86::CMP64ri8: 3970 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break; 3971 case X86::CMP32ri8: 3972 case X86::CMP32ri: NewOpc = X86::TEST32rr; break; 3973 case X86::CMP16ri8: 3974 case X86::CMP16ri: NewOpc = X86::TEST16rr; break; 3975 case X86::CMP8ri: NewOpc = X86::TEST8rr; break; 3976 } 3977 DataMI->setDesc(get(NewOpc)); 3978 MO1.ChangeToRegister(MO0.getReg(), false); 3979 } 3980 } 3981 } 3982 NewMIs.push_back(DataMI); 3983 3984 // Emit the store instruction. 
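  // When the store side was folded, the register written by DataMI is spilled
  // back through the original address operands, completing the load / compute
  // / store sequence collected in NewMIs.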
3985 if (UnfoldStore) { 3986 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF); 3987 std::pair<MachineInstr::mmo_iterator, 3988 MachineInstr::mmo_iterator> MMOs = 3989 MF.extractStoreMemRefs(MI->memoperands_begin(), 3990 MI->memoperands_end()); 3991 storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs); 3992 } 3993 3994 return true; 3995} 3996 3997bool 3998X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, 3999 SmallVectorImpl<SDNode*> &NewNodes) const { 4000 if (!N->isMachineOpcode()) 4001 return false; 4002 4003 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I = 4004 MemOp2RegOpTable.find(N->getMachineOpcode()); 4005 if (I == MemOp2RegOpTable.end()) 4006 return false; 4007 unsigned Opc = I->second.first; 4008 unsigned Index = I->second.second & TB_INDEX_MASK; 4009 bool FoldedLoad = I->second.second & TB_FOLDED_LOAD; 4010 bool FoldedStore = I->second.second & TB_FOLDED_STORE; 4011 const MCInstrDesc &MCID = get(Opc); 4012 MachineFunction &MF = DAG.getMachineFunction(); 4013 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); 4014 unsigned NumDefs = MCID.NumDefs; 4015 std::vector<SDValue> AddrOps; 4016 std::vector<SDValue> BeforeOps; 4017 std::vector<SDValue> AfterOps; 4018 DebugLoc dl = N->getDebugLoc(); 4019 unsigned NumOps = N->getNumOperands(); 4020 for (unsigned i = 0; i != NumOps-1; ++i) { 4021 SDValue Op = N->getOperand(i); 4022 if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands) 4023 AddrOps.push_back(Op); 4024 else if (i < Index-NumDefs) 4025 BeforeOps.push_back(Op); 4026 else if (i > Index-NumDefs) 4027 AfterOps.push_back(Op); 4028 } 4029 SDValue Chain = N->getOperand(NumOps-1); 4030 AddrOps.push_back(Chain); 4031 4032 // Emit the load instruction. 4033 SDNode *Load = 0; 4034 if (FoldedLoad) { 4035 EVT VT = *RC->vt_begin(); 4036 std::pair<MachineInstr::mmo_iterator, 4037 MachineInstr::mmo_iterator> MMOs = 4038 MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(), 4039 cast<MachineSDNode>(N)->memoperands_end()); 4040 if (!(*MMOs.first) && 4041 RC == &X86::VR128RegClass && 4042 !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast()) 4043 // Do not introduce a slow unaligned load. 4044 return false; 4045 unsigned Alignment = RC->getSize() == 32 ? 32 : 16; 4046 bool isAligned = (*MMOs.first) && 4047 (*MMOs.first)->getAlignment() >= Alignment; 4048 Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, TM), dl, 4049 VT, MVT::Other, &AddrOps[0], AddrOps.size()); 4050 NewNodes.push_back(Load); 4051 4052 // Preserve memory reference information. 4053 cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second); 4054 } 4055 4056 // Emit the data processing instruction. 4057 std::vector<EVT> VTs; 4058 const TargetRegisterClass *DstRC = 0; 4059 if (MCID.getNumDefs() > 0) { 4060 DstRC = getRegClass(MCID, 0, &RI, MF); 4061 VTs.push_back(*DstRC->vt_begin()); 4062 } 4063 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { 4064 EVT VT = N->getValueType(i); 4065 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs()) 4066 VTs.push_back(VT); 4067 } 4068 if (Load) 4069 BeforeOps.push_back(SDValue(Load, 0)); 4070 std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps)); 4071 SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, &BeforeOps[0], 4072 BeforeOps.size()); 4073 NewNodes.push_back(NewNode); 4074 4075 // Emit the store instruction. 
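  // The chain is popped off AddrOps, the value produced by NewNode is
  // appended, and the chain is pushed back on, giving the (address..., value,
  // chain) operand order a machine store node expects.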
4076 if (FoldedStore) { 4077 AddrOps.pop_back(); 4078 AddrOps.push_back(SDValue(NewNode, 0)); 4079 AddrOps.push_back(Chain); 4080 std::pair<MachineInstr::mmo_iterator, 4081 MachineInstr::mmo_iterator> MMOs = 4082 MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(), 4083 cast<MachineSDNode>(N)->memoperands_end()); 4084 if (!(*MMOs.first) && 4085 RC == &X86::VR128RegClass && 4086 !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast()) 4087 // Do not introduce a slow unaligned store. 4088 return false; 4089 unsigned Alignment = RC->getSize() == 32 ? 32 : 16; 4090 bool isAligned = (*MMOs.first) && 4091 (*MMOs.first)->getAlignment() >= Alignment; 4092 SDNode *Store = DAG.getMachineNode(getStoreRegOpcode(0, DstRC, 4093 isAligned, TM), 4094 dl, MVT::Other, 4095 &AddrOps[0], AddrOps.size()); 4096 NewNodes.push_back(Store); 4097 4098 // Preserve memory reference information. 4099 cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second); 4100 } 4101 4102 return true; 4103} 4104 4105unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, 4106 bool UnfoldLoad, bool UnfoldStore, 4107 unsigned *LoadRegIndex) const { 4108 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I = 4109 MemOp2RegOpTable.find(Opc); 4110 if (I == MemOp2RegOpTable.end()) 4111 return 0; 4112 bool FoldedLoad = I->second.second & TB_FOLDED_LOAD; 4113 bool FoldedStore = I->second.second & TB_FOLDED_STORE; 4114 if (UnfoldLoad && !FoldedLoad) 4115 return 0; 4116 if (UnfoldStore && !FoldedStore) 4117 return 0; 4118 if (LoadRegIndex) 4119 *LoadRegIndex = I->second.second & TB_INDEX_MASK; 4120 return I->second.first; 4121} 4122 4123bool 4124X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 4125 int64_t &Offset1, int64_t &Offset2) const { 4126 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) 4127 return false; 4128 unsigned Opc1 = Load1->getMachineOpcode(); 4129 unsigned Opc2 = Load2->getMachineOpcode(); 4130 switch (Opc1) { 4131 default: return false; 4132 case X86::MOV8rm: 4133 case X86::MOV16rm: 4134 case X86::MOV32rm: 4135 case X86::MOV64rm: 4136 case X86::LD_Fp32m: 4137 case X86::LD_Fp64m: 4138 case X86::LD_Fp80m: 4139 case X86::MOVSSrm: 4140 case X86::MOVSDrm: 4141 case X86::MMX_MOVD64rm: 4142 case X86::MMX_MOVQ64rm: 4143 case X86::FsMOVAPSrm: 4144 case X86::FsMOVAPDrm: 4145 case X86::MOVAPSrm: 4146 case X86::MOVUPSrm: 4147 case X86::MOVAPDrm: 4148 case X86::MOVDQArm: 4149 case X86::MOVDQUrm: 4150 // AVX load instructions 4151 case X86::VMOVSSrm: 4152 case X86::VMOVSDrm: 4153 case X86::FsVMOVAPSrm: 4154 case X86::FsVMOVAPDrm: 4155 case X86::VMOVAPSrm: 4156 case X86::VMOVUPSrm: 4157 case X86::VMOVAPDrm: 4158 case X86::VMOVDQArm: 4159 case X86::VMOVDQUrm: 4160 case X86::VMOVAPSYrm: 4161 case X86::VMOVUPSYrm: 4162 case X86::VMOVAPDYrm: 4163 case X86::VMOVDQAYrm: 4164 case X86::VMOVDQUYrm: 4165 break; 4166 } 4167 switch (Opc2) { 4168 default: return false; 4169 case X86::MOV8rm: 4170 case X86::MOV16rm: 4171 case X86::MOV32rm: 4172 case X86::MOV64rm: 4173 case X86::LD_Fp32m: 4174 case X86::LD_Fp64m: 4175 case X86::LD_Fp80m: 4176 case X86::MOVSSrm: 4177 case X86::MOVSDrm: 4178 case X86::MMX_MOVD64rm: 4179 case X86::MMX_MOVQ64rm: 4180 case X86::FsMOVAPSrm: 4181 case X86::FsMOVAPDrm: 4182 case X86::MOVAPSrm: 4183 case X86::MOVUPSrm: 4184 case X86::MOVAPDrm: 4185 case X86::MOVDQArm: 4186 case X86::MOVDQUrm: 4187 // AVX load instructions 4188 case X86::VMOVSSrm: 4189 case X86::VMOVSDrm: 4190 case X86::FsVMOVAPSrm: 4191 case X86::FsVMOVAPDrm: 4192 case X86::VMOVAPSrm: 
4193 case X86::VMOVUPSrm: 4194 case X86::VMOVAPDrm: 4195 case X86::VMOVDQArm: 4196 case X86::VMOVDQUrm: 4197 case X86::VMOVAPSYrm: 4198 case X86::VMOVUPSYrm: 4199 case X86::VMOVAPDYrm: 4200 case X86::VMOVDQAYrm: 4201 case X86::VMOVDQUYrm: 4202 break; 4203 } 4204 4205 // Check if chain operands and base addresses match. 4206 if (Load1->getOperand(0) != Load2->getOperand(0) || 4207 Load1->getOperand(5) != Load2->getOperand(5)) 4208 return false; 4209 // Segment operands should match as well. 4210 if (Load1->getOperand(4) != Load2->getOperand(4)) 4211 return false; 4212 // Scale should be 1, Index should be Reg0. 4213 if (Load1->getOperand(1) == Load2->getOperand(1) && 4214 Load1->getOperand(2) == Load2->getOperand(2)) { 4215 if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1) 4216 return false; 4217 4218 // Now let's examine the displacements. 4219 if (isa<ConstantSDNode>(Load1->getOperand(3)) && 4220 isa<ConstantSDNode>(Load2->getOperand(3))) { 4221 Offset1 = cast<ConstantSDNode>(Load1->getOperand(3))->getSExtValue(); 4222 Offset2 = cast<ConstantSDNode>(Load2->getOperand(3))->getSExtValue(); 4223 return true; 4224 } 4225 } 4226 return false; 4227} 4228 4229bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 4230 int64_t Offset1, int64_t Offset2, 4231 unsigned NumLoads) const { 4232 assert(Offset2 > Offset1); 4233 if ((Offset2 - Offset1) / 8 > 64) 4234 return false; 4235 4236 unsigned Opc1 = Load1->getMachineOpcode(); 4237 unsigned Opc2 = Load2->getMachineOpcode(); 4238 if (Opc1 != Opc2) 4239 return false; // FIXME: overly conservative? 4240 4241 switch (Opc1) { 4242 default: break; 4243 case X86::LD_Fp32m: 4244 case X86::LD_Fp64m: 4245 case X86::LD_Fp80m: 4246 case X86::MMX_MOVD64rm: 4247 case X86::MMX_MOVQ64rm: 4248 return false; 4249 } 4250 4251 EVT VT = Load1->getValueType(0); 4252 switch (VT.getSimpleVT().SimpleTy) { 4253 default: 4254 // XMM registers. In 64-bit mode we can be a bit more aggressive since we 4255 // have 16 of them to play with. 4256 if (TM.getSubtargetImpl()->is64Bit()) { 4257 if (NumLoads >= 3) 4258 return false; 4259 } else if (NumLoads) { 4260 return false; 4261 } 4262 break; 4263 case MVT::i8: 4264 case MVT::i16: 4265 case MVT::i32: 4266 case MVT::i64: 4267 case MVT::f32: 4268 case MVT::f64: 4269 if (NumLoads) 4270 return false; 4271 break; 4272 } 4273 4274 return true; 4275} 4276 4277 4278bool X86InstrInfo:: 4279ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 4280 assert(Cond.size() == 1 && "Invalid X86 branch condition!"); 4281 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm()); 4282 if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E) 4283 return true; 4284 Cond[0].setImm(GetOppositeBranchCondition(CC)); 4285 return false; 4286} 4287 4288bool X86InstrInfo:: 4289isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { 4290 // FIXME: Return false for x87 stack register classes for now. We can't 4291 // allow any loads of these registers before FpGet_ST0_80. 4292 return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass || 4293 RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass); 4294} 4295 4296/// getGlobalBaseReg - Return a virtual register initialized with the 4297/// the global base register value. Output instructions required to 4298/// initialize the register in the function entry block, if necessary. 4299/// 4300/// TODO: Eliminate this and move the code to X86MachineFunctionInfo. 
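/// For 32-bit GOT-style PIC the register is materialized in the entry block
/// by the CGBR pass below, roughly as:
///   calll .L0$pb
/// .L0$pb:
///   popl  %eax
///   addl  $_GLOBAL_OFFSET_TABLE_+(. - .L0$pb), %eax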
4301/// 4302unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { 4303 assert(!TM.getSubtarget<X86Subtarget>().is64Bit() && 4304 "X86-64 PIC uses RIP relative addressing"); 4305 4306 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>(); 4307 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg(); 4308 if (GlobalBaseReg != 0) 4309 return GlobalBaseReg; 4310 4311 // Create the register. The code to initialize it is inserted 4312 // later, by the CGBR pass (below). 4313 MachineRegisterInfo &RegInfo = MF->getRegInfo(); 4314 GlobalBaseReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass); 4315 X86FI->setGlobalBaseReg(GlobalBaseReg); 4316 return GlobalBaseReg; 4317} 4318 4319// These are the replaceable SSE instructions. Some of these have Int variants 4320// that we don't include here. We don't want to replace instructions selected 4321// by intrinsics. 4322static const uint16_t ReplaceableInstrs[][3] = { 4323 //PackedSingle PackedDouble PackedInt 4324 { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr }, 4325 { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm }, 4326 { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr }, 4327 { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr }, 4328 { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm }, 4329 { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr }, 4330 { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm }, 4331 { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr }, 4332 { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm }, 4333 { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr }, 4334 { X86::ORPSrm, X86::ORPDrm, X86::PORrm }, 4335 { X86::ORPSrr, X86::ORPDrr, X86::PORrr }, 4336 { X86::XORPSrm, X86::XORPDrm, X86::PXORrm }, 4337 { X86::XORPSrr, X86::XORPDrr, X86::PXORrr }, 4338 // AVX 128-bit support 4339 { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr }, 4340 { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm }, 4341 { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr }, 4342 { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr }, 4343 { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm }, 4344 { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr }, 4345 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm }, 4346 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr }, 4347 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm }, 4348 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr }, 4349 { X86::VORPSrm, X86::VORPDrm, X86::VPORrm }, 4350 { X86::VORPSrr, X86::VORPDrr, X86::VPORrr }, 4351 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm }, 4352 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr }, 4353 // AVX 256-bit support 4354 { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr }, 4355 { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm }, 4356 { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr }, 4357 { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr }, 4358 { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm }, 4359 { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr } 4360}; 4361 4362static const uint16_t ReplaceableInstrsAVX2[][3] = { 4363 //PackedSingle PackedDouble PackedInt 4364 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm }, 4365 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr }, 4366 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm }, 4367 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr }, 4368 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm }, 4369 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr }, 4370 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm }, 4371 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr }, 4372 { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr }, 4373 { 
  { X86::VEXTRACTF128rr,  X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
  { X86::VINSERTF128rm,   X86::VINSERTF128rm,  X86::VINSERTI128rm  },
  { X86::VINSERTF128rr,   X86::VINSERTF128rr,  X86::VINSERTI128rr  },
  { X86::VPERM2F128rm,    X86::VPERM2F128rm,   X86::VPERM2I128rm   },
  { X86::VPERM2F128rr,    X86::VPERM2F128rr,   X86::VPERM2I128rr   }
};

// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.

static const uint16_t *lookup(unsigned opcode, unsigned domain) {
  for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
    if (ReplaceableInstrs[i][domain-1] == opcode)
      return ReplaceableInstrs[i];
  return 0;
}

static const uint16_t *lookupAVX2(unsigned opcode, unsigned domain) {
  for (unsigned i = 0, e = array_lengthof(ReplaceableInstrsAVX2); i != e; ++i)
    if (ReplaceableInstrsAVX2[i][domain-1] == opcode)
      return ReplaceableInstrsAVX2[i];
  return 0;
}

std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr *MI) const {
  uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  bool hasAVX2 = TM.getSubtarget<X86Subtarget>().hasAVX2();
  uint16_t validDomains = 0;
  if (domain && lookup(MI->getOpcode(), domain))
    validDomains = 0xe;
  else if (domain && lookupAVX2(MI->getOpcode(), domain))
    validDomains = hasAVX2 ? 0xe : 0x6;
  return std::make_pair(domain, validDomains);
}

void X86InstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
  assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
  uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
  assert(dom && "Not an SSE instruction");
  const uint16_t *table = lookup(MI->getOpcode(), dom);
  if (!table) { // try the other table
    assert((TM.getSubtarget<X86Subtarget>().hasAVX2() || Domain < 3) &&
           "256-bit vector operations only available in AVX2");
    table = lookupAVX2(MI->getOpcode(), dom);
  }
  assert(table && "Cannot change domain");
  MI->setDesc(get(table[Domain-1]));
}

/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
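/// For x86 this is X86::NOOP, the single-byte 0x90 nop.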
void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(X86::NOOP);
}

bool X86InstrInfo::isHighLatencyDef(int opc) const {
  switch (opc) {
  default: return false;
  case X86::DIVSDrm:
  case X86::DIVSDrm_Int:
  case X86::DIVSDrr:
  case X86::DIVSDrr_Int:
  case X86::DIVSSrm:
  case X86::DIVSSrm_Int:
  case X86::DIVSSrr:
  case X86::DIVSSrr_Int:
  case X86::SQRTPDm:
  case X86::SQRTPDm_Int:
  case X86::SQRTPDr:
  case X86::SQRTPDr_Int:
  case X86::SQRTPSm:
  case X86::SQRTPSm_Int:
  case X86::SQRTPSr:
  case X86::SQRTPSr_Int:
  case X86::SQRTSDm:
  case X86::SQRTSDm_Int:
  case X86::SQRTSDr:
  case X86::SQRTSDr_Int:
  case X86::SQRTSSm:
  case X86::SQRTSSm_Int:
  case X86::SQRTSSr:
  case X86::SQRTSSr_Int:
  // AVX instructions with high latency
  case X86::VDIVSDrm:
  case X86::VDIVSDrm_Int:
  case X86::VDIVSDrr:
  case X86::VDIVSDrr_Int:
  case X86::VDIVSSrm:
  case X86::VDIVSSrm_Int:
  case X86::VDIVSSrr:
  case X86::VDIVSSrr_Int:
  case X86::VSQRTPDm:
  case X86::VSQRTPDm_Int:
  case X86::VSQRTPDr:
  case X86::VSQRTPDr_Int:
  case X86::VSQRTPSm:
  case X86::VSQRTPSm_Int:
  case X86::VSQRTPSr:
  case X86::VSQRTPSr_Int:
  case X86::VSQRTSDm:
  case X86::VSQRTSDm_Int:
  case X86::VSQRTSDr:
  case X86::VSQRTSSm:
  case X86::VSQRTSSm_Int:
  case X86::VSQRTSSr:
    return true;
  }
}

bool X86InstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
                      const MachineRegisterInfo *MRI,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {
  return isHighLatencyDef(DefMI->getOpcode());
}

namespace {
  /// CGBR - Create Global Base Reg pass. This initializes the PIC
  /// global base register for x86-32.
  struct CGBR : public MachineFunctionPass {
    static char ID;
    CGBR() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());

      assert(!TM->getSubtarget<X86Subtarget>().is64Bit() &&
             "X86-64 PIC uses RIP relative addressing");

      // Only emit a global base reg in PIC mode.
      if (TM->getRelocationModel() != Reloc::PIC_)
        return false;

      X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
      unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();

      // If we didn't need a GlobalBaseReg, don't insert code.
      if (GlobalBaseReg == 0)
        return false;

      // Insert the instruction that sets GlobalBaseReg into the first MBB of
      // the function.
      MachineBasicBlock &FirstMBB = MF.front();
      MachineBasicBlock::iterator MBBI = FirstMBB.begin();
      DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      const X86InstrInfo *TII = TM->getInstrInfo();

      unsigned PC;
      if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
        PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      else
        PC = GlobalBaseReg;

      // Operand of MovePCtoStack is completely ignored by asm printer. It's
      // only used in JIT code emission as displacement to pc.
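      // MOVPC32r is a pseudo: the asm printer expands it to a call of the
      // immediately following label plus a pop of the return address, which
      // leaves the address of that label (the pic base) in PC.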
      BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);

      // If we're using vanilla 'GOT' PIC style, we should use relative
      // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
      if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
        // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
        BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
          .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
                                        X86II::MO_GOT_ABSOLUTE_ADDRESS);
      }

      return true;
    }

    virtual const char *getPassName() const {
      return "X86 PIC Global Base Reg Initialization";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char CGBR::ID = 0;
FunctionPass*
llvm::createGlobalBaseRegPass() { return new CGBR(); }

namespace {
  struct LDTLSCleanup : public MachineFunctionPass {
    static char ID;
    LDTLSCleanup() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
      if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
        // No point folding accesses if there aren't at least two.
        return false;
      }

      MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
      return VisitNode(DT->getRootNode(), 0);
    }

    // Visit the dominator subtree rooted at Node in pre-order.
    // If TLSBaseAddrReg is non-null, then use that to replace any
    // TLS_base_addr instructions. Otherwise, create the register
    // when the first such instruction is seen, and then use it
    // as we encounter more instructions.
    bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
      MachineBasicBlock *BB = Node->getBlock();
      bool Changed = false;

      // Traverse the current block.
      for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
           ++I) {
        switch (I->getOpcode()) {
        case X86::TLS_base_addr32:
        case X86::TLS_base_addr64:
          if (TLSBaseAddrReg)
            I = ReplaceTLSBaseAddrCall(I, TLSBaseAddrReg);
          else
            I = SetRegister(I, &TLSBaseAddrReg);
          Changed = true;
          break;
        default:
          break;
        }
      }

      // Visit the children of this block in the dominator tree.
      for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
           I != E; ++I) {
        Changed |= VisitNode(*I, TLSBaseAddrReg);
      }

      return Changed;
    }

    // Replace the TLS_base_addr instruction I with a copy from
    // TLSBaseAddrReg, returning the new instruction.
    MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr *I,
                                         unsigned TLSBaseAddrReg) {
      MachineFunction *MF = I->getParent()->getParent();
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF->getTarget());
      const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
      const X86InstrInfo *TII = TM->getInstrInfo();

      // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
      MachineInstr *Copy = BuildMI(*I->getParent(), I, I->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY),
                                   is64Bit ? X86::RAX : X86::EAX)
                             .addReg(TLSBaseAddrReg);
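
      // The TLS_base_addr pseudos lower to a call that returns the TLS base
      // address in RAX/EAX, so downstream users read that physical register;
      // copying the previously computed value into RAX/EAX preserves that
      // contract without repeating the call.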
      // Erase the TLS_base_addr instruction.
      I->eraseFromParent();

      return Copy;
    }

    // Create a virtual register in *TLSBaseAddrReg, and populate it by
    // inserting a copy instruction after I. Returns the new instruction.
    MachineInstr *SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) {
      MachineFunction *MF = I->getParent()->getParent();
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF->getTarget());
      const bool is64Bit = TM->getSubtarget<X86Subtarget>().is64Bit();
      const X86InstrInfo *TII = TM->getInstrInfo();

      // Create a virtual register for the TLS base address.
      MachineRegisterInfo &RegInfo = MF->getRegInfo();
      *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
                                                        ? &X86::GR64RegClass
                                                        : &X86::GR32RegClass);

      // Insert a copy from RAX/EAX to TLSBaseAddrReg.
      MachineInstr *Next = I->getNextNode();
      MachineInstr *Copy = BuildMI(*I->getParent(), Next, I->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY),
                                   *TLSBaseAddrReg)
                             .addReg(is64Bit ? X86::RAX : X86::EAX);

      return Copy;
    }

    virtual const char *getPassName() const {
      return "Local Dynamic TLS Access Clean-up";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

char LDTLSCleanup::ID = 0;
FunctionPass*
llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }