X86InstrInfo.cpp revision 014278e6a11fa0767853b831e5bf51b95bf541c5
//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetAsmInfo.h"

using namespace llvm;

namespace {
  cl::opt<bool>
  NoFusing("disable-spill-fusing",
           cl::desc("Disable fusing of spill code into instructions"));
  cl::opt<bool>
  PrintFailedFusing("print-failed-fuse-candidates",
                    cl::desc("Print instructions that the allocator wants to"
                             " fuse, but the X86 backend currently can't"),
                    cl::Hidden);
  cl::opt<bool>
  ReMatPICStubLoad("remat-pic-stub-load",
                   cl::desc("Re-materialize load from stub in PIC mode"),
                   cl::init(false), cl::Hidden);
}

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
  SmallVector<unsigned,16> AmbEntries;
  static const unsigned OpTbl2Addr[][2] = {
    { X86::ADC32ri,     X86::ADC32mi },
    { X86::ADC32ri8,    X86::ADC32mi8 },
    { X86::ADC32rr,     X86::ADC32mr },
    { X86::ADC64ri32,   X86::ADC64mi32 },
    { X86::ADC64ri8,    X86::ADC64mi8 },
    { X86::ADC64rr,     X86::ADC64mr },
    { X86::ADD16ri,     X86::ADD16mi },
    { X86::ADD16ri8,    X86::ADD16mi8 },
    { X86::ADD16rr,     X86::ADD16mr },
    { X86::ADD32ri,     X86::ADD32mi },
    { X86::ADD32ri8,    X86::ADD32mi8 },
    { X86::ADD32rr,     X86::ADD32mr },
    { X86::ADD64ri32,   X86::ADD64mi32 },
    { X86::ADD64ri8,    X86::ADD64mi8 },
    { X86::ADD64rr,     X86::ADD64mr },
    { X86::ADD8ri,      X86::ADD8mi },
    { X86::ADD8rr,      X86::ADD8mr },
    { X86::AND16ri,     X86::AND16mi },
    { X86::AND16ri8,    X86::AND16mi8 },
    { X86::AND16rr,     X86::AND16mr },
    { X86::AND32ri,     X86::AND32mi },
    { X86::AND32ri8,    X86::AND32mi8 },
    { X86::AND32rr,     X86::AND32mr },
    { X86::AND64ri32,   X86::AND64mi32 },
    { X86::AND64ri8,    X86::AND64mi8 },
    { X86::AND64rr,     X86::AND64mr },
    { X86::AND8ri,      X86::AND8mi },
    { X86::AND8rr,      X86::AND8mr },
    { X86::DEC16r,      X86::DEC16m },
    { X86::DEC32r,      X86::DEC32m },
    { X86::DEC64_16r,   X86::DEC64_16m },
    { X86::DEC64_32r,   X86::DEC64_32m },
    { X86::DEC64r,      X86::DEC64m },
    { X86::DEC8r,       X86::DEC8m },
    { X86::INC16r,      X86::INC16m },
    { X86::INC32r,      X86::INC32m },
    { X86::INC64_16r,   X86::INC64_16m },
    { X86::INC64_32r,   X86::INC64_32m },
    { X86::INC64r,      X86::INC64m },
    { X86::INC8r,       X86::INC8m },
    { X86::NEG16r,      X86::NEG16m },
    { X86::NEG32r,      X86::NEG32m },
    { X86::NEG64r,      X86::NEG64m },
    { X86::NEG8r,       X86::NEG8m },
    { X86::NOT16r,      X86::NOT16m },
    { X86::NOT32r,      X86::NOT32m },
    { X86::NOT64r,      X86::NOT64m },
    { X86::NOT8r,       X86::NOT8m },
    { X86::OR16ri,      X86::OR16mi },
    { X86::OR16ri8,     X86::OR16mi8 },
    { X86::OR16rr,      X86::OR16mr },
    { X86::OR32ri,      X86::OR32mi },
    { X86::OR32ri8,     X86::OR32mi8 },
    { X86::OR32rr,      X86::OR32mr },
    { X86::OR64ri32,    X86::OR64mi32 },
    { X86::OR64ri8,     X86::OR64mi8 },
    { X86::OR64rr,      X86::OR64mr },
    { X86::OR8ri,       X86::OR8mi },
    { X86::OR8rr,       X86::OR8mr },
    { X86::ROL16r1,     X86::ROL16m1 },
    { X86::ROL16rCL,    X86::ROL16mCL },
    { X86::ROL16ri,     X86::ROL16mi },
    { X86::ROL32r1,     X86::ROL32m1 },
    { X86::ROL32rCL,    X86::ROL32mCL },
    { X86::ROL32ri,     X86::ROL32mi },
    { X86::ROL64r1,     X86::ROL64m1 },
    { X86::ROL64rCL,    X86::ROL64mCL },
    { X86::ROL64ri,     X86::ROL64mi },
    { X86::ROL8r1,      X86::ROL8m1 },
    { X86::ROL8rCL,     X86::ROL8mCL },
    { X86::ROL8ri,      X86::ROL8mi },
    { X86::ROR16r1,     X86::ROR16m1 },
    { X86::ROR16rCL,    X86::ROR16mCL },
    { X86::ROR16ri,     X86::ROR16mi },
    { X86::ROR32r1,     X86::ROR32m1 },
    { X86::ROR32rCL,    X86::ROR32mCL },
    { X86::ROR32ri,     X86::ROR32mi },
    { X86::ROR64r1,     X86::ROR64m1 },
    { X86::ROR64rCL,    X86::ROR64mCL },
    { X86::ROR64ri,     X86::ROR64mi },
    { X86::ROR8r1,      X86::ROR8m1 },
    { X86::ROR8rCL,     X86::ROR8mCL },
    { X86::ROR8ri,      X86::ROR8mi },
    { X86::SAR16r1,     X86::SAR16m1 },
    { X86::SAR16rCL,    X86::SAR16mCL },
    { X86::SAR16ri,     X86::SAR16mi },
    { X86::SAR32r1,     X86::SAR32m1 },
    { X86::SAR32rCL,    X86::SAR32mCL },
    { X86::SAR32ri,     X86::SAR32mi },
    { X86::SAR64r1,     X86::SAR64m1 },
    { X86::SAR64rCL,    X86::SAR64mCL },
    { X86::SAR64ri,     X86::SAR64mi },
    { X86::SAR8r1,      X86::SAR8m1 },
    { X86::SAR8rCL,     X86::SAR8mCL },
    { X86::SAR8ri,      X86::SAR8mi },
    { X86::SBB32ri,     X86::SBB32mi },
    { X86::SBB32ri8,    X86::SBB32mi8 },
    { X86::SBB32rr,     X86::SBB32mr },
    { X86::SBB64ri32,   X86::SBB64mi32 },
    { X86::SBB64ri8,    X86::SBB64mi8 },
    { X86::SBB64rr,     X86::SBB64mr },
    { X86::SHL16rCL,    X86::SHL16mCL },
    { X86::SHL16ri,     X86::SHL16mi },
    { X86::SHL32rCL,    X86::SHL32mCL },
    { X86::SHL32ri,     X86::SHL32mi },
    { X86::SHL64rCL,    X86::SHL64mCL },
    { X86::SHL64ri,     X86::SHL64mi },
    { X86::SHL8rCL,     X86::SHL8mCL },
    { X86::SHL8ri,      X86::SHL8mi },
    { X86::SHLD16rrCL,  X86::SHLD16mrCL },
    { X86::SHLD16rri8,  X86::SHLD16mri8 },
    { X86::SHLD32rrCL,  X86::SHLD32mrCL },
    { X86::SHLD32rri8,  X86::SHLD32mri8 },
    { X86::SHLD64rrCL,  X86::SHLD64mrCL },
    { X86::SHLD64rri8,  X86::SHLD64mri8 },
    { X86::SHR16r1,     X86::SHR16m1 },
    { X86::SHR16rCL,    X86::SHR16mCL },
    { X86::SHR16ri,     X86::SHR16mi },
    { X86::SHR32r1,     X86::SHR32m1 },
    { X86::SHR32rCL,    X86::SHR32mCL },
    { X86::SHR32ri,     X86::SHR32mi },
    { X86::SHR64r1,     X86::SHR64m1 },
    { X86::SHR64rCL,    X86::SHR64mCL },
    { X86::SHR64ri,     X86::SHR64mi },
    { X86::SHR8r1,      X86::SHR8m1 },
    { X86::SHR8rCL,     X86::SHR8mCL },
    { X86::SHR8ri,      X86::SHR8mi },
    { X86::SHRD16rrCL,  X86::SHRD16mrCL },
    { X86::SHRD16rri8,  X86::SHRD16mri8 },
    { X86::SHRD32rrCL,  X86::SHRD32mrCL },
    { X86::SHRD32rri8,  X86::SHRD32mri8 },
    { X86::SHRD64rrCL,  X86::SHRD64mrCL },
    { X86::SHRD64rri8,  X86::SHRD64mri8 },
    { X86::SUB16ri,     X86::SUB16mi },
    { X86::SUB16ri8,    X86::SUB16mi8 },
    { X86::SUB16rr,     X86::SUB16mr },
    { X86::SUB32ri,     X86::SUB32mi },
    { X86::SUB32ri8,    X86::SUB32mi8 },
    { X86::SUB32rr,     X86::SUB32mr },
    { X86::SUB64ri32,   X86::SUB64mi32 },
    { X86::SUB64ri8,    X86::SUB64mi8 },
    { X86::SUB64rr,     X86::SUB64mr },
    { X86::SUB8ri,      X86::SUB8mi },
    { X86::SUB8rr,      X86::SUB8mr },
    { X86::XOR16ri,     X86::XOR16mi },
    { X86::XOR16ri8,    X86::XOR16mi8 },
    { X86::XOR16rr,     X86::XOR16mr },
    { X86::XOR32ri,     X86::XOR32mi },
    { X86::XOR32ri8,    X86::XOR32mi8 },
    { X86::XOR32rr,     X86::XOR32mr },
    { X86::XOR64ri32,   X86::XOR64mi32 },
    { X86::XOR64ri8,    X86::XOR64mi8 },
    { X86::XOR64rr,     X86::XOR64mr },
    { X86::XOR8ri,      X86::XOR8mi },
    { X86::XOR8rr,      X86::XOR8mr }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
    unsigned RegOp = OpTbl2Addr[i][0];
    unsigned MemOp = OpTbl2Addr[i][1];
    if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp,
                                                     MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 0 | (1 << 4) | (1 << 5); // Index 0, folded load and store
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp,
                                                               AuxInfo))).second)
      AmbEntries.push_back(MemOp);
  }

  // If the third value is 1, then it's folding either a load or a store.
  static const unsigned OpTbl0[][3] = {
    { X86::CALL32r,     X86::CALL32m,     1 },
    { X86::CALL64r,     X86::CALL64m,     1 },
    { X86::CMP16ri,     X86::CMP16mi,     1 },
    { X86::CMP16ri8,    X86::CMP16mi8,    1 },
    { X86::CMP16rr,     X86::CMP16mr,     1 },
    { X86::CMP32ri,     X86::CMP32mi,     1 },
    { X86::CMP32ri8,    X86::CMP32mi8,    1 },
    { X86::CMP32rr,     X86::CMP32mr,     1 },
    { X86::CMP64ri32,   X86::CMP64mi32,   1 },
    { X86::CMP64ri8,    X86::CMP64mi8,    1 },
    { X86::CMP64rr,     X86::CMP64mr,     1 },
    { X86::CMP8ri,      X86::CMP8mi,      1 },
    { X86::CMP8rr,      X86::CMP8mr,      1 },
    { X86::DIV16r,      X86::DIV16m,      1 },
    { X86::DIV32r,      X86::DIV32m,      1 },
    { X86::DIV64r,      X86::DIV64m,      1 },
    { X86::DIV8r,       X86::DIV8m,       1 },
    { X86::EXTRACTPSrr, X86::EXTRACTPSmr, 0 },
    { X86::FsMOVAPDrr,  X86::MOVSDmr,     0 },
    { X86::FsMOVAPSrr,  X86::MOVSSmr,     0 },
    { X86::IDIV16r,     X86::IDIV16m,     1 },
    { X86::IDIV32r,     X86::IDIV32m,     1 },
    { X86::IDIV64r,     X86::IDIV64m,     1 },
    { X86::IDIV8r,      X86::IDIV8m,      1 },
    { X86::IMUL16r,     X86::IMUL16m,     1 },
    { X86::IMUL32r,     X86::IMUL32m,     1 },
    { X86::IMUL64r,     X86::IMUL64m,     1 },
    { X86::IMUL8r,      X86::IMUL8m,      1 },
    { X86::JMP32r,      X86::JMP32m,      1 },
    { X86::JMP64r,      X86::JMP64m,      1 },
    { X86::MOV16ri,     X86::MOV16mi,     0 },
    { X86::MOV16rr,     X86::MOV16mr,     0 },
    { X86::MOV16to16_,  X86::MOV16_mr,    0 },
    { X86::MOV32ri,     X86::MOV32mi,     0 },
    { X86::MOV32rr,     X86::MOV32mr,     0 },
    { X86::MOV32to32_,  X86::MOV32_mr,    0 },
    { X86::MOV64ri32,   X86::MOV64mi32,   0 },
    { X86::MOV64rr,     X86::MOV64mr,     0 },
    { X86::MOV8ri,      X86::MOV8mi,      0 },
    { X86::MOV8rr,      X86::MOV8mr,      0 },
    { X86::MOVAPDrr,    X86::MOVAPDmr,    0 },
    { X86::MOVAPSrr,    X86::MOVAPSmr,    0 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, 0 },
    { X86::MOVPQIto64rr,X86::MOVPQI2QImr, 0 },
    { X86::MOVPS2SSrr,  X86::MOVPS2SSmr,  0 },
    { X86::MOVSDrr,     X86::MOVSDmr,     0 },
    { X86::MOVSDto64rr, X86::MOVSDto64mr, 0 },
    { X86::MOVSS2DIrr,  X86::MOVSS2DImr,  0 },
    { X86::MOVSSrr,     X86::MOVSSmr,     0 },
    { X86::MOVUPDrr,    X86::MOVUPDmr,    0 },
    { X86::MOVUPSrr,    X86::MOVUPSmr,    0 },
    { X86::MUL16r,      X86::MUL16m,      1 },
    { X86::MUL32r,      X86::MUL32m,      1 },
    { X86::MUL64r,      X86::MUL64m,      1 },
    { X86::MUL8r,       X86::MUL8m,       1 },
    { X86::SETAEr,      X86::SETAEm,      0 },
    { X86::SETAr,       X86::SETAm,       0 },
    { X86::SETBEr,      X86::SETBEm,      0 },
    { X86::SETBr,       X86::SETBm,       0 },
    { X86::SETEr,       X86::SETEm,       0 },
    { X86::SETGEr,      X86::SETGEm,      0 },
    { X86::SETGr,       X86::SETGm,       0 },
    { X86::SETLEr,      X86::SETLEm,      0 },
    { X86::SETLr,       X86::SETLm,       0 },
    { X86::SETNEr,      X86::SETNEm,      0 },
    { X86::SETNPr,      X86::SETNPm,      0 },
    { X86::SETNSr,      X86::SETNSm,      0 },
    { X86::SETPr,       X86::SETPm,       0 },
    { X86::SETSr,       X86::SETSm,       0 },
    { X86::TAILJMPr,    X86::TAILJMPm,    1 },
    { X86::TEST16ri,    X86::TEST16mi,    1 },
    { X86::TEST32ri,    X86::TEST32mi,    1 },
    { X86::TEST64ri32,  X86::TEST64mi32,  1 },
    { X86::TEST8ri,     X86::TEST8mi,     1 }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
    unsigned RegOp = OpTbl0[i][0];
    unsigned MemOp = OpTbl0[i][1];
    if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned FoldedLoad = OpTbl0[i][2];
    // Index 0, folded load or store.
    unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp,
                                                                 AuxInfo))).second)
        AmbEntries.push_back(MemOp);
  }
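
  // The AuxInfo value recorded in MemOp2RegOpTable packs the index of the
  // folded operand into the low four bits, and the "folded load" / "folded
  // store" properties into bits 4 and 5, as the table-building loops here
  // show. A consumer can decode it roughly as follows (illustrative sketch
  // with hypothetical local names, not code from this file):
  //   unsigned Index       = AuxInfo & 0xf;       // which operand was folded
  //   bool     FoldedLoad  = (AuxInfo >> 4) & 1;  // memory form does a load
  //   bool     FoldedStore = (AuxInfo >> 5) & 1;  // memory form does a store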

  static const unsigned OpTbl1[][2] = {
    { X86::CMP16rr,         X86::CMP16rm },
    { X86::CMP32rr,         X86::CMP32rm },
    { X86::CMP64rr,         X86::CMP64rm },
    { X86::CMP8rr,          X86::CMP8rm },
    { X86::CVTSD2SSrr,      X86::CVTSD2SSrm },
    { X86::CVTSI2SD64rr,    X86::CVTSI2SD64rm },
    { X86::CVTSI2SDrr,      X86::CVTSI2SDrm },
    { X86::CVTSI2SS64rr,    X86::CVTSI2SS64rm },
    { X86::CVTSI2SSrr,      X86::CVTSI2SSrm },
    { X86::CVTSS2SDrr,      X86::CVTSS2SDrm },
    { X86::CVTTSD2SI64rr,   X86::CVTTSD2SI64rm },
    { X86::CVTTSD2SIrr,     X86::CVTTSD2SIrm },
    { X86::CVTTSS2SI64rr,   X86::CVTTSS2SI64rm },
    { X86::CVTTSS2SIrr,     X86::CVTTSS2SIrm },
    { X86::FsMOVAPDrr,      X86::MOVSDrm },
    { X86::FsMOVAPSrr,      X86::MOVSSrm },
    { X86::IMUL16rri,       X86::IMUL16rmi },
    { X86::IMUL16rri8,      X86::IMUL16rmi8 },
    { X86::IMUL32rri,       X86::IMUL32rmi },
    { X86::IMUL32rri8,      X86::IMUL32rmi8 },
    { X86::IMUL64rri32,     X86::IMUL64rmi32 },
    { X86::IMUL64rri8,      X86::IMUL64rmi8 },
    { X86::Int_CMPSDrr,     X86::Int_CMPSDrm },
    { X86::Int_CMPSSrr,     X86::Int_CMPSSrm },
    { X86::Int_COMISDrr,    X86::Int_COMISDrm },
    { X86::Int_COMISSrr,    X86::Int_COMISSrm },
    { X86::Int_CVTDQ2PDrr,  X86::Int_CVTDQ2PDrm },
    { X86::Int_CVTDQ2PSrr,  X86::Int_CVTDQ2PSrm },
    { X86::Int_CVTPD2DQrr,  X86::Int_CVTPD2DQrm },
    { X86::Int_CVTPD2PSrr,  X86::Int_CVTPD2PSrm },
    { X86::Int_CVTPS2DQrr,  X86::Int_CVTPS2DQrm },
    { X86::Int_CVTPS2PDrr,  X86::Int_CVTPS2PDrm },
    { X86::Int_CVTSD2SI64rr,X86::Int_CVTSD2SI64rm },
    { X86::Int_CVTSD2SIrr,  X86::Int_CVTSD2SIrm },
    { X86::Int_CVTSD2SSrr,  X86::Int_CVTSD2SSrm },
    { X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm },
    { X86::Int_CVTSI2SDrr,  X86::Int_CVTSI2SDrm },
    { X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm },
    { X86::Int_CVTSI2SSrr,  X86::Int_CVTSI2SSrm },
    { X86::Int_CVTSS2SDrr,  X86::Int_CVTSS2SDrm },
    { X86::Int_CVTSS2SI64rr,X86::Int_CVTSS2SI64rm },
    { X86::Int_CVTSS2SIrr,  X86::Int_CVTSS2SIrm },
    { X86::Int_CVTTPD2DQrr, X86::Int_CVTTPD2DQrm },
    { X86::Int_CVTTPS2DQrr, X86::Int_CVTTPS2DQrm },
    { X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm },
    { X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm },
    { X86::Int_UCOMISDrr,   X86::Int_UCOMISDrm },
    { X86::Int_UCOMISSrr,   X86::Int_UCOMISSrm },
    { X86::MOV16rr,         X86::MOV16rm },
    { X86::MOV16to16_,      X86::MOV16_rm },
    { X86::MOV32rr,         X86::MOV32rm },
    { X86::MOV32to32_,      X86::MOV32_rm },
    { X86::MOV64rr,         X86::MOV64rm },
    { X86::MOV64toPQIrr,    X86::MOVQI2PQIrm },
    { X86::MOV64toSDrr,     X86::MOV64toSDrm },
    { X86::MOV8rr,          X86::MOV8rm },
    { X86::MOVAPDrr,        X86::MOVAPDrm },
    { X86::MOVAPSrr,        X86::MOVAPSrm },
    { X86::MOVDDUPrr,       X86::MOVDDUPrm },
    { X86::MOVDI2PDIrr,     X86::MOVDI2PDIrm },
    { X86::MOVDI2SSrr,      X86::MOVDI2SSrm },
    { X86::MOVSD2PDrr,      X86::MOVSD2PDrm },
    { X86::MOVSDrr,         X86::MOVSDrm },
    { X86::MOVSHDUPrr,      X86::MOVSHDUPrm },
    { X86::MOVSLDUPrr,      X86::MOVSLDUPrm },
    { X86::MOVSS2PSrr,      X86::MOVSS2PSrm },
    { X86::MOVSSrr,         X86::MOVSSrm },
    { X86::MOVSX16rr8,      X86::MOVSX16rm8 },
    { X86::MOVSX32rr16,     X86::MOVSX32rm16 },
    { X86::MOVSX32rr8,      X86::MOVSX32rm8 },
    { X86::MOVSX64rr16,     X86::MOVSX64rm16 },
    { X86::MOVSX64rr32,     X86::MOVSX64rm32 },
    { X86::MOVSX64rr8,      X86::MOVSX64rm8 },
    { X86::MOVUPDrr,        X86::MOVUPDrm },
    { X86::MOVUPSrr,        X86::MOVUPSrm },
    { X86::MOVZDI2PDIrr,    X86::MOVZDI2PDIrm },
    { X86::MOVZQI2PQIrr,    X86::MOVZQI2PQIrm },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm },
    { X86::MOVZX16rr8,      X86::MOVZX16rm8 },
    { X86::MOVZX32rr16,     X86::MOVZX32rm16 },
    { X86::MOVZX32rr8,      X86::MOVZX32rm8 },
    { X86::MOVZX64rr16,     X86::MOVZX64rm16 },
    { X86::MOVZX64rr32,     X86::MOVZX64rm32 },
    { X86::MOVZX64rr8,      X86::MOVZX64rm8 },
    { X86::PSHUFDri,        X86::PSHUFDmi },
    { X86::PSHUFHWri,       X86::PSHUFHWmi },
    { X86::PSHUFLWri,       X86::PSHUFLWmi },
    { X86::RCPPSr,          X86::RCPPSm },
    { X86::RCPPSr_Int,      X86::RCPPSm_Int },
    { X86::RSQRTPSr,        X86::RSQRTPSm },
    { X86::RSQRTPSr_Int,    X86::RSQRTPSm_Int },
    { X86::RSQRTSSr,        X86::RSQRTSSm },
    { X86::RSQRTSSr_Int,    X86::RSQRTSSm_Int },
    { X86::SQRTPDr,         X86::SQRTPDm },
    { X86::SQRTPDr_Int,     X86::SQRTPDm_Int },
    { X86::SQRTPSr,         X86::SQRTPSm },
    { X86::SQRTPSr_Int,     X86::SQRTPSm_Int },
    { X86::SQRTSDr,         X86::SQRTSDm },
    { X86::SQRTSDr_Int,     X86::SQRTSDm_Int },
    { X86::SQRTSSr,         X86::SQRTSSm },
    { X86::SQRTSSr_Int,     X86::SQRTSSm_Int },
    { X86::TEST16rr,        X86::TEST16rm },
    { X86::TEST32rr,        X86::TEST32rm },
    { X86::TEST64rr,        X86::TEST64rm },
    { X86::TEST8rr,         X86::TEST8rm },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr,       X86::UCOMISDrm },
    { X86::UCOMISSrr,       X86::UCOMISSrm }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
    unsigned RegOp = OpTbl1[i][0];
    unsigned MemOp = OpTbl1[i][1];
    if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 1 | (1 << 4); // Index 1, folded load
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp,
                                                                 AuxInfo))).second)
        AmbEntries.push_back(MemOp);
  }

  static const unsigned OpTbl2[][2] = {
    { X86::ADC32rr,         X86::ADC32rm },
    { X86::ADC64rr,         X86::ADC64rm },
    { X86::ADD16rr,         X86::ADD16rm },
    { X86::ADD32rr,         X86::ADD32rm },
    { X86::ADD64rr,         X86::ADD64rm },
    { X86::ADD8rr,          X86::ADD8rm },
    { X86::ADDPDrr,         X86::ADDPDrm },
    { X86::ADDPSrr,         X86::ADDPSrm },
    { X86::ADDSDrr,         X86::ADDSDrm },
    { X86::ADDSSrr,         X86::ADDSSrm },
    { X86::ADDSUBPDrr,      X86::ADDSUBPDrm },
    { X86::ADDSUBPSrr,      X86::ADDSUBPSrm },
    { X86::AND16rr,         X86::AND16rm },
    { X86::AND32rr,         X86::AND32rm },
    { X86::AND64rr,         X86::AND64rm },
    { X86::AND8rr,          X86::AND8rm },
    { X86::ANDNPDrr,        X86::ANDNPDrm },
    { X86::ANDNPSrr,        X86::ANDNPSrm },
    { X86::ANDPDrr,         X86::ANDPDrm },
    { X86::ANDPSrr,         X86::ANDPSrm },
    { X86::CMOVA16rr,       X86::CMOVA16rm },
    { X86::CMOVA32rr,       X86::CMOVA32rm },
    { X86::CMOVA64rr,       X86::CMOVA64rm },
    { X86::CMOVAE16rr,      X86::CMOVAE16rm },
    { X86::CMOVAE32rr,      X86::CMOVAE32rm },
    { X86::CMOVAE64rr,      X86::CMOVAE64rm },
    { X86::CMOVB16rr,       X86::CMOVB16rm },
    { X86::CMOVB32rr,       X86::CMOVB32rm },
    { X86::CMOVB64rr,       X86::CMOVB64rm },
    { X86::CMOVBE16rr,      X86::CMOVBE16rm },
    { X86::CMOVBE32rr,      X86::CMOVBE32rm },
    { X86::CMOVBE64rr,      X86::CMOVBE64rm },
    { X86::CMOVE16rr,       X86::CMOVE16rm },
    { X86::CMOVE32rr,       X86::CMOVE32rm },
    { X86::CMOVE64rr,       X86::CMOVE64rm },
    { X86::CMOVG16rr,       X86::CMOVG16rm },
    { X86::CMOVG32rr,       X86::CMOVG32rm },
    { X86::CMOVG64rr,       X86::CMOVG64rm },
    { X86::CMOVGE16rr,      X86::CMOVGE16rm },
    { X86::CMOVGE32rr,      X86::CMOVGE32rm },
    { X86::CMOVGE64rr,      X86::CMOVGE64rm },
    { X86::CMOVL16rr,       X86::CMOVL16rm },
    { X86::CMOVL32rr,       X86::CMOVL32rm },
    { X86::CMOVL64rr,       X86::CMOVL64rm },
    { X86::CMOVLE16rr,      X86::CMOVLE16rm },
    { X86::CMOVLE32rr,      X86::CMOVLE32rm },
    { X86::CMOVLE64rr,      X86::CMOVLE64rm },
    { X86::CMOVNE16rr,      X86::CMOVNE16rm },
    { X86::CMOVNE32rr,      X86::CMOVNE32rm },
    { X86::CMOVNE64rr,      X86::CMOVNE64rm },
    { X86::CMOVNP16rr,      X86::CMOVNP16rm },
    { X86::CMOVNP32rr,      X86::CMOVNP32rm },
    { X86::CMOVNP64rr,      X86::CMOVNP64rm },
    { X86::CMOVNS16rr,      X86::CMOVNS16rm },
    { X86::CMOVNS32rr,      X86::CMOVNS32rm },
    { X86::CMOVNS64rr,      X86::CMOVNS64rm },
    { X86::CMOVP16rr,       X86::CMOVP16rm },
    { X86::CMOVP32rr,       X86::CMOVP32rm },
    { X86::CMOVP64rr,       X86::CMOVP64rm },
    { X86::CMOVS16rr,       X86::CMOVS16rm },
    { X86::CMOVS32rr,       X86::CMOVS32rm },
    { X86::CMOVS64rr,       X86::CMOVS64rm },
    { X86::CMPPDrri,        X86::CMPPDrmi },
    { X86::CMPPSrri,        X86::CMPPSrmi },
    { X86::CMPSDrr,         X86::CMPSDrm },
    { X86::CMPSSrr,         X86::CMPSSrm },
    { X86::DIVPDrr,         X86::DIVPDrm },
    { X86::DIVPSrr,         X86::DIVPSrm },
    { X86::DIVSDrr,         X86::DIVSDrm },
    { X86::DIVSSrr,         X86::DIVSSrm },
    { X86::FsANDNPDrr,      X86::FsANDNPDrm },
    { X86::FsANDNPSrr,      X86::FsANDNPSrm },
    { X86::FsANDPDrr,       X86::FsANDPDrm },
    { X86::FsANDPSrr,       X86::FsANDPSrm },
    { X86::FsORPDrr,        X86::FsORPDrm },
    { X86::FsORPSrr,        X86::FsORPSrm },
    { X86::FsXORPDrr,       X86::FsXORPDrm },
    { X86::FsXORPSrr,       X86::FsXORPSrm },
    { X86::HADDPDrr,        X86::HADDPDrm },
    { X86::HADDPSrr,        X86::HADDPSrm },
    { X86::HSUBPDrr,        X86::HSUBPDrm },
    { X86::HSUBPSrr,        X86::HSUBPSrm },
    { X86::IMUL16rr,        X86::IMUL16rm },
    { X86::IMUL32rr,        X86::IMUL32rm },
    { X86::IMUL64rr,        X86::IMUL64rm },
    { X86::MAXPDrr,         X86::MAXPDrm },
    { X86::MAXPDrr_Int,     X86::MAXPDrm_Int },
    { X86::MAXPSrr,         X86::MAXPSrm },
    { X86::MAXPSrr_Int,     X86::MAXPSrm_Int },
    { X86::MAXSDrr,         X86::MAXSDrm },
    { X86::MAXSDrr_Int,     X86::MAXSDrm_Int },
    { X86::MAXSSrr,         X86::MAXSSrm },
    { X86::MAXSSrr_Int,     X86::MAXSSrm_Int },
    { X86::MINPDrr,         X86::MINPDrm },
    { X86::MINPDrr_Int,     X86::MINPDrm_Int },
    { X86::MINPSrr,         X86::MINPSrm },
    { X86::MINPSrr_Int,     X86::MINPSrm_Int },
    { X86::MINSDrr,         X86::MINSDrm },
    { X86::MINSDrr_Int,     X86::MINSDrm_Int },
    { X86::MINSSrr,         X86::MINSSrm },
    { X86::MINSSrr_Int,     X86::MINSSrm_Int },
    { X86::MULPDrr,         X86::MULPDrm },
    { X86::MULPSrr,         X86::MULPSrm },
    { X86::MULSDrr,         X86::MULSDrm },
    { X86::MULSSrr,         X86::MULSSrm },
    { X86::OR16rr,          X86::OR16rm },
    { X86::OR32rr,          X86::OR32rm },
    { X86::OR64rr,          X86::OR64rm },
    { X86::OR8rr,           X86::OR8rm },
    { X86::ORPDrr,          X86::ORPDrm },
    { X86::ORPSrr,          X86::ORPSrm },
    { X86::PACKSSDWrr,      X86::PACKSSDWrm },
    { X86::PACKSSWBrr,      X86::PACKSSWBrm },
    { X86::PACKUSWBrr,      X86::PACKUSWBrm },
    { X86::PADDBrr,         X86::PADDBrm },
    { X86::PADDDrr,         X86::PADDDrm },
    { X86::PADDQrr,         X86::PADDQrm },
    { X86::PADDSBrr,        X86::PADDSBrm },
    { X86::PADDSWrr,        X86::PADDSWrm },
    { X86::PADDWrr,         X86::PADDWrm },
    { X86::PANDNrr,         X86::PANDNrm },
    { X86::PANDrr,          X86::PANDrm },
    { X86::PAVGBrr,         X86::PAVGBrm },
    { X86::PAVGWrr,         X86::PAVGWrm },
    { X86::PCMPEQBrr,       X86::PCMPEQBrm },
    { X86::PCMPEQDrr,       X86::PCMPEQDrm },
    { X86::PCMPEQWrr,       X86::PCMPEQWrm },
    { X86::PCMPGTBrr,       X86::PCMPGTBrm },
    { X86::PCMPGTDrr,       X86::PCMPGTDrm },
    { X86::PCMPGTWrr,       X86::PCMPGTWrm },
    { X86::PINSRWrri,       X86::PINSRWrmi },
    { X86::PMADDWDrr,       X86::PMADDWDrm },
    { X86::PMAXSWrr,        X86::PMAXSWrm },
    { X86::PMAXUBrr,        X86::PMAXUBrm },
    { X86::PMINSWrr,        X86::PMINSWrm },
    { X86::PMINUBrr,        X86::PMINUBrm },
    { X86::PMULDQrr,        X86::PMULDQrm },
    { X86::PMULDQrr_int,    X86::PMULDQrm_int },
    { X86::PMULHUWrr,       X86::PMULHUWrm },
    { X86::PMULHWrr,        X86::PMULHWrm },
    { X86::PMULLDrr,        X86::PMULLDrm },
    { X86::PMULLDrr_int,    X86::PMULLDrm_int },
    { X86::PMULLWrr,        X86::PMULLWrm },
    { X86::PMULUDQrr,       X86::PMULUDQrm },
    { X86::PORrr,           X86::PORrm },
    { X86::PSADBWrr,        X86::PSADBWrm },
    { X86::PSLLDrr,         X86::PSLLDrm },
    { X86::PSLLQrr,         X86::PSLLQrm },
    { X86::PSLLWrr,         X86::PSLLWrm },
    { X86::PSRADrr,         X86::PSRADrm },
    { X86::PSRAWrr,         X86::PSRAWrm },
    { X86::PSRLDrr,         X86::PSRLDrm },
    { X86::PSRLQrr,         X86::PSRLQrm },
    { X86::PSRLWrr,         X86::PSRLWrm },
    { X86::PSUBBrr,         X86::PSUBBrm },
    { X86::PSUBDrr,         X86::PSUBDrm },
    { X86::PSUBSBrr,        X86::PSUBSBrm },
    { X86::PSUBSWrr,        X86::PSUBSWrm },
    { X86::PSUBWrr,         X86::PSUBWrm },
    { X86::PUNPCKHBWrr,     X86::PUNPCKHBWrm },
    { X86::PUNPCKHDQrr,     X86::PUNPCKHDQrm },
    { X86::PUNPCKHQDQrr,    X86::PUNPCKHQDQrm },
    { X86::PUNPCKHWDrr,     X86::PUNPCKHWDrm },
    { X86::PUNPCKLBWrr,     X86::PUNPCKLBWrm },
    { X86::PUNPCKLDQrr,     X86::PUNPCKLDQrm },
    { X86::PUNPCKLQDQrr,    X86::PUNPCKLQDQrm },
    { X86::PUNPCKLWDrr,     X86::PUNPCKLWDrm },
    { X86::PXORrr,          X86::PXORrm },
    { X86::SBB32rr,         X86::SBB32rm },
    { X86::SBB64rr,         X86::SBB64rm },
    { X86::SHUFPDrri,       X86::SHUFPDrmi },
    { X86::SHUFPSrri,       X86::SHUFPSrmi },
    { X86::SUB16rr,         X86::SUB16rm },
    { X86::SUB32rr,         X86::SUB32rm },
    { X86::SUB64rr,         X86::SUB64rm },
    { X86::SUB8rr,          X86::SUB8rm },
    { X86::SUBPDrr,         X86::SUBPDrm },
    { X86::SUBPSrr,         X86::SUBPSrm },
    { X86::SUBSDrr,         X86::SUBSDrm },
    { X86::SUBSSrr,         X86::SUBSSrm },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr,      X86::UNPCKHPDrm },
    { X86::UNPCKHPSrr,      X86::UNPCKHPSrm },
    { X86::UNPCKLPDrr,      X86::UNPCKLPDrm },
    { X86::UNPCKLPSrr,      X86::UNPCKLPSrm },
    { X86::XOR16rr,         X86::XOR16rm },
    { X86::XOR32rr,         X86::XOR32rm },
    { X86::XOR64rr,         X86::XOR64rm },
    { X86::XOR8rr,          X86::XOR8rm },
    { X86::XORPDrr,         X86::XORPDrm },
    { X86::XORPSrr,         X86::XORPSrm }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
    unsigned RegOp = OpTbl2[i][0];
    unsigned MemOp = OpTbl2[i][1];
    if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 2 | (1 << 4); // Index 2, folded load
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp,
                                                               AuxInfo))).second)
      AmbEntries.push_back(MemOp);
  }

  // There should be no ambiguous entries left over.
  assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}
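
// The tables built above drive spill-code folding: RegOp2MemOpTable{0,1,2,2Addr}
// map a register-form opcode to the memory-form opcode that folds a load or
// store into a particular operand position, while MemOp2RegOpTable supports
// the reverse (unfolding) direction, with AuxInfo describing which operand
// was folded and whether the memory form loads, stores, or both.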

bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case X86::MOV8rr:
  case X86::MOV16rr:
  case X86::MOV32rr:
  case X86::MOV64rr:
  case X86::MOV16to16_:
  case X86::MOV32to32_:
  case X86::MOVSSrr:
  case X86::MOVSDrr:

  // FP Stack register class copies
  case X86::MOV_Fp3232: case X86::MOV_Fp6464: case X86::MOV_Fp8080:
  case X86::MOV_Fp3264: case X86::MOV_Fp3280:
  case X86::MOV_Fp6432: case X86::MOV_Fp8032:

  case X86::FsMOVAPSrr:
  case X86::FsMOVAPDrr:
  case X86::MOVAPSrr:
  case X86::MOVAPDrr:
  case X86::MOVSS2PSrr:
  case X86::MOVSD2PDrr:
  case X86::MOVPS2SSrr:
  case X86::MOVPD2SDrr:
  case X86::MMX_MOVD64rr:
  case X86::MMX_MOVQ64rr:
    assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "invalid register-register move instruction");
    sourceReg = MI.getOperand(1).getReg();
    destReg = MI.getOperand(0).getReg();
    return true;
  }
}

unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    if (MI->getOperand(1).isFrameIndex() && MI->getOperand(2).isImmediate() &&
        MI->getOperand(3).isRegister() && MI->getOperand(4).isImmediate() &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    if (MI->getOperand(0).isFrameIndex() && MI->getOperand(1).isImmediate() &&
        MI->getOperand(2).isRegister() && MI->getOperand(3).isImmediate() &&
        MI->getOperand(1).getImm() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}
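
// Note on the operand pattern matched above: at this revision an X86 memory
// reference is encoded as four consecutive machine operands,
//   [BaseReg, ScaleAmt, IndexReg, Displacement]
// so a plain stack-slot access is a frame-index base with scale 1, no index
// register, and displacement 0 -- exactly the shape the two functions above
// (and isReallyTriviallyReMaterializable below) test for.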

/// regIsPICBase - Return true if register is a PIC base (i.e. defined by
/// X86::MOVPC32r).
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  bool isPICBase = false;
  for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
       E = MRI.def_end(); I != E; ++I) {
    MachineInstr *DefMI = I.getOperand().getParent();
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

/// isGVStub - Return true if the GV requires an extra load to get the
/// real address.
static inline bool isGVStub(GlobalValue *GV, X86TargetMachine &TM) {
  return TM.getSubtarget<X86Subtarget>().GVRequiresExtraLoad(GV, TM, false);
}

bool
X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI->getOperand(1).isRegister() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(3).isRegister() && MI->getOperand(3).getReg() == 0 &&
        (MI->getOperand(4).isConstantPoolIndex() ||
         (MI->getOperand(4).isGlobalAddress() &&
          isGVStub(MI->getOperand(4).getGlobal(), TM)))) {
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Re-materializing a load from a PIC stub is only allowed when
      // explicitly enabled.
      if (!ReMatPICStubLoad && MI->getOperand(4).isGlobalAddress())
        return false;
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI->getOperand(1).isRegister() &&
        MI->getOperand(2).isImmediate() &&
        MI->getOperand(3).isRegister() && MI->getOperand(3).getReg() == 0 &&
        !MI->getOperand(4).isRegister()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
/// that would clobber the EFLAGS condition register. Note the result may be
/// conservative: if it cannot definitely determine the safety after visiting
/// two instructions, it assumes it's not safe.
static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  // For compile time consideration, if we are not able to determine the
  // safety after visiting 2 instructions, we will assume it's not safe.
  for (unsigned i = 0; i < 2; ++i) {
    if (I == MBB.end())
      // Reached end of block, it's safe.
      return true;
    bool SeenDef = false;
    for (unsigned j = 0, e = I->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = I->getOperand(j);
      if (!MO.isRegister())
        continue;
      if (MO.getReg() == X86::EFLAGS) {
        if (MO.isUse())
          return false;
        SeenDef = true;
      }
    }

    if (SeenDef)
      // This instruction defines EFLAGS, no need to look any further.
      return true;
    ++I;
  }

  // Conservative answer.
  return false;
}

void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg,
                                 const MachineInstr *Orig) const {
  unsigned SubIdx = Orig->getOperand(0).isRegister()
    ? Orig->getOperand(0).getSubReg() : 0;
  bool ChangeSubIdx = SubIdx != 0;
  if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
    DestReg = RI.getSubReg(DestReg, SubIdx);
    SubIdx = 0;
  }

  // MOV32r0 etc. are implemented with xor, which clobbers the condition codes.
  // Re-materialize them as movri instructions to avoid side effects.
  bool Emitted = false;
  switch (Orig->getOpcode()) {
  default: break;
  case X86::MOV8r0:
  case X86::MOV16r0:
  case X86::MOV32r0:
  case X86::MOV64r0: {
    if (!isSafeToClobberEFLAGS(MBB, I)) {
      unsigned Opc = 0;
      switch (Orig->getOpcode()) {
      default: break;
      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
      case X86::MOV16r0: Opc = X86::MOV16ri; break;
      case X86::MOV32r0: Opc = X86::MOV32ri; break;
      case X86::MOV64r0: Opc = X86::MOV64ri32; break;
      }
      BuildMI(MBB, I, get(Opc), DestReg).addImm(0);
      Emitted = true;
    }
    break;
  }
  }

  if (!Emitted) {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->getOperand(0).setReg(DestReg);
    MBB.insert(I, MI);
  }

  if (ChangeSubIdx) {
    MachineInstr *NewMI = prior(I);
    NewMI->getOperand(0).setSubReg(SubIdx);
  }
}

/// isInvariantLoad - Return true if the specified instruction (which is marked
/// mayLoad) is loading from a location whose value is invariant across the
/// function. For example, loading a value from the constant pool or from the
/// argument area of a function that does not change. This should only return
/// true if *all* loads the instruction does are invariant (if it does multiple
/// loads).
bool X86InstrInfo::isInvariantLoad(MachineInstr *MI) const {
  // This code cares about loads from three cases: constant pool entries,
  // invariant argument slots, and global stubs. In order to handle these cases
  // for all of the myriad of X86 instructions, we just scan for a CP/FI/GV
  // operand and base our analysis on it. This is safe because the address of
  // none of these three cases is ever used as anything other than a load base
  // and X86 doesn't have any instructions that load from multiple places.

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    // Loads from constant pools are trivially invariant.
    if (MO.isConstantPoolIndex())
      return true;

    if (MO.isGlobalAddress())
      return isGVStub(MO.getGlobal(), TM);

    // If this is a load from an invariant stack slot, the load is a constant.
    if (MO.isFrameIndex()) {
      const MachineFrameInfo &MFI =
        *MI->getParent()->getParent()->getFrameInfo();
      int Idx = MO.getIndex();
      return MFI.isFixedObjectIndex(Idx) && MFI.isImmutableObjectIndex(Idx);
    }
  }

  // All other instances of these instructions are presumed to have other
  // issues.
  return false;
}

/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
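/// For example (illustrative AT&T-syntax sketch, not from the original
/// comments): "shl $2, %src" can become "lea (,%src,4), %dst", and
/// "add %src2, %src" can become "lea (%src,%src2), %dst". Since an address
/// scale must be 1, 2, 4, or 8, only shift amounts of 1-3 qualify.
///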
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  // All instructions input are two-addr instructions. Get the known operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  bool isDead = MI->getOperand(0).isDead();
  bool isKill = MI->getOperand(1).isKill();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  unsigned MIOpc = MI->getOpcode();
  switch (MIOpc) {
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    if (B != C) return 0;
    unsigned A = MI->getOperand(0).getReg();
    unsigned M = MI->getOperand(3).getImm();
    NewMI = BuildMI(MF, get(X86::PSHUFDri))
      .addReg(A, true, false, false, isDead)
      .addReg(B, false, false, isKill).addImm(M);
    break;
  }
  case X86::SHL64ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(MF, get(X86::LEA64r))
      .addReg(Dest, true, false, false, isDead)
      .addReg(0).addImm(1 << ShAmt)
      .addReg(Src, false, false, isKill).addImm(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(MF, get(Opc))
      .addReg(Dest, true, false, false, isDead)
      .addReg(0).addImm(1 << ShAmt)
      .addReg(Src, false, false, isKill).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    if (DisableLEA16) {
      // If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
      MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
      unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
        ? X86::LEA64_32r : X86::LEA32r;
      unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);

      // Build and insert into an implicit UNDEF value. This is OK because
      // we'll be shifting and then extracting the lower 16 bits.
      BuildMI(*MFI, MBBI, get(X86::IMPLICIT_DEF), leaInReg);
      MachineInstr *InsMI = BuildMI(*MFI, MBBI, get(X86::INSERT_SUBREG), leaInReg)
        .addReg(leaInReg).addReg(Src, false, false, isKill)
        .addImm(X86::SUBREG_16BIT);

      NewMI = BuildMI(*MFI, MBBI, get(Opc), leaOutReg)
        .addReg(0).addImm(1 << ShAmt)
        .addReg(leaInReg, false, false, true).addImm(0);

      MachineInstr *ExtMI = BuildMI(*MFI, MBBI, get(X86::EXTRACT_SUBREG))
        .addReg(Dest, true, false, false, isDead)
        .addReg(leaOutReg, false, false, true).addImm(X86::SUBREG_16BIT);
      if (LV) {
        // Update live variables.
        LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
        LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
        if (isKill)
          LV->replaceKillInstruction(Src, MI, InsMI);
        if (isDead)
          LV->replaceKillInstruction(Dest, MI, ExtMI);
      }
      return ExtMI;
    } else {
      NewMI = BuildMI(MF, get(X86::LEA16r))
        .addReg(Dest, true, false, false, isDead)
        .addReg(0).addImm(1 << ShAmt)
        .addReg(Src, false, false, isKill).addImm(0);
    }
    break;
  }
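  // The DisableLEA16 path above amounts to: copy the 16-bit source into the
  // low half of a fresh 32-bit register (INSERT_SUBREG into an implicit
  // UNDEF), perform a 32-bit LEA, then EXTRACT_SUBREG the low 16 bits of the
  // result back out.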
  default: {
    // The following opcodes also set the condition code register(s). Only
    // convert them to an equivalent LEA if their condition code defs are
    // dead!
    if (hasLiveCondCodeDef(MI))
      return 0;

    bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
    switch (MIOpc) {
    default: return 0;
    case X86::INC64r:
    case X86::INC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(MF, get(Opc))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, 1);
      break;
    }
    case X86::INC16r:
    case X86::INC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      NewMI = addRegOffset(BuildMI(MF, get(X86::LEA16r))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, 1);
      break;
    case X86::DEC64r:
    case X86::DEC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(MF, get(Opc))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, -1);
      break;
    }
    case X86::DEC16r:
    case X86::DEC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      NewMI = addRegOffset(BuildMI(MF, get(X86::LEA16r))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, -1);
      break;
    case X86::ADD64rr:
    case X86::ADD32rr: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      unsigned Src2 = MI->getOperand(2).getReg();
      bool isKill2 = MI->getOperand(2).isKill();
      NewMI = addRegReg(BuildMI(MF, get(Opc))
                        .addReg(Dest, true, false, false, isDead),
                        Src, isKill, Src2, isKill2);
      if (LV && isKill2)
        LV->replaceKillInstruction(Src2, MI, NewMI);
      break;
    }
    case X86::ADD16rr: {
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Src2 = MI->getOperand(2).getReg();
      bool isKill2 = MI->getOperand(2).isKill();
      NewMI = addRegReg(BuildMI(MF, get(X86::LEA16r))
                        .addReg(Dest, true, false, false, isDead),
                        Src, isKill, Src2, isKill2);
      if (LV && isKill2)
        LV->replaceKillInstruction(Src2, MI, NewMI);
      break;
    }
    case X86::ADD64ri32:
    case X86::ADD64ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(MF, get(X86::LEA64r))
                             .addReg(Dest, true, false, false, isDead),
                             Src, isKill, MI->getOperand(2).getImm());
      break;
    case X86::ADD32ri:
    case X86::ADD32ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate()) {
        unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
        NewMI = addRegOffset(BuildMI(MF, get(Opc))
                             .addReg(Dest, true, false, false, isDead),
                             Src, isKill, MI->getOperand(2).getImm());
      }
      break;
    case X86::ADD16ri:
    case X86::ADD16ri8:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(MF, get(X86::LEA16r))
                             .addReg(Dest, true, false, false, isDead),
                             Src, isKill, MI->getOperand(2).getImm());
      break;
    case X86::SHL16ri:
      if (DisableLEA16) return 0;
    case X86::SHL32ri:
    case X86::SHL64ri: {
      assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImmediate() &&
             "Unknown shl instruction!");
      unsigned ShAmt = MI->getOperand(2).getImm();
      if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
        X86AddressMode AM;
        AM.Scale = 1 << ShAmt;
        AM.IndexReg = Src;
        unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
          : (MIOpc == X86::SHL32ri
             ? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
        NewMI = addFullAddress(BuildMI(MF, get(Opc))
                               .addReg(Dest, true, false, false, isDead), AM);
        if (isKill)
          NewMI->getOperand(3).setIsKill(true);
      }
      break;
    }
    }
  }
  }

  if (!NewMI) return 0;

  if (LV) {  // Update live variables.
    if (isKill)
      LV->replaceKillInstruction(Src, MI, NewMI);
    if (isDead)
      LV->replaceKillInstruction(Dest, MI, NewMI);
  }

  MFI->insert(MBBI, NewMI);  // Insert the new inst.
  return NewMI;
}

/// commuteInstruction - We have a few instructions that must be handled
/// specially to commute them.
///
MachineInstr *
X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImm();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    bool AisDead = MI->getOperand(0).isDead();
    bool BisKill = MI->getOperand(1).isKill();
    bool CisKill = MI->getOperand(2).isKill();
    // If machine instrs are no longer in two-address forms, update the
    // destination register as well.
    if (A == B) {
      // Must be a two-address instruction!
      assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
             "Expecting a two-address instruction!");
      A = C;
      CisKill = false;
    }
    MachineFunction &MF = *MI->getParent()->getParent();
    return BuildMI(MF, get(Opc))
      .addReg(A, true, false, false, AisDead)
      .addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
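  // Concretely: "A = SHLD32rri8 B, C, 10" commutes to "A = SHRD32rri8 C, B,
  // 22", i.e. the immediate shift amount is rewritten as Size - Amt.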
  case X86::CMOVB16rr:
  case X86::CMOVB32rr:
  case X86::CMOVB64rr:
  case X86::CMOVAE16rr:
  case X86::CMOVAE32rr:
  case X86::CMOVAE64rr:
  case X86::CMOVE16rr:
  case X86::CMOVE32rr:
  case X86::CMOVE64rr:
  case X86::CMOVNE16rr:
  case X86::CMOVNE32rr:
  case X86::CMOVNE64rr:
  case X86::CMOVBE16rr:
  case X86::CMOVBE32rr:
  case X86::CMOVBE64rr:
  case X86::CMOVA16rr:
  case X86::CMOVA32rr:
  case X86::CMOVA64rr:
  case X86::CMOVL16rr:
  case X86::CMOVL32rr:
  case X86::CMOVL64rr:
  case X86::CMOVGE16rr:
  case X86::CMOVGE32rr:
  case X86::CMOVGE64rr:
  case X86::CMOVLE16rr:
  case X86::CMOVLE32rr:
  case X86::CMOVLE64rr:
  case X86::CMOVG16rr:
  case X86::CMOVG32rr:
  case X86::CMOVG64rr:
  case X86::CMOVS16rr:
  case X86::CMOVS32rr:
  case X86::CMOVS64rr:
  case X86::CMOVNS16rr:
  case X86::CMOVNS32rr:
  case X86::CMOVNS64rr:
  case X86::CMOVP16rr:
  case X86::CMOVP32rr:
  case X86::CMOVP64rr:
  case X86::CMOVNP16rr:
  case X86::CMOVNP32rr:
  case X86::CMOVNP64rr: {
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr:  Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr:  Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr:  Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr:  Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr:  Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr:  Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr:  Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr:  Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr:  Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr:  Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr:  Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr:  Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr:  Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr:  Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr:  Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr:  Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr:  Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr:  Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr:  Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr:  Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr:  Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    }

    MI->setDesc(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
  }
}

static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE:  return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL:  return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG:  return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB:  return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA:  return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS:  return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP:  return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO:  return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L:  return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G:  return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B:  return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A:  return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S:  return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P:  return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O:  return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L:  return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G:  return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B:  return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A:  return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S:  return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P:  return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O:  return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isTerminator()) return false;

  // Conditional branch is a special case.
  if (TID.isBranch() && !TID.isBarrier())
    return true;
  if (!TID.isPredicable())
    return true;
  return !isPredicated(MI);
}
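
// Branch analysis below follows the usual TargetInstrInfo convention:
// AnalyzeBranch returns false on success (filling in TBB, FBB and Cond) and
// true when the terminator sequence cannot be understood.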

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
                                               const X86InstrInfo &TII) {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  return TII.isUnpredicatedTerminator(MI);
}

bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this)) {
    if (!LastInst->getDesc().isBranch())
      return true;

    // If the block ends with a branch there are 3 possibilities:
    // it's an unconditional, conditional, or indirect branch.

    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isBrAnalysisUnpredicatedTerminator(--I, *this))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two X86::JMPs, handle it. The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == X86::JMP &&
      LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
  I->eraseFromParent();
  return 2;
}

static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
                                                     MachineOperand &MO) {
  if (MO.isRegister())
    MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
                     MO.isKill(), MO.isDead(), MO.getSubReg());
  else if (MO.isImmediate())
    MIB = MIB.addImm(MO.getImm());
  else if (MO.isFrameIndex())
    MIB = MIB.addFrameIndex(MO.getIndex());
  else if (MO.isGlobalAddress())
    MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
  else if (MO.isConstantPoolIndex())
    MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
  else if (MO.isJumpTableIndex())
    MIB = MIB.addJumpTableIndex(MO.getIndex());
  else if (MO.isExternalSymbol())
    MIB = MIB.addExternalSymbol(MO.getSymbolName());
  else
    assert(0 && "Unknown operand for X86InstrAddOperand!");

  return MIB;
}

unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (FBB == 0) { // One-way branch.
    if (Cond.empty()) {
      // Unconditional branch.
      BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, get(Opc)).addMBB(TBB);
    }
    return 1;
  }

  // Two-way conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, get(Opc)).addMBB(TBB);
  BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
  return 2;
}

bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const {
  if (DestRC == SrcRC) {
    unsigned Opc;
    if (DestRC == &X86::GR64RegClass) {
      Opc = X86::MOV64rr;
    } else if (DestRC == &X86::GR32RegClass) {
      Opc = X86::MOV32rr;
    } else if (DestRC == &X86::GR16RegClass) {
      Opc = X86::MOV16rr;
    } else if (DestRC == &X86::GR8RegClass) {
      Opc = X86::MOV8rr;
    } else if (DestRC == &X86::GR32_RegClass) {
      Opc = X86::MOV32_rr;
    } else if (DestRC == &X86::GR16_RegClass) {
      Opc = X86::MOV16_rr;
    } else if (DestRC == &X86::RFP32RegClass) {
      Opc = X86::MOV_Fp3232;
    } else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
      Opc = X86::MOV_Fp6464;
    } else if (DestRC == &X86::RFP80RegClass) {
      Opc = X86::MOV_Fp8080;
    } else if (DestRC == &X86::FR32RegClass) {
      Opc = X86::FsMOVAPSrr;
    } else if (DestRC == &X86::FR64RegClass) {
      Opc = X86::FsMOVAPDrr;
    } else if (DestRC == &X86::VR128RegClass) {
      Opc = X86::MOVAPSrr;
    } else if (DestRC == &X86::VR64RegClass) {
      Opc = X86::MMX_MOVQ64rr;
    } else {
      return false;
    }
    BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
    return true;
  }

  // Moving EFLAGS to / from another register requires a push and a pop.
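  // For example, copying EFLAGS into a 64-bit GPR is emitted as
  //   pushfq
  //   popq %reg
  // and the reverse direction pushes the GPR and pops into EFLAGS.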
  if (SrcRC == &X86::CCRRegClass) {
    if (SrcReg != X86::EFLAGS)
      return false;
    if (DestRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, get(X86::PUSHFQ));
      BuildMI(MBB, MI, get(X86::POP64r), DestReg);
      return true;
    } else if (DestRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, get(X86::PUSHFD));
      BuildMI(MBB, MI, get(X86::POP32r), DestReg);
      return true;
    }
  } else if (DestRC == &X86::CCRRegClass) {
    if (DestReg != X86::EFLAGS)
      return false;
    if (SrcRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
      BuildMI(MBB, MI, get(X86::POPFQ));
      return true;
    } else if (SrcRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
      BuildMI(MBB, MI, get(X86::POPFD));
      return true;
    }
  }

  // Moving from ST(0) turns into FpGET_ST0_32 etc.
  if (SrcRC == &X86::RSTRegClass) {
    // Copying from ST(0)/ST(1).
    if (SrcReg != X86::ST0 && SrcReg != X86::ST1)
      // Can only copy from ST(0)/ST(1) right now.
      return false;
    bool isST0 = SrcReg == X86::ST0;
    unsigned Opc;
    if (DestRC == &X86::RFP32RegClass)
      Opc = isST0 ? X86::FpGET_ST0_32 : X86::FpGET_ST1_32;
    else if (DestRC == &X86::RFP64RegClass)
      Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
    else {
      if (DestRC != &X86::RFP80RegClass)
        return false;
      Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
    }
    BuildMI(MBB, MI, get(Opc), DestReg);
    return true;
  }

  // Moving to ST(0) turns into FpSET_ST0_32 etc.
  if (DestRC == &X86::RSTRegClass) {
    // Copying to ST(0).  FIXME: handle ST(1) also.
    if (DestReg != X86::ST0)
      // Can only copy to TOS right now.
      return false;
    unsigned Opc;
    if (SrcRC == &X86::RFP32RegClass)
      Opc = X86::FpSET_ST0_32;
    else if (SrcRC == &X86::RFP64RegClass)
      Opc = X86::FpSET_ST0_64;
    else {
      if (SrcRC != &X86::RFP80RegClass)
        return false;
      Opc = X86::FpSET_ST0_80;
    }
    BuildMI(MBB, MI, get(Opc)).addReg(SrcReg);
    return true;
  }

  // Not yet supported!
  return false;
}

static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
                                  bool isStackAligned) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_mr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_mr;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::ST_FpP80m;   // pops
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::ST_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::ST_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    // If the stack is realigned we can use aligned stores.
    Opc = isStackAligned ? X86::MOVAPSmr : X86::MOVUPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill,
                                       int FrameIdx,
                                       const TargetRegisterClass *RC) const {
  const MachineFunction &MF = *MBB.getParent();
  bool isAligned = (RI.getStackAlignment() >= 16) ||
                   RI.needsStackRealignment(MF);
  unsigned Opc = getStoreRegOpcode(RC, isAligned);
  addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
    .addReg(SrcReg, false, false, isKill);
}

void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  bool isAligned = (RI.getStackAlignment() >= 16) ||
                   RI.needsStackRealignment(MF);
  unsigned Opc = getStoreRegOpcode(RC, isAligned);
  MachineInstrBuilder MIB = BuildMI(MF, get(Opc));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  MIB.addReg(SrcReg, false, false, isKill);
  NewMIs.push_back(MIB);
}

static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
                                 bool isStackAligned) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rm;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rm;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::LD_Fp80m;
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::LD_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::LD_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    // If the stack is realigned we can use aligned loads.
    Opc = isStackAligned ? X86::MOVAPSrm : X86::MOVUPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        unsigned DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC) const {
  const MachineFunction &MF = *MBB.getParent();
  bool isAligned = (RI.getStackAlignment() >= 16) ||
                   RI.needsStackRealignment(MF);
  unsigned Opc = getLoadRegOpcode(RC, isAligned);
  addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
}

void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
  bool isAligned = (RI.getStackAlignment() >= 16) ||
                   RI.needsStackRealignment(MF);
  unsigned Opc = getLoadRegOpcode(RC, isAligned);
  MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  NewMIs.push_back(MIB);
}

bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  unsigned SlotSize = is64Bit ? 8 : 4;

  MachineFunction &MF = *MBB.getParent();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  X86FI->setCalleeSavedFrameSize(CSI.size() * SlotSize);

  unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in.  It's killed at the spill.
    MBB.addLiveIn(Reg);
    BuildMI(MBB, MI, get(Opc)).addReg(Reg);
  }
  return true;
}

bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

  unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    BuildMI(MBB, MI, get(Opc), Reg);
  }
  return true;
}

static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
                                     SmallVector<MachineOperand,4> &MOs,
                                     MachineInstr *MI,
                                     const TargetInstrInfo &TII) {
  // Create the base instruction with the memory operand as the first part.
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), true);
  MachineInstrBuilder MIB(NewMI);
  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB = X86InstrAddOperand(MIB, MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    MIB.addImm(1).addReg(0).addImm(0);

  // Loop over the rest of the ri operands, converting them over.
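  // For example, folding a frame index into the tied operands of
  //   ADD32rr %dst, %src
  // yields
  //   ADD32mr <fi>, 1, %noreg, 0, %src
  // where the four address operands replace the tied def/use pair and the
  // remaining source operand is copied over here.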
  unsigned NumOps = MI->getDesc().getNumOperands()-2;
  for (unsigned i = 0; i != NumOps; ++i) {
    MachineOperand &MO = MI->getOperand(i+2);
    MIB = X86InstrAddOperand(MIB, MO);
  }
  for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    MIB = X86InstrAddOperand(MIB, MO);
  }
  return MIB;
}

static MachineInstr *FuseInst(MachineFunction &MF,
                              unsigned Opcode, unsigned OpNo,
                              SmallVector<MachineOperand,4> &MOs,
                              MachineInstr *MI, const TargetInstrInfo &TII) {
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), true);
  MachineInstrBuilder MIB(NewMI);

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
      assert(MO.isRegister() && "Expected to fold into reg operand!");
      unsigned NumAddrOps = MOs.size();
      for (unsigned i = 0; i != NumAddrOps; ++i)
        MIB = X86InstrAddOperand(MIB, MOs[i]);
      if (NumAddrOps < 4)  // FrameIndex only
        MIB.addImm(1).addReg(0).addImm(0);
    } else {
      MIB = X86InstrAddOperand(MIB, MO);
    }
  }
  return MIB;
}

static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                SmallVector<MachineOperand,4> &MOs,
                                MachineInstr *MI) {
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineInstrBuilder MIB = BuildMI(MF, TII.get(Opcode));

  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB = X86InstrAddOperand(MIB, MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    MIB.addImm(1).addReg(0).addImm(0);
  return MIB.addImm(0);
}

MachineInstr*
X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
                                MachineInstr *MI, unsigned i,
                                SmallVector<MachineOperand,4> &MOs) const {
  const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
  bool isTwoAddrFold = false;
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  MachineInstr *NewMI = NULL;
  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places.  It requires
  // replacing the *two* registers with the memory location.
  if (isTwoAddr && NumOps >= 2 && i < 2 &&
      MI->getOperand(0).isRegister() &&
      MI->getOperand(1).isRegister() &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
    isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
    if (MI->getOpcode() == X86::MOV16r0)
      NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV32r0)
      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV64r0)
      NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
    else if (MI->getOpcode() == X86::MOV8r0)
      NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
    if (NewMI)
      return NewMI;

    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (i == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (i == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  // If a table was selected...
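  // RegOp2MemOpTable{0,1,2} map a register-form opcode to its memory-form
  // counterpart, keyed by which operand is being folded; the 2Addr table
  // handles the tied def/use case above.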
  if (OpcodeTablePtr) {
    // Find the opcode to fuse.
    DenseMap<unsigned*, unsigned>::iterator I =
      OpcodeTablePtr->find((unsigned*)MI->getOpcode());
    if (I != OpcodeTablePtr->end()) {
      if (isTwoAddrFold)
        NewMI = FuseTwoAddrInst(MF, I->second, MOs, MI, *this);
      else
        NewMI = FuseInst(MF, I->second, i, MOs, MI, *this);
      return NewMI;
    }
  }

  // No fusion.
  if (PrintFailedFusing)
    cerr << "We failed to fuse operand " << i << *MI;
  return NULL;
}


MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
                                              MachineInstr *MI,
                                              SmallVectorImpl<unsigned> &Ops,
                                              int FrameIndex) const {
  // Check switch flag.
  if (NoFusing) return NULL;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
  // FIXME: Move alignment requirement into tables?
  if (Alignment < 16) {
    switch (MI->getOpcode()) {
    default: break;
    // It is not always safe to fold movsd into these instructions since
    // their load-folding variants expect the address to be 16-byte aligned.
    case X86::FsANDNPDrr:
    case X86::FsANDNPSrr:
    case X86::FsANDPDrr:
    case X86::FsANDPSrr:
    case X86::FsORPDrr:
    case X86::FsORPSrr:
    case X86::FsXORPDrr:
    case X86::FsXORPSrr:
      return NULL;
    }
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
    }
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,4> MOs;
  MOs.push_back(MachineOperand::CreateFI(FrameIndex));
  return foldMemoryOperand(MF, MI, Ops[0], MOs);
}

MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
                                              MachineInstr *MI,
                                              SmallVectorImpl<unsigned> &Ops,
                                              MachineInstr *LoadMI) const {
  // Check switch flag.
  if (NoFusing) return NULL;

  // Determine the alignment of the load.
  unsigned Alignment = 0;
  if (LoadMI->hasOneMemOperand())
    Alignment = LoadMI->memoperands_begin()->getAlignment();

  // FIXME: Move alignment requirement into tables?
  if (Alignment < 16) {
    switch (MI->getOpcode()) {
    default: break;
    // It is not always safe to fold movsd into these instructions since
    // their load-folding variants expect the address to be 16-byte aligned.
    case X86::FsANDNPDrr:
    case X86::FsANDNPSrr:
    case X86::FsANDPDrr:
    case X86::FsANDPSrr:
    case X86::FsORPDrr:
    case X86::FsORPSrr:
    case X86::FsXORPDrr:
    case X86::FsXORPSrr:
      return NULL;
    }
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
    }
    // Change to CMPXXri r, 0 first.
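    // (TEST r, r and CMP r, 0 set EFLAGS identically, so the register form
    // can be rewritten and the register operand then folded as a load.)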
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,4> MOs;
  unsigned NumOps = LoadMI->getDesc().getNumOperands();
  for (unsigned i = NumOps - 4; i != NumOps; ++i)
    MOs.push_back(LoadMI->getOperand(i));
  return foldMemoryOperand(MF, MI, Ops[0], MOs);
}


bool X86InstrInfo::canFoldMemoryOperand(MachineInstr *MI,
                                        SmallVectorImpl<unsigned> &Ops) const {
  // Check switch flag.
  if (NoFusing) return false;

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    switch (MI->getOpcode()) {
    default: return false;
    case X86::TEST8rr:
    case X86::TEST16rr:
    case X86::TEST32rr:
    case X86::TEST64rr:
      return true;
    }
  }

  if (Ops.size() != 1)
    return false;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places.  It requires
  // replacing the *two* registers with the memory location.
  const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
  } else if (OpNum == 0) { // If operand 0
    switch (Opc) {
    case X86::MOV16r0:
    case X86::MOV32r0:
    case X86::MOV64r0:
    case X86::MOV8r0:
      return true;
    default: break;
    }
    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (OpNum == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (OpNum == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  if (OpcodeTablePtr) {
    // Find the opcode to fuse.
    DenseMap<unsigned*, unsigned>::iterator I =
      OpcodeTablePtr->find((unsigned*)Opc);
    if (I != OpcodeTablePtr->end())
      return true;
  }
  return false;
}

bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)MI->getOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  // The second table value packs the folded operand index in bits 0-3 and
  // the load/store flags in bits 4 and 5.
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return false;
  UnfoldLoad &= FoldedLoad;
  if (UnfoldStore && !FoldedStore)
    return false;
  UnfoldStore &= FoldedStore;

  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
    ? getPointerRegClass() : RI.getRegClass(TOI.RegClass);
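  // Partition the operands of the memory form: the slice [Index, Index+4)
  // is the address, operands before/after it are kept in order, and implicit
  // register operands are re-added at the end.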
  SmallVector<MachineOperand,4> AddrOps;
  SmallVector<MachineOperand,2> BeforeOps;
  SmallVector<MachineOperand,2> AfterOps;
  SmallVector<MachineOperand,4> ImpOps;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (i >= Index && i < Index+4)
      AddrOps.push_back(Op);
    else if (Op.isRegister() && Op.isImplicit())
      ImpOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }

  // Emit the load instruction.
  if (UnfoldLoad) {
    loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
    if (UnfoldStore) {
      // Address operands cannot be marked isKill.
      for (unsigned i = 1; i != 5; ++i) {
        MachineOperand &MO = NewMIs[0]->getOperand(i);
        if (MO.isRegister())
          MO.setIsKill(false);
      }
    }
  }

  // Emit the data processing instruction.
  MachineInstr *DataMI = MF.CreateMachineInstr(TID, true);
  MachineInstrBuilder MIB(DataMI);

  if (FoldedStore)
    MIB.addReg(Reg, true);
  for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, BeforeOps[i]);
  if (FoldedLoad)
    MIB.addReg(Reg);
  for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, AfterOps[i]);
  for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
    MachineOperand &MO = ImpOps[i];
    MIB.addReg(MO.getReg(), MO.isDef(), true, MO.isKill(), MO.isDead());
  }
  // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
  unsigned NewOpc = 0;
  switch (DataMI->getOpcode()) {
  default: break;
  case X86::CMP64ri32:
  case X86::CMP32ri:
  case X86::CMP16ri:
  case X86::CMP8ri: {
    MachineOperand &MO0 = DataMI->getOperand(0);
    MachineOperand &MO1 = DataMI->getOperand(1);
    if (MO1.getImm() == 0) {
      switch (DataMI->getOpcode()) {
      default: break;
      case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
      case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
      case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
      case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
      }
      DataMI->setDesc(get(NewOpc));
      MO1.ChangeToRegister(MO0.getReg(), false);
    }
  }
  }
  NewMIs.push_back(DataMI);

  // Emit the store instruction.
  if (UnfoldStore) {
    const TargetOperandInfo &DstTOI = TID.OpInfo[0];
    const TargetRegisterClass *DstRC = DstTOI.isLookupPtrRegClass()
      ? getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
    storeRegToAddr(MF, Reg, true, AddrOps, DstRC, NewMIs);
  }

  return true;
}

bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                  SmallVectorImpl<SDNode*> &NewNodes) const {
  if (!N->isMachineOpcode())
    return false;

  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)N->getMachineOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
    ? getPointerRegClass() : RI.getRegClass(TOI.RegClass);
  std::vector<SDValue> AddrOps;
  std::vector<SDValue> BeforeOps;
  std::vector<SDValue> AfterOps;
  unsigned NumOps = N->getNumOperands();
  for (unsigned i = 0; i != NumOps-1; ++i) {
    SDValue Op = N->getOperand(i);
    if (i >= Index && i < Index+4)
      AddrOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }
  SDValue Chain = N->getOperand(NumOps-1);
  AddrOps.push_back(Chain);

  // Emit the load instruction.
  SDNode *Load = 0;
  const MachineFunction &MF = DAG.getMachineFunction();
  if (FoldedLoad) {
    MVT VT = *RC->vt_begin();
    bool isAligned = (RI.getStackAlignment() >= 16) ||
                     RI.needsStackRealignment(MF);
    Load = DAG.getTargetNode(getLoadRegOpcode(RC, isAligned),
                             VT, MVT::Other,
                             &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Load);
  }

  // Emit the data processing instruction.
  std::vector<MVT> VTs;
  const TargetRegisterClass *DstRC = 0;
  if (TID.getNumDefs() > 0) {
    const TargetOperandInfo &DstTOI = TID.OpInfo[0];
    DstRC = DstTOI.isLookupPtrRegClass()
      ? getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
    VTs.push_back(*DstRC->vt_begin());
  }
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
      VTs.push_back(VT);
  }
  if (Load)
    BeforeOps.push_back(SDValue(Load, 0));
  std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
  SDNode *NewNode = DAG.getTargetNode(Opc, VTs, &BeforeOps[0],
                                      BeforeOps.size());
  NewNodes.push_back(NewNode);

  // Emit the store instruction.
  if (FoldedStore) {
    AddrOps.pop_back();
    AddrOps.push_back(SDValue(NewNode, 0));
    AddrOps.push_back(Chain);
    bool isAligned = (RI.getStackAlignment() >= 16) ||
                     RI.needsStackRealignment(MF);
    SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(DstRC, isAligned),
                                      MVT::Other, &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Store);
  }

  return true;
}

unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)Opc);
  if (I == MemOp2RegOpTable.end())
    return 0;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return 0;
  if (UnfoldStore && !FoldedStore)
    return 0;
  return I->second.first;
}

bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::TCRETURNri:
  case X86::TCRETURNdi:
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP64r:  // Indirect branch (64-bit).
  case X86::JMP32m:  // Indirect branch through mem.
  case X86::JMP64m:  // Indirect branch through mem (64-bit).
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}

unsigned X86InstrInfo::sizeOfImm(const TargetInstrDesc *Desc) {
  switch (Desc->TSFlags & X86II::ImmMask) {
  case X86II::Imm8:  return 1;
  case X86II::Imm16: return 2;
  case X86II::Imm32: return 4;
  case X86II::Imm64: return 8;
  default: assert(0 && "Immediate size not set!");
    return 0;
  }
}

/// isX86_64ExtendedReg - Is the MachineOperand an x86-64 extended register,
/// e.g. r8, xmm8, etc.?
bool X86InstrInfo::isX86_64ExtendedReg(const MachineOperand &MO) {
  if (!MO.isRegister()) return false;
  switch (MO.getReg()) {
  default: break;
  case X86::R8:   case X86::R9:   case X86::R10:  case X86::R11:
  case X86::R12:  case X86::R13:  case X86::R14:  case X86::R15:
  case X86::R8D:  case X86::R9D:  case X86::R10D: case X86::R11D:
  case X86::R12D: case X86::R13D: case X86::R14D: case X86::R15D:
  case X86::R8W:  case X86::R9W:  case X86::R10W: case X86::R11W:
  case X86::R12W: case X86::R13W: case X86::R14W: case X86::R15W:
  case X86::R8B:  case X86::R9B:  case X86::R10B: case X86::R11B:
  case X86::R12B: case X86::R13B: case X86::R14B: case X86::R15B:
  case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
    return true;
  }
  return false;
}


/// determineREX - Determine whether the MachineInstr has to be encoded with
/// an x86-64 REX prefix, which specifies 1) 64-bit instructions, 2)
/// non-default operand size, and 3) use of x86-64 extended registers.
unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
  unsigned REX = 0;
  const TargetInstrDesc &Desc = MI.getDesc();

  // Pseudo instructions do not need a REX prefix byte.
  if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
    return 0;
  if (Desc.TSFlags & X86II::REX_W)
    REX |= 1 << 3;

  unsigned NumOps = Desc.getNumOperands();
  if (NumOps) {
    bool isTwoAddr = NumOps > 1 &&
      Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;

    // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
    unsigned i = isTwoAddr ? 1 : 0;
    for (unsigned e = NumOps; i != e; ++i) {
      const MachineOperand& MO = MI.getOperand(i);
      if (MO.isRegister()) {
        unsigned Reg = MO.getReg();
        if (isX86_64NonExtLowByteReg(Reg))
          REX |= 0x40;
      }
    }

    switch (Desc.TSFlags & X86II::FormMask) {
    case X86II::MRMInitReg:
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= (1 << 0) | (1 << 2);
      break;
    case X86II::MRMSrcReg: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 2;
      i = isTwoAddr ? 2 : 1;
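      // REX.R (bit 2) covers the ModR/M reg field checked above; REX.B
      // (bit 0) covers the r/m register operands scanned below.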
      for (unsigned e = NumOps; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (isX86_64ExtendedReg(MO))
          REX |= 1 << 0;
      }
      break;
    }
    case X86II::MRMSrcMem: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 2;
      unsigned Bit = 0;
      i = isTwoAddr ? 2 : 1;
      for (; i != NumOps; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (MO.isRegister()) {
          if (isX86_64ExtendedReg(MO))
            REX |= 1 << Bit;
          Bit++;
        }
      }
      break;
    }
    case X86II::MRM0m: case X86II::MRM1m:
    case X86II::MRM2m: case X86II::MRM3m:
    case X86II::MRM4m: case X86II::MRM5m:
    case X86II::MRM6m: case X86II::MRM7m:
    case X86II::MRMDestMem: {
      unsigned e = isTwoAddr ? 5 : 4;
      i = isTwoAddr ? 1 : 0;
      if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
        REX |= 1 << 2;
      unsigned Bit = 0;
      for (; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (MO.isRegister()) {
          if (isX86_64ExtendedReg(MO))
            REX |= 1 << Bit;
          Bit++;
        }
      }
      break;
    }
    default: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 0;
      i = isTwoAddr ? 2 : 1;
      for (unsigned e = NumOps; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (isX86_64ExtendedReg(MO))
          REX |= 1 << 2;
      }
      break;
    }
    }
  }
  return REX;
}

/// sizePCRelativeBlockAddress - Return the size of a PC-relative block
/// address instruction.
///
static unsigned sizePCRelativeBlockAddress() {
  return 4;
}

/// sizeGlobalAddress - Return the size of the emission of this global
/// address.
///
static unsigned sizeGlobalAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeConstPoolAddress - Return the size of the emission of this constant
/// pool address.
///
static unsigned sizeConstPoolAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeExternalSymbolAddress - Return the size of the emission of this
/// external symbol.
///
static unsigned sizeExternalSymbolAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeJumpTableAddress - Return the size of the emission of this jump
/// table address.
///
static unsigned sizeJumpTableAddress(bool dword) {
  return dword ? 8 : 4;
}

static unsigned sizeConstant(unsigned Size) {
  return Size;
}

static unsigned sizeRegModRMByte() {
  return 1;
}

static unsigned sizeSIBByte() {
  return 1;
}

static unsigned getDisplacementFieldSize(const MachineOperand *RelocOp) {
  unsigned FinalSize = 0;
  // A simple integer displacement doesn't require a relocation.
  if (!RelocOp) {
    FinalSize += sizeConstant(4);
    return FinalSize;
  }

  // Otherwise, this is something that requires a relocation.
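  // All relocated displacement fields counted here are 4 bytes; the 8-byte
  // (dword == true) variants of these helpers are only used for 64-bit
  // immediate forms later in GetInstSizeWithDesc.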
  if (RelocOp->isGlobalAddress()) {
    FinalSize += sizeGlobalAddress(false);
  } else if (RelocOp->isConstantPoolIndex()) {
    FinalSize += sizeConstPoolAddress(false);
  } else if (RelocOp->isJumpTableIndex()) {
    FinalSize += sizeJumpTableAddress(false);
  } else {
    assert(0 && "Unknown value to relocate!");
  }
  return FinalSize;
}

static unsigned getMemModRMByteSize(const MachineInstr &MI, unsigned Op,
                                    bool IsPIC, bool Is64BitMode) {
  const MachineOperand &Op3 = MI.getOperand(Op+3);
  int DispVal = 0;
  const MachineOperand *DispForReloc = 0;
  unsigned FinalSize = 0;

  // Figure out what sort of displacement we have to handle here.
  if (Op3.isGlobalAddress()) {
    DispForReloc = &Op3;
  } else if (Op3.isConstantPoolIndex()) {
    if (Is64BitMode || IsPIC) {
      DispForReloc = &Op3;
    } else {
      DispVal = 1;
    }
  } else if (Op3.isJumpTableIndex()) {
    if (Is64BitMode || IsPIC) {
      DispForReloc = &Op3;
    } else {
      DispVal = 1;
    }
  } else {
    DispVal = 1;
  }

  const MachineOperand &Base = MI.getOperand(Op);
  const MachineOperand &IndexReg = MI.getOperand(Op+2);

  unsigned BaseReg = Base.getReg();

  // Is a SIB byte needed?
  if (IndexReg.getReg() == 0 &&
      (BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) {
    if (BaseReg == 0) {  // Just a displacement?
      // Count the special case [disp32] encoding.
      ++FinalSize;
      FinalSize += getDisplacementFieldSize(DispForReloc);
    } else {
      unsigned BaseRegNo = X86RegisterInfo::getX86RegNum(BaseReg);
      if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) {
        // Count the simple indirect register encoding, e.g. [EAX].
        ++FinalSize;
        // Be pessimistic and assume it's a disp32, not a disp8.
      } else {
        // Count the most general non-SIB encoding: [REG+disp32].
        ++FinalSize;
        FinalSize += getDisplacementFieldSize(DispForReloc);
      }
    }

  } else {  // We need a SIB byte, so start by counting the ModR/M byte first.
    assert(IndexReg.getReg() != X86::ESP &&
           IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

    bool ForceDisp32 = false;
    if (BaseReg == 0 || DispForReloc) {
      // Count the normal disp32 encoding.
      ++FinalSize;
      ForceDisp32 = true;
    } else {
      ++FinalSize;
    }

    FinalSize += sizeSIBByte();

    // Do we need to count a displacement?
    if (DispVal != 0 || ForceDisp32) {
      FinalSize += getDisplacementFieldSize(DispForReloc);
    }
  }
  return FinalSize;
}


static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
                                    const TargetInstrDesc *Desc,
                                    bool IsPIC, bool Is64BitMode) {
  unsigned Opcode = Desc->Opcode;
  unsigned FinalSize = 0;

  // Count the lock opcode prefix as needed.
  if (Desc->TSFlags & X86II::LOCK) ++FinalSize;

  // Count the repeat opcode prefix as needed.
  if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) ++FinalSize;

  // Count the operand size opcode prefix as needed.
  if (Desc->TSFlags & X86II::OpSize) ++FinalSize;

  // Count the address size opcode prefix as needed.
  if (Desc->TSFlags & X86II::AdSize) ++FinalSize;

  bool Need0FPrefix = false;
  switch (Desc->TSFlags & X86II::Op0Mask) {
  case X86II::TB:   // Two-byte opcode prefix
  case X86II::T8:   // 0F 38
  case X86II::TA:   // 0F 3A
    Need0FPrefix = true;
    break;
  case X86II::REP: break;  // already handled.
  case X86II::XS:   // F3 0F
    ++FinalSize;
    Need0FPrefix = true;
    break;
  case X86II::XD:   // F2 0F
    ++FinalSize;
    Need0FPrefix = true;
    break;
  case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
  case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
    ++FinalSize;
    break;  // Two-byte opcode prefix
  default: assert(0 && "Invalid prefix!");
  case 0: break;  // No prefix!
  }

  if (Is64BitMode) {
    // REX prefix
    unsigned REX = X86InstrInfo::determineREX(MI);
    if (REX)
      ++FinalSize;
  }

  // The 0x0F escape code is emitted just before the opcode; account for it.
  if (Need0FPrefix)
    ++FinalSize;

  switch (Desc->TSFlags & X86II::Op0Mask) {
  case X86II::T8:  // 0F 38
    ++FinalSize;
    break;
  case X86II::TA:  // 0F 3A
    ++FinalSize;
    break;
  }

  // If this is a two-address instruction, skip one of the register operands.
  unsigned NumOps = Desc->getNumOperands();
  unsigned CurOp = 0;
  if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)
    CurOp++;

  switch (Desc->TSFlags & X86II::FormMask) {
  default: assert(0 && "Unknown FormMask value in X86 MachineCodeEmitter!");
  case X86II::Pseudo:
    // Remember the current PC offset; this is the PIC relocation
    // base address.
    switch (Opcode) {
    default:
      break;
    case TargetInstrInfo::INLINEASM: {
      const MachineFunction *MF = MI.getParent()->getParent();
      const char *AsmStr = MI.getOperand(0).getSymbolName();
      const TargetAsmInfo* AI = MF->getTarget().getTargetAsmInfo();
      FinalSize += AI->getInlineAsmLength(AsmStr);
      break;
    }
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      break;
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case X86::DWARF_LOC:
    case X86::FP_REG_KILL:
      break;
    case X86::MOVPC32r: {
      // This counts the "call" portion of this pseudo instruction.
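      // (one byte for the CALL opcode plus its PC-relative immediate, counted
      // below; the matching POP32r is sized separately in GetInstSizeInBytes.)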
      ++FinalSize;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      break;
    }
    }
    CurOp = NumOps;
    break;
  case X86II::RawFrm:
    ++FinalSize;

    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      if (MO.isMachineBasicBlock()) {
        FinalSize += sizePCRelativeBlockAddress();
      } else if (MO.isGlobalAddress()) {
        FinalSize += sizeGlobalAddress(false);
      } else if (MO.isExternalSymbol()) {
        FinalSize += sizeExternalSymbolAddress(false);
      } else if (MO.isImmediate()) {
        FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      } else {
        assert(0 && "Unknown RawFrm operand!");
      }
    }
    break;

  case X86II::AddRegFrm:
    ++FinalSize;
    ++CurOp;

    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO1.isImmediate())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64ri)
          dword = true;
        if (MO1.isGlobalAddress()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO1.isExternalSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO1.isConstantPoolIndex())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO1.isJumpTableIndex())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;

  case X86II::MRMDestReg: {
    ++FinalSize;
    FinalSize += sizeRegModRMByte();
    CurOp += 2;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }
  case X86II::MRMDestMem: {
    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
    CurOp += 5;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }

  case X86II::MRMSrcReg:
    ++FinalSize;
    FinalSize += sizeRegModRMByte();
    CurOp += 2;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;

  case X86II::MRMSrcMem: {
    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
    CurOp += 5;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }

  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    ++FinalSize;
    ++CurOp;
    FinalSize += sizeRegModRMByte();

    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO1.isImmediate())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64ri32)
          dword = true;
        if (MO1.isGlobalAddress()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO1.isExternalSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO1.isConstantPoolIndex())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO1.isJumpTableIndex())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;

  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m: {
    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
    CurOp += 4;

    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO.isImmediate())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64mi32)
          dword = true;
        if (MO.isGlobalAddress()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO.isExternalSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO.isConstantPoolIndex())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO.isJumpTableIndex())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;
  }

  case X86II::MRMInitReg:
    ++FinalSize;
    // Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
    FinalSize += sizeRegModRMByte();
    ++CurOp;
    break;
  }

  if (!Desc->isVariadic() && CurOp != NumOps) {
    cerr << "Cannot determine size: ";
    MI.dump();
    cerr << '\n';
    abort();
  }

  return FinalSize;
}


unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  bool IsPIC = (TM.getRelocationModel() == Reloc::PIC_);
  bool Is64BitMode = TM.getSubtargetImpl()->is64Bit();
  unsigned Size = GetInstSizeWithDesc(*MI, &Desc, IsPIC, Is64BitMode);
  if (Desc.getOpcode() == X86::MOVPC32r) {
    Size += GetInstSizeWithDesc(*MI, &get(X86::POP32r), IsPIC, Is64BitMode);
  }
  return Size;
}
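// Illustrative only (not part of this file's interface): a client pass could
// estimate a basic block's size by summing GetInstSizeInBytes over its
// instructions, e.g.:
//
//   unsigned BlockSize = 0;
//   for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
//        I != E; ++I)
//     BlockSize += TII->GetInstSizeInBytes(I);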