X86InstrInfo.cpp revision 546e36a2c17f9eb7b2b1f2f19e522673153948aa
//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetAsmInfo.h"

using namespace llvm;

namespace {
  cl::opt<bool>
  NoFusing("disable-spill-fusing",
           cl::desc("Disable fusing of spill code into instructions"));
  cl::opt<bool>
  PrintFailedFusing("print-failed-fuse-candidates",
                    cl::desc("Print instructions that the allocator wants to"
                             " fuse, but the X86 backend currently can't"),
                    cl::Hidden);
  cl::opt<bool>
  ReMatPICStubLoad("remat-pic-stub-load",
                   cl::desc("Re-materialize load from stub in PIC mode"),
                   cl::init(false), cl::Hidden);
}

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
  SmallVector<unsigned,16> AmbEntries;

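  // The following table maps each two-address register-form opcode to the
  // memory form used when its tied destination operand is folded into a
  // memory operand (e.g. ADD32rr -> ADD32mr).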
  static const unsigned OpTbl2Addr[][2] = {
    { X86::ADC32ri, X86::ADC32mi },
    { X86::ADC32ri8, X86::ADC32mi8 },
    { X86::ADC32rr, X86::ADC32mr },
    { X86::ADC64ri32, X86::ADC64mi32 },
    { X86::ADC64ri8, X86::ADC64mi8 },
    { X86::ADC64rr, X86::ADC64mr },
    { X86::ADD16ri, X86::ADD16mi },
    { X86::ADD16ri8, X86::ADD16mi8 },
    { X86::ADD16rr, X86::ADD16mr },
    { X86::ADD32ri, X86::ADD32mi },
    { X86::ADD32ri8, X86::ADD32mi8 },
    { X86::ADD32rr, X86::ADD32mr },
    { X86::ADD64ri32, X86::ADD64mi32 },
    { X86::ADD64ri8, X86::ADD64mi8 },
    { X86::ADD64rr, X86::ADD64mr },
    { X86::ADD8ri, X86::ADD8mi },
    { X86::ADD8rr, X86::ADD8mr },
    { X86::AND16ri, X86::AND16mi },
    { X86::AND16ri8, X86::AND16mi8 },
    { X86::AND16rr, X86::AND16mr },
    { X86::AND32ri, X86::AND32mi },
    { X86::AND32ri8, X86::AND32mi8 },
    { X86::AND32rr, X86::AND32mr },
    { X86::AND64ri32, X86::AND64mi32 },
    { X86::AND64ri8, X86::AND64mi8 },
    { X86::AND64rr, X86::AND64mr },
    { X86::AND8ri, X86::AND8mi },
    { X86::AND8rr, X86::AND8mr },
    { X86::DEC16r, X86::DEC16m },
    { X86::DEC32r, X86::DEC32m },
    { X86::DEC64_16r, X86::DEC64_16m },
    { X86::DEC64_32r, X86::DEC64_32m },
    { X86::DEC64r, X86::DEC64m },
    { X86::DEC8r, X86::DEC8m },
    { X86::INC16r, X86::INC16m },
    { X86::INC32r, X86::INC32m },
    { X86::INC64_16r, X86::INC64_16m },
    { X86::INC64_32r, X86::INC64_32m },
    { X86::INC64r, X86::INC64m },
    { X86::INC8r, X86::INC8m },
    { X86::NEG16r, X86::NEG16m },
    { X86::NEG32r, X86::NEG32m },
    { X86::NEG64r, X86::NEG64m },
    { X86::NEG8r, X86::NEG8m },
    { X86::NOT16r, X86::NOT16m },
    { X86::NOT32r, X86::NOT32m },
    { X86::NOT64r, X86::NOT64m },
    { X86::NOT8r, X86::NOT8m },
    { X86::OR16ri, X86::OR16mi },
    { X86::OR16ri8, X86::OR16mi8 },
    { X86::OR16rr, X86::OR16mr },
    { X86::OR32ri, X86::OR32mi },
    { X86::OR32ri8, X86::OR32mi8 },
    { X86::OR32rr, X86::OR32mr },
    { X86::OR64ri32, X86::OR64mi32 },
    { X86::OR64ri8, X86::OR64mi8 },
    { X86::OR64rr, X86::OR64mr },
    { X86::OR8ri, X86::OR8mi },
    { X86::OR8rr, X86::OR8mr },
    { X86::ROL16r1, X86::ROL16m1 },
    { X86::ROL16rCL, X86::ROL16mCL },
    { X86::ROL16ri, X86::ROL16mi },
    { X86::ROL32r1, X86::ROL32m1 },
    { X86::ROL32rCL, X86::ROL32mCL },
    { X86::ROL32ri, X86::ROL32mi },
    { X86::ROL64r1, X86::ROL64m1 },
    { X86::ROL64rCL, X86::ROL64mCL },
    { X86::ROL64ri, X86::ROL64mi },
    { X86::ROL8r1, X86::ROL8m1 },
    { X86::ROL8rCL, X86::ROL8mCL },
    { X86::ROL8ri, X86::ROL8mi },
    { X86::ROR16r1, X86::ROR16m1 },
    { X86::ROR16rCL, X86::ROR16mCL },
    { X86::ROR16ri, X86::ROR16mi },
    { X86::ROR32r1, X86::ROR32m1 },
    { X86::ROR32rCL, X86::ROR32mCL },
    { X86::ROR32ri, X86::ROR32mi },
    { X86::ROR64r1, X86::ROR64m1 },
    { X86::ROR64rCL, X86::ROR64mCL },
    { X86::ROR64ri, X86::ROR64mi },
    { X86::ROR8r1, X86::ROR8m1 },
    { X86::ROR8rCL, X86::ROR8mCL },
    { X86::ROR8ri, X86::ROR8mi },
    { X86::SAR16r1, X86::SAR16m1 },
    { X86::SAR16rCL, X86::SAR16mCL },
    { X86::SAR16ri, X86::SAR16mi },
    { X86::SAR32r1, X86::SAR32m1 },
    { X86::SAR32rCL, X86::SAR32mCL },
    { X86::SAR32ri, X86::SAR32mi },
    { X86::SAR64r1, X86::SAR64m1 },
    { X86::SAR64rCL, X86::SAR64mCL },
    { X86::SAR64ri, X86::SAR64mi },
    { X86::SAR8r1, X86::SAR8m1 },
    { X86::SAR8rCL, X86::SAR8mCL },
    { X86::SAR8ri, X86::SAR8mi },
    { X86::SBB32ri, X86::SBB32mi },
    { X86::SBB32ri8, X86::SBB32mi8 },
    { X86::SBB32rr, X86::SBB32mr },
    { X86::SBB64ri32, X86::SBB64mi32 },
    { X86::SBB64ri8, X86::SBB64mi8 },
    { X86::SBB64rr, X86::SBB64mr },
    { X86::SHL16rCL, X86::SHL16mCL },
    { X86::SHL16ri, X86::SHL16mi },
    { X86::SHL32rCL, X86::SHL32mCL },
    { X86::SHL32ri, X86::SHL32mi },
    { X86::SHL64rCL, X86::SHL64mCL },
    { X86::SHL64ri, X86::SHL64mi },
    { X86::SHL8rCL, X86::SHL8mCL },
    { X86::SHL8ri, X86::SHL8mi },
    { X86::SHLD16rrCL, X86::SHLD16mrCL },
    { X86::SHLD16rri8, X86::SHLD16mri8 },
    { X86::SHLD32rrCL, X86::SHLD32mrCL },
    { X86::SHLD32rri8, X86::SHLD32mri8 },
    { X86::SHLD64rrCL, X86::SHLD64mrCL },
    { X86::SHLD64rri8, X86::SHLD64mri8 },
    { X86::SHR16r1, X86::SHR16m1 },
    { X86::SHR16rCL, X86::SHR16mCL },
    { X86::SHR16ri, X86::SHR16mi },
    { X86::SHR32r1, X86::SHR32m1 },
    { X86::SHR32rCL, X86::SHR32mCL },
    { X86::SHR32ri, X86::SHR32mi },
    { X86::SHR64r1, X86::SHR64m1 },
    { X86::SHR64rCL, X86::SHR64mCL },
    { X86::SHR64ri, X86::SHR64mi },
    { X86::SHR8r1, X86::SHR8m1 },
    { X86::SHR8rCL, X86::SHR8mCL },
    { X86::SHR8ri, X86::SHR8mi },
    { X86::SHRD16rrCL, X86::SHRD16mrCL },
    { X86::SHRD16rri8, X86::SHRD16mri8 },
    { X86::SHRD32rrCL, X86::SHRD32mrCL },
    { X86::SHRD32rri8, X86::SHRD32mri8 },
    { X86::SHRD64rrCL, X86::SHRD64mrCL },
    { X86::SHRD64rri8, X86::SHRD64mri8 },
    { X86::SUB16ri, X86::SUB16mi },
    { X86::SUB16ri8, X86::SUB16mi8 },
    { X86::SUB16rr, X86::SUB16mr },
    { X86::SUB32ri, X86::SUB32mi },
    { X86::SUB32ri8, X86::SUB32mi8 },
    { X86::SUB32rr, X86::SUB32mr },
    { X86::SUB64ri32, X86::SUB64mi32 },
    { X86::SUB64ri8, X86::SUB64mi8 },
    { X86::SUB64rr, X86::SUB64mr },
    { X86::SUB8ri, X86::SUB8mi },
    { X86::SUB8rr, X86::SUB8mr },
    { X86::XOR16ri, X86::XOR16mi },
    { X86::XOR16ri8, X86::XOR16mi8 },
    { X86::XOR16rr, X86::XOR16mr },
    { X86::XOR32ri, X86::XOR32mi },
    { X86::XOR32ri8, X86::XOR32mi8 },
    { X86::XOR32rr, X86::XOR32mr },
    { X86::XOR64ri32, X86::XOR64mi32 },
    { X86::XOR64ri8, X86::XOR64mi8 },
    { X86::XOR64rr, X86::XOR64mr },
    { X86::XOR8ri, X86::XOR8mi },
    { X86::XOR8rr, X86::XOR8mr }
  };

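  // Each entry inserted into MemOp2RegOpTable below carries an AuxInfo word:
  // the low four bits hold the operand index that was folded, bit 4 is set
  // if the memory form folds a load, and bit 5 is set if it folds a store.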
  for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
    unsigned RegOp = OpTbl2Addr[i][0];
    unsigned MemOp = OpTbl2Addr[i][1];
    if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp, MemOp)))
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 0 | (1 << 4) | (1 << 5); // Index 0, folded load and store
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp, AuxInfo))))
      AmbEntries.push_back(MemOp);
  }

  // If the third value is 1, the memory form folds a load; if it is 0, it
  // folds a store.
  static const unsigned OpTbl0[][3] = {
    { X86::CALL32r, X86::CALL32m, 1 },
    { X86::CALL64r, X86::CALL64m, 1 },
    { X86::CMP16ri, X86::CMP16mi, 1 },
    { X86::CMP16ri8, X86::CMP16mi8, 1 },
    { X86::CMP16rr, X86::CMP16mr, 1 },
    { X86::CMP32ri, X86::CMP32mi, 1 },
    { X86::CMP32ri8, X86::CMP32mi8, 1 },
    { X86::CMP32rr, X86::CMP32mr, 1 },
    { X86::CMP64ri32, X86::CMP64mi32, 1 },
    { X86::CMP64ri8, X86::CMP64mi8, 1 },
    { X86::CMP64rr, X86::CMP64mr, 1 },
    { X86::CMP8ri, X86::CMP8mi, 1 },
    { X86::CMP8rr, X86::CMP8mr, 1 },
    { X86::DIV16r, X86::DIV16m, 1 },
    { X86::DIV32r, X86::DIV32m, 1 },
    { X86::DIV64r, X86::DIV64m, 1 },
    { X86::DIV8r, X86::DIV8m, 1 },
    { X86::FsMOVAPDrr, X86::MOVSDmr, 0 },
    { X86::FsMOVAPSrr, X86::MOVSSmr, 0 },
    { X86::IDIV16r, X86::IDIV16m, 1 },
    { X86::IDIV32r, X86::IDIV32m, 1 },
    { X86::IDIV64r, X86::IDIV64m, 1 },
    { X86::IDIV8r, X86::IDIV8m, 1 },
    { X86::IMUL16r, X86::IMUL16m, 1 },
    { X86::IMUL32r, X86::IMUL32m, 1 },
    { X86::IMUL64r, X86::IMUL64m, 1 },
    { X86::IMUL8r, X86::IMUL8m, 1 },
    { X86::JMP32r, X86::JMP32m, 1 },
    { X86::JMP64r, X86::JMP64m, 1 },
    { X86::MOV16ri, X86::MOV16mi, 0 },
    { X86::MOV16rr, X86::MOV16mr, 0 },
    { X86::MOV16to16_, X86::MOV16_mr, 0 },
    { X86::MOV32ri, X86::MOV32mi, 0 },
    { X86::MOV32rr, X86::MOV32mr, 0 },
    { X86::MOV32to32_, X86::MOV32_mr, 0 },
    { X86::MOV64ri32, X86::MOV64mi32, 0 },
    { X86::MOV64rr, X86::MOV64mr, 0 },
    { X86::MOV8ri, X86::MOV8mi, 0 },
    { X86::MOV8rr, X86::MOV8mr, 0 },
    { X86::MOVAPDrr, X86::MOVAPDmr, 0 },
    { X86::MOVAPSrr, X86::MOVAPSmr, 0 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, 0 },
    { X86::MOVPQIto64rr, X86::MOVPQI2QImr, 0 },
    { X86::MOVPS2SSrr, X86::MOVPS2SSmr, 0 },
    { X86::MOVSDrr, X86::MOVSDmr, 0 },
    { X86::MOVSDto64rr, X86::MOVSDto64mr, 0 },
    { X86::MOVSS2DIrr, X86::MOVSS2DImr, 0 },
    { X86::MOVSSrr, X86::MOVSSmr, 0 },
    { X86::MOVUPDrr, X86::MOVUPDmr, 0 },
    { X86::MOVUPSrr, X86::MOVUPSmr, 0 },
    { X86::MUL16r, X86::MUL16m, 1 },
    { X86::MUL32r, X86::MUL32m, 1 },
    { X86::MUL64r, X86::MUL64m, 1 },
    { X86::MUL8r, X86::MUL8m, 1 },
    { X86::SETAEr, X86::SETAEm, 0 },
    { X86::SETAr, X86::SETAm, 0 },
    { X86::SETBEr, X86::SETBEm, 0 },
    { X86::SETBr, X86::SETBm, 0 },
    { X86::SETEr, X86::SETEm, 0 },
    { X86::SETGEr, X86::SETGEm, 0 },
    { X86::SETGr, X86::SETGm, 0 },
    { X86::SETLEr, X86::SETLEm, 0 },
    { X86::SETLr, X86::SETLm, 0 },
    { X86::SETNEr, X86::SETNEm, 0 },
    { X86::SETNPr, X86::SETNPm, 0 },
    { X86::SETNSr, X86::SETNSm, 0 },
    { X86::SETPr, X86::SETPm, 0 },
    { X86::SETSr, X86::SETSm, 0 },
    { X86::TAILJMPr, X86::TAILJMPm, 1 },
    { X86::TEST16ri, X86::TEST16mi, 1 },
    { X86::TEST32ri, X86::TEST32mi, 1 },
    { X86::TEST64ri32, X86::TEST64mi32, 1 },
    { X86::TEST8ri, X86::TEST8mi, 1 }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
    unsigned RegOp = OpTbl0[i][0];
    unsigned MemOp = OpTbl0[i][1];
    if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp, MemOp)))
      assert(false && "Duplicated entries?");
    unsigned FoldedLoad = OpTbl0[i][2];
    // Index 0, folded load or store.
    unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp, AuxInfo))))
        AmbEntries.push_back(MemOp);
  }

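  // OpTbl1 folds a load into operand 1 of the register form.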
  static const unsigned OpTbl1[][2] = {
    { X86::CMP16rr, X86::CMP16rm },
    { X86::CMP32rr, X86::CMP32rm },
    { X86::CMP64rr, X86::CMP64rm },
    { X86::CMP8rr, X86::CMP8rm },
    { X86::CVTSD2SSrr, X86::CVTSD2SSrm },
    { X86::CVTSI2SD64rr, X86::CVTSI2SD64rm },
    { X86::CVTSI2SDrr, X86::CVTSI2SDrm },
    { X86::CVTSI2SS64rr, X86::CVTSI2SS64rm },
    { X86::CVTSI2SSrr, X86::CVTSI2SSrm },
    { X86::CVTSS2SDrr, X86::CVTSS2SDrm },
    { X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm },
    { X86::CVTTSD2SIrr, X86::CVTTSD2SIrm },
    { X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm },
    { X86::CVTTSS2SIrr, X86::CVTTSS2SIrm },
    { X86::FsMOVAPDrr, X86::MOVSDrm },
    { X86::FsMOVAPSrr, X86::MOVSSrm },
    { X86::IMUL16rri, X86::IMUL16rmi },
    { X86::IMUL16rri8, X86::IMUL16rmi8 },
    { X86::IMUL32rri, X86::IMUL32rmi },
    { X86::IMUL32rri8, X86::IMUL32rmi8 },
    { X86::IMUL64rri32, X86::IMUL64rmi32 },
    { X86::IMUL64rri8, X86::IMUL64rmi8 },
    { X86::Int_CMPSDrr, X86::Int_CMPSDrm },
    { X86::Int_CMPSSrr, X86::Int_CMPSSrm },
    { X86::Int_COMISDrr, X86::Int_COMISDrm },
    { X86::Int_COMISSrr, X86::Int_COMISSrm },
    { X86::Int_CVTDQ2PDrr, X86::Int_CVTDQ2PDrm },
    { X86::Int_CVTDQ2PSrr, X86::Int_CVTDQ2PSrm },
    { X86::Int_CVTPD2DQrr, X86::Int_CVTPD2DQrm },
    { X86::Int_CVTPD2PSrr, X86::Int_CVTPD2PSrm },
    { X86::Int_CVTPS2DQrr, X86::Int_CVTPS2DQrm },
    { X86::Int_CVTPS2PDrr, X86::Int_CVTPS2PDrm },
    { X86::Int_CVTSD2SI64rr, X86::Int_CVTSD2SI64rm },
    { X86::Int_CVTSD2SIrr, X86::Int_CVTSD2SIrm },
    { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm },
    { X86::Int_CVTSI2SD64rr, X86::Int_CVTSI2SD64rm },
    { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm },
    { X86::Int_CVTSI2SS64rr, X86::Int_CVTSI2SS64rm },
    { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm },
    { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm },
    { X86::Int_CVTSS2SI64rr, X86::Int_CVTSS2SI64rm },
    { X86::Int_CVTSS2SIrr, X86::Int_CVTSS2SIrm },
    { X86::Int_CVTTPD2DQrr, X86::Int_CVTTPD2DQrm },
    { X86::Int_CVTTPS2DQrr, X86::Int_CVTTPS2DQrm },
    { X86::Int_CVTTSD2SI64rr, X86::Int_CVTTSD2SI64rm },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm },
    { X86::Int_CVTTSS2SI64rr, X86::Int_CVTTSS2SI64rm },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm },
    { X86::Int_UCOMISDrr, X86::Int_UCOMISDrm },
    { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm },
    { X86::MOV16rr, X86::MOV16rm },
    { X86::MOV16to16_, X86::MOV16_rm },
    { X86::MOV32rr, X86::MOV32rm },
    { X86::MOV32to32_, X86::MOV32_rm },
    { X86::MOV64rr, X86::MOV64rm },
    { X86::MOV64toPQIrr, X86::MOVQI2PQIrm },
    { X86::MOV64toSDrr, X86::MOV64toSDrm },
    { X86::MOV8rr, X86::MOV8rm },
    { X86::MOVAPDrr, X86::MOVAPDrm },
    { X86::MOVAPSrr, X86::MOVAPSrm },
    { X86::MOVDDUPrr, X86::MOVDDUPrm },
    { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm },
    { X86::MOVDI2SSrr, X86::MOVDI2SSrm },
    { X86::MOVSD2PDrr, X86::MOVSD2PDrm },
    { X86::MOVSDrr, X86::MOVSDrm },
    { X86::MOVSHDUPrr, X86::MOVSHDUPrm },
    { X86::MOVSLDUPrr, X86::MOVSLDUPrm },
    { X86::MOVSS2PSrr, X86::MOVSS2PSrm },
    { X86::MOVSSrr, X86::MOVSSrm },
    { X86::MOVSX16rr8, X86::MOVSX16rm8 },
    { X86::MOVSX32rr16, X86::MOVSX32rm16 },
    { X86::MOVSX32rr8, X86::MOVSX32rm8 },
    { X86::MOVSX64rr16, X86::MOVSX64rm16 },
    { X86::MOVSX64rr32, X86::MOVSX64rm32 },
    { X86::MOVSX64rr8, X86::MOVSX64rm8 },
    { X86::MOVUPDrr, X86::MOVUPDrm },
    { X86::MOVUPSrr, X86::MOVUPSrm },
    { X86::MOVZDI2PDIrr, X86::MOVZDI2PDIrm },
    { X86::MOVZQI2PQIrr, X86::MOVZQI2PQIrm },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm },
    { X86::MOVZX16rr8, X86::MOVZX16rm8 },
    { X86::MOVZX32rr16, X86::MOVZX32rm16 },
    { X86::MOVZX32rr8, X86::MOVZX32rm8 },
    { X86::MOVZX64rr16, X86::MOVZX64rm16 },
    { X86::MOVZX64rr8, X86::MOVZX64rm8 },
    { X86::PSHUFDri, X86::PSHUFDmi },
    { X86::PSHUFHWri, X86::PSHUFHWmi },
    { X86::PSHUFLWri, X86::PSHUFLWmi },
    { X86::RCPPSr, X86::RCPPSm },
    { X86::RCPPSr_Int, X86::RCPPSm_Int },
    { X86::RSQRTPSr, X86::RSQRTPSm },
    { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int },
    { X86::RSQRTSSr, X86::RSQRTSSm },
    { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int },
    { X86::SQRTPDr, X86::SQRTPDm },
    { X86::SQRTPDr_Int, X86::SQRTPDm_Int },
    { X86::SQRTPSr, X86::SQRTPSm },
    { X86::SQRTPSr_Int, X86::SQRTPSm_Int },
    { X86::SQRTSDr, X86::SQRTSDm },
    { X86::SQRTSDr_Int, X86::SQRTSDm_Int },
    { X86::SQRTSSr, X86::SQRTSSm },
    { X86::SQRTSSr_Int, X86::SQRTSSm_Int },
    { X86::TEST16rr, X86::TEST16rm },
    { X86::TEST32rr, X86::TEST32rm },
    { X86::TEST64rr, X86::TEST64rm },
    { X86::TEST8rr, X86::TEST8rm },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr, X86::UCOMISDrm },
    { X86::UCOMISSrr, X86::UCOMISSrm }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
    unsigned RegOp = OpTbl1[i][0];
    unsigned MemOp = OpTbl1[i][1];
    if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp, MemOp)))
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 1 | (1 << 4); // Index 1, folded load
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp, AuxInfo))))
        AmbEntries.push_back(MemOp);
  }

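  // OpTbl2 folds a load into operand 2 of the register form (typically the
  // second source operand of a two-address instruction).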
  static const unsigned OpTbl2[][2] = {
    { X86::ADC32rr, X86::ADC32rm },
    { X86::ADC64rr, X86::ADC64rm },
    { X86::ADD16rr, X86::ADD16rm },
    { X86::ADD32rr, X86::ADD32rm },
    { X86::ADD64rr, X86::ADD64rm },
    { X86::ADD8rr, X86::ADD8rm },
    { X86::ADDPDrr, X86::ADDPDrm },
    { X86::ADDPSrr, X86::ADDPSrm },
    { X86::ADDSDrr, X86::ADDSDrm },
    { X86::ADDSSrr, X86::ADDSSrm },
    { X86::ADDSUBPDrr, X86::ADDSUBPDrm },
    { X86::ADDSUBPSrr, X86::ADDSUBPSrm },
    { X86::AND16rr, X86::AND16rm },
    { X86::AND32rr, X86::AND32rm },
    { X86::AND64rr, X86::AND64rm },
    { X86::AND8rr, X86::AND8rm },
    { X86::ANDNPDrr, X86::ANDNPDrm },
    { X86::ANDNPSrr, X86::ANDNPSrm },
    { X86::ANDPDrr, X86::ANDPDrm },
    { X86::ANDPSrr, X86::ANDPSrm },
    { X86::CMOVA16rr, X86::CMOVA16rm },
    { X86::CMOVA32rr, X86::CMOVA32rm },
    { X86::CMOVA64rr, X86::CMOVA64rm },
    { X86::CMOVAE16rr, X86::CMOVAE16rm },
    { X86::CMOVAE32rr, X86::CMOVAE32rm },
    { X86::CMOVAE64rr, X86::CMOVAE64rm },
    { X86::CMOVB16rr, X86::CMOVB16rm },
    { X86::CMOVB32rr, X86::CMOVB32rm },
    { X86::CMOVB64rr, X86::CMOVB64rm },
    { X86::CMOVBE16rr, X86::CMOVBE16rm },
    { X86::CMOVBE32rr, X86::CMOVBE32rm },
    { X86::CMOVBE64rr, X86::CMOVBE64rm },
    { X86::CMOVE16rr, X86::CMOVE16rm },
    { X86::CMOVE32rr, X86::CMOVE32rm },
    { X86::CMOVE64rr, X86::CMOVE64rm },
    { X86::CMOVG16rr, X86::CMOVG16rm },
    { X86::CMOVG32rr, X86::CMOVG32rm },
    { X86::CMOVG64rr, X86::CMOVG64rm },
    { X86::CMOVGE16rr, X86::CMOVGE16rm },
    { X86::CMOVGE32rr, X86::CMOVGE32rm },
    { X86::CMOVGE64rr, X86::CMOVGE64rm },
    { X86::CMOVL16rr, X86::CMOVL16rm },
    { X86::CMOVL32rr, X86::CMOVL32rm },
    { X86::CMOVL64rr, X86::CMOVL64rm },
    { X86::CMOVLE16rr, X86::CMOVLE16rm },
    { X86::CMOVLE32rr, X86::CMOVLE32rm },
    { X86::CMOVLE64rr, X86::CMOVLE64rm },
    { X86::CMOVNE16rr, X86::CMOVNE16rm },
    { X86::CMOVNE32rr, X86::CMOVNE32rm },
    { X86::CMOVNE64rr, X86::CMOVNE64rm },
    { X86::CMOVNP16rr, X86::CMOVNP16rm },
    { X86::CMOVNP32rr, X86::CMOVNP32rm },
    { X86::CMOVNP64rr, X86::CMOVNP64rm },
    { X86::CMOVNS16rr, X86::CMOVNS16rm },
    { X86::CMOVNS32rr, X86::CMOVNS32rm },
    { X86::CMOVNS64rr, X86::CMOVNS64rm },
    { X86::CMOVP16rr, X86::CMOVP16rm },
    { X86::CMOVP32rr, X86::CMOVP32rm },
    { X86::CMOVP64rr, X86::CMOVP64rm },
    { X86::CMOVS16rr, X86::CMOVS16rm },
    { X86::CMOVS32rr, X86::CMOVS32rm },
    { X86::CMOVS64rr, X86::CMOVS64rm },
    { X86::CMPPDrri, X86::CMPPDrmi },
    { X86::CMPPSrri, X86::CMPPSrmi },
    { X86::CMPSDrr, X86::CMPSDrm },
    { X86::CMPSSrr, X86::CMPSSrm },
    { X86::DIVPDrr, X86::DIVPDrm },
    { X86::DIVPSrr, X86::DIVPSrm },
    { X86::DIVSDrr, X86::DIVSDrm },
    { X86::DIVSSrr, X86::DIVSSrm },
    { X86::FsANDNPDrr, X86::FsANDNPDrm },
    { X86::FsANDNPSrr, X86::FsANDNPSrm },
    { X86::FsANDPDrr, X86::FsANDPDrm },
    { X86::FsANDPSrr, X86::FsANDPSrm },
    { X86::FsORPDrr, X86::FsORPDrm },
    { X86::FsORPSrr, X86::FsORPSrm },
    { X86::FsXORPDrr, X86::FsXORPDrm },
    { X86::FsXORPSrr, X86::FsXORPSrm },
    { X86::HADDPDrr, X86::HADDPDrm },
    { X86::HADDPSrr, X86::HADDPSrm },
    { X86::HSUBPDrr, X86::HSUBPDrm },
    { X86::HSUBPSrr, X86::HSUBPSrm },
    { X86::IMUL16rr, X86::IMUL16rm },
    { X86::IMUL32rr, X86::IMUL32rm },
    { X86::IMUL64rr, X86::IMUL64rm },
    { X86::MAXPDrr, X86::MAXPDrm },
    { X86::MAXPDrr_Int, X86::MAXPDrm_Int },
    { X86::MAXPSrr, X86::MAXPSrm },
    { X86::MAXPSrr_Int, X86::MAXPSrm_Int },
    { X86::MAXSDrr, X86::MAXSDrm },
    { X86::MAXSDrr_Int, X86::MAXSDrm_Int },
    { X86::MAXSSrr, X86::MAXSSrm },
    { X86::MAXSSrr_Int, X86::MAXSSrm_Int },
    { X86::MINPDrr, X86::MINPDrm },
    { X86::MINPDrr_Int, X86::MINPDrm_Int },
    { X86::MINPSrr, X86::MINPSrm },
    { X86::MINPSrr_Int, X86::MINPSrm_Int },
    { X86::MINSDrr, X86::MINSDrm },
    { X86::MINSDrr_Int, X86::MINSDrm_Int },
    { X86::MINSSrr, X86::MINSSrm },
    { X86::MINSSrr_Int, X86::MINSSrm_Int },
    { X86::MULPDrr, X86::MULPDrm },
    { X86::MULPSrr, X86::MULPSrm },
    { X86::MULSDrr, X86::MULSDrm },
    { X86::MULSSrr, X86::MULSSrm },
    { X86::OR16rr, X86::OR16rm },
    { X86::OR32rr, X86::OR32rm },
    { X86::OR64rr, X86::OR64rm },
    { X86::OR8rr, X86::OR8rm },
    { X86::ORPDrr, X86::ORPDrm },
    { X86::ORPSrr, X86::ORPSrm },
    { X86::PACKSSDWrr, X86::PACKSSDWrm },
    { X86::PACKSSWBrr, X86::PACKSSWBrm },
    { X86::PACKUSWBrr, X86::PACKUSWBrm },
    { X86::PADDBrr, X86::PADDBrm },
    { X86::PADDDrr, X86::PADDDrm },
    { X86::PADDQrr, X86::PADDQrm },
    { X86::PADDSBrr, X86::PADDSBrm },
    { X86::PADDSWrr, X86::PADDSWrm },
    { X86::PADDWrr, X86::PADDWrm },
    { X86::PANDNrr, X86::PANDNrm },
    { X86::PANDrr, X86::PANDrm },
    { X86::PAVGBrr, X86::PAVGBrm },
    { X86::PAVGWrr, X86::PAVGWrm },
    { X86::PCMPEQBrr, X86::PCMPEQBrm },
    { X86::PCMPEQDrr, X86::PCMPEQDrm },
    { X86::PCMPEQWrr, X86::PCMPEQWrm },
    { X86::PCMPGTBrr, X86::PCMPGTBrm },
    { X86::PCMPGTDrr, X86::PCMPGTDrm },
    { X86::PCMPGTWrr, X86::PCMPGTWrm },
    { X86::PINSRWrri, X86::PINSRWrmi },
    { X86::PMADDWDrr, X86::PMADDWDrm },
    { X86::PMAXSWrr, X86::PMAXSWrm },
    { X86::PMAXUBrr, X86::PMAXUBrm },
    { X86::PMINSWrr, X86::PMINSWrm },
    { X86::PMINUBrr, X86::PMINUBrm },
    { X86::PMULHUWrr, X86::PMULHUWrm },
    { X86::PMULHWrr, X86::PMULHWrm },
    { X86::PMULLWrr, X86::PMULLWrm },
    { X86::PMULUDQrr, X86::PMULUDQrm },
    { X86::PORrr, X86::PORrm },
    { X86::PSADBWrr, X86::PSADBWrm },
    { X86::PSLLDrr, X86::PSLLDrm },
    { X86::PSLLQrr, X86::PSLLQrm },
    { X86::PSLLWrr, X86::PSLLWrm },
    { X86::PSRADrr, X86::PSRADrm },
    { X86::PSRAWrr, X86::PSRAWrm },
    { X86::PSRLDrr, X86::PSRLDrm },
    { X86::PSRLQrr, X86::PSRLQrm },
    { X86::PSRLWrr, X86::PSRLWrm },
    { X86::PSUBBrr, X86::PSUBBrm },
    { X86::PSUBDrr, X86::PSUBDrm },
    { X86::PSUBSBrr, X86::PSUBSBrm },
    { X86::PSUBSWrr, X86::PSUBSWrm },
    { X86::PSUBWrr, X86::PSUBWrm },
    { X86::PUNPCKHBWrr, X86::PUNPCKHBWrm },
    { X86::PUNPCKHDQrr, X86::PUNPCKHDQrm },
    { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm },
    { X86::PUNPCKHWDrr, X86::PUNPCKHWDrm },
    { X86::PUNPCKLBWrr, X86::PUNPCKLBWrm },
    { X86::PUNPCKLDQrr, X86::PUNPCKLDQrm },
    { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm },
    { X86::PUNPCKLWDrr, X86::PUNPCKLWDrm },
    { X86::PXORrr, X86::PXORrm },
    { X86::SBB32rr, X86::SBB32rm },
    { X86::SBB64rr, X86::SBB64rm },
    { X86::SHUFPDrri, X86::SHUFPDrmi },
    { X86::SHUFPSrri, X86::SHUFPSrmi },
    { X86::SUB16rr, X86::SUB16rm },
    { X86::SUB32rr, X86::SUB32rm },
    { X86::SUB64rr, X86::SUB64rm },
    { X86::SUB8rr, X86::SUB8rm },
    { X86::SUBPDrr, X86::SUBPDrm },
    { X86::SUBPSrr, X86::SUBPSrm },
    { X86::SUBSDrr, X86::SUBSDrm },
    { X86::SUBSSrr, X86::SUBSSrm },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr, X86::UNPCKHPDrm },
    { X86::UNPCKHPSrr, X86::UNPCKHPSrm },
    { X86::UNPCKLPDrr, X86::UNPCKLPDrm },
    { X86::UNPCKLPSrr, X86::UNPCKLPSrm },
    { X86::XOR16rr, X86::XOR16rm },
    { X86::XOR32rr, X86::XOR32rm },
    { X86::XOR64rr, X86::XOR64rm },
    { X86::XOR8rr, X86::XOR8rm },
    { X86::XORPDrr, X86::XORPDrm },
    { X86::XORPSrr, X86::XORPSrm }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
    unsigned RegOp = OpTbl2[i][0];
    unsigned MemOp = OpTbl2[i][1];
    if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp, MemOp)))
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 2 | (1 << 4); // Index 2, folded load
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp, AuxInfo))))
      AmbEntries.push_back(MemOp);
  }

  // There should be no ambiguous entries left in the unfolding map.
  assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}

bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case X86::MOV8rr:
  case X86::MOV16rr:
  case X86::MOV32rr:
  case X86::MOV64rr:
  case X86::MOV16to16_:
  case X86::MOV32to32_:
  case X86::MOVSSrr:
  case X86::MOVSDrr:

  // FP Stack register class copies
  case X86::MOV_Fp3232: case X86::MOV_Fp6464: case X86::MOV_Fp8080:
  case X86::MOV_Fp3264: case X86::MOV_Fp3280:
  case X86::MOV_Fp6432: case X86::MOV_Fp8032:

  case X86::FsMOVAPSrr:
  case X86::FsMOVAPDrr:
  case X86::MOVAPSrr:
  case X86::MOVAPDrr:
  case X86::MOVSS2PSrr:
  case X86::MOVSD2PDrr:
  case X86::MOVPS2SSrr:
  case X86::MOVPD2SDrr:
  case X86::MMX_MOVD64rr:
  case X86::MMX_MOVQ64rr:
    assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "invalid register-register move instruction");
    sourceReg = MI.getOperand(1).getReg();
    destReg = MI.getOperand(0).getReg();
    return true;
  }
}

unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(4).isImm() &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
        MI->getOperand(2).isReg() && MI->getOperand(3).isImm() &&
        MI->getOperand(1).getImm() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}


/// regIsPICBase - Return true if register is a PIC base (i.e., defined by
/// X86::MOVPC32r).
static bool regIsPICBase(unsigned BaseReg, MachineRegisterInfo &MRI) {
  bool isPICBase = false;
  for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
         E = MRI.def_end(); I != E; ++I) {
    MachineInstr *DefMI = I.getOperand().getParent();
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

/// isGVStub - Return true if the GV requires an extra load to get the
/// real address.
static inline bool isGVStub(GlobalValue *GV, X86TargetMachine &TM) {
  return TM.getSubtarget<X86Subtarget>().GVRequiresExtraLoad(GV, TM, false);
}

bool X86InstrInfo::isReallyTriviallyReMaterializable(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI->getOperand(1).isReg() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        (MI->getOperand(4).isCPI() ||
         (MI->getOperand(4).isGlobal() &&
          isGVStub(MI->getOperand(4).getGlobal(), TM)))) {
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Re-materialization of a PIC stub load is only allowed when it is
      // explicitly enabled.
      if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
        return false;
      // Otherwise the load is rematerializable only if the base register is
      // the (unique) PIC base.
      MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI->getOperand(1).isReg() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        !MI->getOperand(4).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

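/// reMaterialize - Re-emit a copy of the instruction Orig at the given
/// insertion point, retargeting its definition to DestReg.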
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg,
                                 const MachineInstr *Orig) const {
  unsigned SubIdx = Orig->getOperand(0).isReg()
    ? Orig->getOperand(0).getSubReg() : 0;
  bool ChangeSubIdx = SubIdx != 0;
  if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
    DestReg = RI.getSubReg(DestReg, SubIdx);
    SubIdx = 0;
  }

  // MOV32r0 etc. are implemented with xor, which clobbers the condition code.
  // Re-materialize them as movri instructions to avoid the side effects.
  switch (Orig->getOpcode()) {
  case X86::MOV8r0:
    BuildMI(MBB, I, get(X86::MOV8ri), DestReg).addImm(0);
    break;
  case X86::MOV16r0:
    BuildMI(MBB, I, get(X86::MOV16ri), DestReg).addImm(0);
    break;
  case X86::MOV32r0:
    BuildMI(MBB, I, get(X86::MOV32ri), DestReg).addImm(0);
    break;
  case X86::MOV64r0:
    BuildMI(MBB, I, get(X86::MOV64ri32), DestReg).addImm(0);
    break;
  default: {
    MachineInstr *MI = Orig->clone();
    MI->getOperand(0).setReg(DestReg);
    MBB.insert(I, MI);
    break;
  }
  }

  if (ChangeSubIdx) {
    MachineInstr *NewMI = prior(I);
    NewMI->getOperand(0).setSubReg(SubIdx);
  }
}

/// isInvariantLoad - Return true if the specified instruction (which is marked
/// mayLoad) is loading from a location whose value is invariant across the
/// function. For example, loading a value from the constant pool or from
/// the argument area of a function if it does not change. This should only
/// return true if *all* loads the instruction does are invariant (if it does
/// multiple loads).
bool X86InstrInfo::isInvariantLoad(MachineInstr *MI) const {
  // This code cares about loads from three cases: constant pool entries,
  // invariant argument slots, and global stubs. In order to handle these cases
  // for all of the myriad of X86 instructions, we just scan for a CP/FI/GV
  // operand and base our analysis on it. This is safe because the address of
  // none of these three cases is ever used as anything other than a load base
  // and X86 doesn't have any instructions that load from multiple places.

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    // Loads from constant pools are trivially invariant.
    if (MO.isCPI())
      return true;

    if (MO.isGlobal())
      return isGVStub(MO.getGlobal(), TM);

    // If this is a load from an invariant stack slot, the load is a constant.
    if (MO.isFI()) {
      const MachineFrameInfo &MFI =
        *MI->getParent()->getParent()->getFrameInfo();
      int Idx = MO.getIndex();
      return MFI.isFixedObjectIndex(Idx) && MFI.isImmutableObjectIndex(Idx);
    }
  }

  // All other instances of these instructions are presumed to have other
  // issues.
  return false;
}

/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  MachineInstr *MI = MBBI;
  // All input instructions are two-address instructions. Get the known
  // operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  unsigned MIOpc = MI->getOpcode();
  switch (MIOpc) {
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    unsigned M = MI->getOperand(3).getImm();
    if (B != C) return 0;
    NewMI = BuildMI(get(X86::PSHUFDri), A).addReg(B).addImm(M);
    break;
  }
  case X86::SHL64ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
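    // LEA's scale field can only encode 1, 2, 4, or 8, so only shift amounts
    // of 1-3 (scales 2, 4, and 8) can be converted.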
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(get(X86::LEA64r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(get(Opc), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    if (DisableLEA16) {
      // If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
      MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
      unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
        ? X86::LEA64_32r : X86::LEA32r;
      unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);

      // Build and insert into an implicit UNDEF value. This is OK because
      // we'll be shifting and then extracting the lower 16 bits.
      MachineInstr *Undef = BuildMI(get(X86::IMPLICIT_DEF), leaInReg);

      MachineInstr *Ins =
        BuildMI(get(X86::INSERT_SUBREG), leaInReg)
          .addReg(leaInReg).addReg(Src).addImm(X86::SUBREG_16BIT);

      NewMI = BuildMI(get(Opc), leaOutReg)
        .addReg(0).addImm(1 << ShAmt).addReg(leaInReg).addImm(0);

      MachineInstr *Ext =
        BuildMI(get(X86::EXTRACT_SUBREG), Dest)
          .addReg(leaOutReg).addImm(X86::SUBREG_16BIT);
      Ext->copyKillDeadInfo(MI);

      MFI->insert(MBBI, Undef);
      MFI->insert(MBBI, Ins);            // Insert the insert_subreg
      LV.instructionChanged(MI, NewMI);  // Update live variables
      LV.addVirtualRegisterKilled(leaInReg, NewMI);
      MFI->insert(MBBI, NewMI);          // Insert the new inst
      LV.addVirtualRegisterKilled(leaOutReg, Ext);
      MFI->insert(MBBI, Ext);            // Insert the extract_subreg
      return Ext;
    } else {
      NewMI = BuildMI(get(X86::LEA16r), Dest)
        .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    }
    break;
  }
  default: {
    // The following opcodes also set the condition code register(s). Only
    // convert them to an equivalent LEA if the condition code register defs
    // are dead!
    if (hasLiveCondCodeDef(MI))
      return 0;

    bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
    switch (MIOpc) {
    default: return 0;
    case X86::INC64r:
    case X86::INC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, 1);
      break;
    }
    case X86::INC16r:
    case X86::INC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, 1);
      break;
    case X86::DEC64r:
    case X86::DEC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, -1);
      break;
    }
    case X86::DEC16r:
    case X86::DEC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, -1);
      break;
    case X86::ADD64rr:
    case X86::ADD32rr: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegReg(BuildMI(get(Opc), Dest), Src,
                        MI->getOperand(2).getReg());
      break;
    }
    case X86::ADD16rr:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      NewMI = addRegReg(BuildMI(get(X86::LEA16r), Dest), Src,
                        MI->getOperand(2).getReg());
      break;
    case X86::ADD64ri32:
    case X86::ADD64ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(get(X86::LEA64r), Dest), Src,
                             MI->getOperand(2).getImm());
      break;
    case X86::ADD32ri:
    case X86::ADD32ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate()) {
        unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
        NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src,
                             MI->getOperand(2).getImm());
      }
      break;
    case X86::ADD16ri:
    case X86::ADD16ri8:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src,
                             MI->getOperand(2).getImm());
      break;
    case X86::SHL16ri:
      if (DisableLEA16) return 0;
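      // FALL THROUGH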
    case X86::SHL32ri:
    case X86::SHL64ri: {
      assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImmediate() &&
             "Unknown shl instruction!");
      unsigned ShAmt = MI->getOperand(2).getImm();
      if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
        X86AddressMode AM;
        AM.Scale = 1 << ShAmt;
        AM.IndexReg = Src;
        unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
          : (MIOpc == X86::SHL32ri
             ? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
        NewMI = addFullAddress(BuildMI(get(Opc), Dest), AM);
      }
      break;
    }
    }
  }
  }

  if (!NewMI) return 0;

  NewMI->copyKillDeadInfo(MI);
  LV.instructionChanged(MI, NewMI);  // Update live variables
  MFI->insert(MBBI, NewMI);          // Insert the new inst
  return NewMI;
}

/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImm();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    bool BisKill = MI->getOperand(1).isKill();
    bool CisKill = MI->getOperand(2).isKill();
    // If machine instrs are no longer in two-address forms, update
    // destination register as well.
    if (A == B) {
      // Must be two address instruction!
      assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
             "Expecting a two-address instruction!");
      A = C;
      CisKill = false;
    }
    return BuildMI(get(Opc), A).addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
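  // Commuting a CMOV's operands requires inverting its condition code, so
  // rewrite the opcode to the opposite condition and fall through to the
  // generic operand swap below.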
  case X86::CMOVB16rr:
  case X86::CMOVB32rr:
  case X86::CMOVB64rr:
  case X86::CMOVAE16rr:
  case X86::CMOVAE32rr:
  case X86::CMOVAE64rr:
  case X86::CMOVE16rr:
  case X86::CMOVE32rr:
  case X86::CMOVE64rr:
  case X86::CMOVNE16rr:
  case X86::CMOVNE32rr:
  case X86::CMOVNE64rr:
  case X86::CMOVBE16rr:
  case X86::CMOVBE32rr:
  case X86::CMOVBE64rr:
  case X86::CMOVA16rr:
  case X86::CMOVA32rr:
  case X86::CMOVA64rr:
  case X86::CMOVL16rr:
  case X86::CMOVL32rr:
  case X86::CMOVL64rr:
  case X86::CMOVGE16rr:
  case X86::CMOVGE32rr:
  case X86::CMOVGE64rr:
  case X86::CMOVLE16rr:
  case X86::CMOVLE32rr:
  case X86::CMOVLE64rr:
  case X86::CMOVG16rr:
  case X86::CMOVG32rr:
  case X86::CMOVG64rr:
  case X86::CMOVS16rr:
  case X86::CMOVS32rr:
  case X86::CMOVS64rr:
  case X86::CMOVNS16rr:
  case X86::CMOVNS32rr:
  case X86::CMOVNS64rr:
  case X86::CMOVP16rr:
  case X86::CMOVP32rr:
  case X86::CMOVP64rr:
  case X86::CMOVNP16rr:
  case X86::CMOVNP32rr:
  case X86::CMOVNP64rr: {
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    }

    MI->setDesc(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfoImpl::commuteInstruction(MI);
  }
}

static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE: return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL: return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG: return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB: return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA: return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS: return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP: return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO: return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L: return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G: return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B: return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A: return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S: return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P: return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O: return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L: return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G: return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B: return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A: return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S: return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P: return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O: return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isTerminator()) return false;

  // Conditional branch is a special case.
  if (TID.isBranch() && !TID.isBarrier())
    return true;
  if (!TID.isPredicable())
    return true;
  return !isPredicated(MI);
}

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
                                               const X86InstrInfo &TII) {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  return TII.isUnpredicatedTerminator(MI);
}

bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this)) {
    if (!LastInst->getDesc().isBranch())
      return true;

    // If the block ends with a branch there are 3 possibilities:
    // it's an unconditional, conditional, or indirect branch.

    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isBrAnalysisUnpredicatedTerminator(--I, *this))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two X86::JMPs, handle it. The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == X86::JMP &&
      LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

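/// X86InstrAddOperand - Append the given MachineOperand to an instruction
/// being built, dispatching on the operand's kind.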
static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
                                                     MachineOperand &MO) {
  if (MO.isRegister())
    MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
                     false, false, MO.getSubReg());
  else if (MO.isImmediate())
    MIB = MIB.addImm(MO.getImm());
  else if (MO.isFrameIndex())
    MIB = MIB.addFrameIndex(MO.getIndex());
  else if (MO.isGlobalAddress())
    MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
  else if (MO.isConstantPoolIndex())
    MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
  else if (MO.isJumpTableIndex())
    MIB = MIB.addJumpTableIndex(MO.getIndex());
  else if (MO.isExternalSymbol())
    MIB = MIB.addExternalSymbol(MO.getSymbolName());
  else
    assert(0 && "Unknown operand for X86InstrAddOperand!");

  return MIB;
}

unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const std::vector<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (FBB == 0) { // One way branch.
    if (Cond.empty()) {
      // Unconditional branch?
      BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, get(Opc)).addMBB(TBB);
    }
    return 1;
  }

  // Two-way conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, get(Opc)).addMBB(TBB);
  BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
  return 2;
}

void X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const {
  if (DestRC == SrcRC) {
    unsigned Opc;
    if (DestRC == &X86::GR64RegClass) {
      Opc = X86::MOV64rr;
    } else if (DestRC == &X86::GR32RegClass) {
      Opc = X86::MOV32rr;
    } else if (DestRC == &X86::GR16RegClass) {
      Opc = X86::MOV16rr;
    } else if (DestRC == &X86::GR8RegClass) {
      Opc = X86::MOV8rr;
    } else if (DestRC == &X86::GR32_RegClass) {
      Opc = X86::MOV32_rr;
    } else if (DestRC == &X86::GR16_RegClass) {
      Opc = X86::MOV16_rr;
    } else if (DestRC == &X86::RFP32RegClass) {
      Opc = X86::MOV_Fp3232;
    } else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
      Opc = X86::MOV_Fp6464;
    } else if (DestRC == &X86::RFP80RegClass) {
      Opc = X86::MOV_Fp8080;
    } else if (DestRC == &X86::FR32RegClass) {
      Opc = X86::FsMOVAPSrr;
    } else if (DestRC == &X86::FR64RegClass) {
      Opc = X86::FsMOVAPDrr;
    } else if (DestRC == &X86::VR128RegClass) {
      Opc = X86::MOVAPSrr;
    } else if (DestRC == &X86::VR64RegClass) {
      Opc = X86::MMX_MOVQ64rr;
    } else {
      assert(0 && "Unknown regclass");
      abort();
    }
    BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
    return;
  }

  // Moving EFLAGS to / from another register requires a push and a pop.
  if (SrcRC == &X86::CCRRegClass) {
    assert(SrcReg == X86::EFLAGS);
    if (DestRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, get(X86::PUSHFQ));
      BuildMI(MBB, MI, get(X86::POP64r), DestReg);
      return;
    } else if (DestRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, get(X86::PUSHFD));
      BuildMI(MBB, MI, get(X86::POP32r), DestReg);
      return;
    }
  } else if (DestRC == &X86::CCRRegClass) {
    assert(DestReg == X86::EFLAGS);
    if (SrcRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
      BuildMI(MBB, MI, get(X86::POPFQ));
      return;
    } else if (SrcRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
      BuildMI(MBB, MI, get(X86::POPFD));
      return;
    }
  }

  // Moving from ST(0) turns into FpGET_ST0_32 etc.
  if (SrcRC == &X86::RSTRegClass) {
    // Copying from ST(0)/ST(1).
    assert((SrcReg == X86::ST0 || SrcReg == X86::ST1) &&
           "Can only copy from ST(0)/ST(1) right now");
    bool isST0 = SrcReg == X86::ST0;
    unsigned Opc;
    if (DestRC == &X86::RFP32RegClass)
      Opc = isST0 ? X86::FpGET_ST0_32 : X86::FpGET_ST1_32;
    else if (DestRC == &X86::RFP64RegClass)
      Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
    else {
      assert(DestRC == &X86::RFP80RegClass);
      Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
    }
    BuildMI(MBB, MI, get(Opc), DestReg);
    return;
  }

  // Moving to ST(0) turns into FpSET_ST0_32 etc.
  if (DestRC == &X86::RSTRegClass) {
    // Copying to ST(0). FIXME: handle ST(1) also
    assert(DestReg == X86::ST0 && "Can only copy to TOS right now");
    unsigned Opc;
    if (SrcRC == &X86::RFP32RegClass)
      Opc = X86::FpSET_ST0_32;
    else if (SrcRC == &X86::RFP64RegClass)
      Opc = X86::FpSET_ST0_64;
    else {
      assert(SrcRC == &X86::RFP80RegClass);
      Opc = X86::FpSET_ST0_80;
    }
    BuildMI(MBB, MI, get(Opc)).addReg(SrcReg);
    return;
  }

  assert(0 && "Not yet supported!");
  abort();
}

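/// getStoreRegOpcode - Pick the opcode used to spill a register of the given
/// class to a stack slot.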
  if (SrcRC == &X86::CCRRegClass) {
    assert(SrcReg == X86::EFLAGS);
    if (DestRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, get(X86::PUSHFQ));
      BuildMI(MBB, MI, get(X86::POP64r), DestReg);
      return;
    } else if (DestRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, get(X86::PUSHFD));
      BuildMI(MBB, MI, get(X86::POP32r), DestReg);
      return;
    }
  } else if (DestRC == &X86::CCRRegClass) {
    assert(DestReg == X86::EFLAGS);
    if (SrcRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
      BuildMI(MBB, MI, get(X86::POPFQ));
      return;
    } else if (SrcRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
      BuildMI(MBB, MI, get(X86::POPFD));
      return;
    }
  }

  // Moving from ST(0) turns into FpGET_ST0_32 etc.
  if (SrcRC == &X86::RSTRegClass) {
    // Copying from ST(0)/ST(1).
    assert((SrcReg == X86::ST0 || SrcReg == X86::ST1) &&
           "Can only copy from ST(0)/ST(1) right now");
    bool isST0 = SrcReg == X86::ST0;
    unsigned Opc;
    if (DestRC == &X86::RFP32RegClass)
      Opc = isST0 ? X86::FpGET_ST0_32 : X86::FpGET_ST1_32;
    else if (DestRC == &X86::RFP64RegClass)
      Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
    else {
      assert(DestRC == &X86::RFP80RegClass);
      Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
    }
    BuildMI(MBB, MI, get(Opc), DestReg);
    return;
  }

  // Moving to ST(0) turns into FpSET_ST0_32 etc.
  if (DestRC == &X86::RSTRegClass) {
    // Copying to ST(0).  FIXME: handle ST(1) also
    assert(DestReg == X86::ST0 && "Can only copy to TOS right now");
    unsigned Opc;
    if (SrcRC == &X86::RFP32RegClass)
      Opc = X86::FpSET_ST0_32;
    else if (SrcRC == &X86::RFP64RegClass)
      Opc = X86::FpSET_ST0_64;
    else {
      assert(SrcRC == &X86::RFP80RegClass);
      Opc = X86::FpSET_ST0_80;
    }
    BuildMI(MBB, MI, get(Opc)).addReg(SrcReg);
    return;
  }

  assert(0 && "Not yet supported!");
  abort();
}
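// getStoreRegOpcode - Return the opcode used to spill a register of class RC
// to a stack slot, given the stack alignment of the current function.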
static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
                                  unsigned StackAlign) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_mr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_mr;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::ST_FpP80m;   // pops
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::ST_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::ST_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    // FIXME: Use movaps once we are capable of selectively
    // aligning functions that spill SSE registers on 16-byte boundaries.
    Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill,
                                       int FrameIdx,
                                       const TargetRegisterClass *RC) const {
  unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
  addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
    .addReg(SrcReg, false, false, isKill);
}

void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
  MachineInstrBuilder MIB = BuildMI(get(Opc));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  MIB.addReg(SrcReg, false, false, isKill);
  NewMIs.push_back(MIB);
}

static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
                                 unsigned StackAlign) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rm;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rm;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::LD_Fp80m;
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::LD_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::LD_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    // FIXME: Use movaps once we are capable of selectively
    // aligning functions that spill SSE registers on 16-byte boundaries.
    Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        unsigned DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC) const {
  unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
  addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
}

void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
  MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  NewMIs.push_back(MIB);
}
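// Callee-saved registers are spilled with pushes and restored with pops
// rather than with stack-slot stores; the space they occupy is recorded as
// the callee-saved frame size on X86MachineFunctionInfo.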
bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  unsigned SlotSize = is64Bit ? 8 : 4;

  MachineFunction &MF = *MBB.getParent();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  X86FI->setCalleeSavedFrameSize(CSI.size() * SlotSize);

  unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    BuildMI(MBB, MI, get(Opc)).addReg(Reg);
  }
  return true;
}

bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

  unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    BuildMI(MBB, MI, get(Opc), Reg);
  }
  return true;
}

static MachineInstr *FuseTwoAddrInst(unsigned Opcode,
                                     SmallVector<MachineOperand,4> &MOs,
                                 MachineInstr *MI, const TargetInstrInfo &TII) {
  // Create the base instruction with the memory operand as the first part.
  MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
  MachineInstrBuilder MIB(NewMI);
  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB = X86InstrAddOperand(MIB, MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    MIB.addImm(1).addReg(0).addImm(0);

  // Loop over the rest of the ri operands, converting them over.
  unsigned NumOps = MI->getDesc().getNumOperands()-2;
  for (unsigned i = 0; i != NumOps; ++i) {
    MachineOperand &MO = MI->getOperand(i+2);
    MIB = X86InstrAddOperand(MIB, MO);
  }
  for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    MIB = X86InstrAddOperand(MIB, MO);
  }
  return MIB;
}

static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
                              SmallVector<MachineOperand,4> &MOs,
                              MachineInstr *MI, const TargetInstrInfo &TII) {
  MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
  MachineInstrBuilder MIB(NewMI);

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
      assert(MO.isRegister() && "Expected to fold into reg operand!");
      unsigned NumAddrOps = MOs.size();
      for (unsigned i = 0; i != NumAddrOps; ++i)
        MIB = X86InstrAddOperand(MIB, MOs[i]);
      if (NumAddrOps < 4)  // FrameIndex only
        MIB.addImm(1).addReg(0).addImm(0);
    } else {
      MIB = X86InstrAddOperand(MIB, MO);
    }
  }
  return MIB;
}

static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                SmallVector<MachineOperand,4> &MOs,
                                MachineInstr *MI) {
  MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));

  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB = X86InstrAddOperand(MIB, MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    MIB.addImm(1).addReg(0).addImm(0);
  return MIB.addImm(0);
}
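// foldMemoryOperand - Attempt to rewrite MI so that its register operand 'i'
// is replaced by the memory address described by MOs, using the fold tables
// built up in the constructor. Returns the fused instruction, or NULL if
// this opcode/operand combination has no memory form.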
MachineInstr*
X86InstrInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
                                SmallVector<MachineOperand,4> &MOs) const {
  const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
  bool isTwoAddrFold = false;
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  MachineInstr *NewMI = NULL;
  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places.  It requires
  // replacing the *two* registers with the memory location.
  if (isTwoAddr && NumOps >= 2 && i < 2 &&
      MI->getOperand(0).isRegister() &&
      MI->getOperand(1).isRegister() &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
    isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
    if (MI->getOpcode() == X86::MOV16r0)
      NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV32r0)
      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV64r0)
      NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
    else if (MI->getOpcode() == X86::MOV8r0)
      NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
    if (NewMI) {
      NewMI->copyKillDeadInfo(MI);
      return NewMI;
    }

    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (i == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (i == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  // If table selected...
  if (OpcodeTablePtr) {
    // Find the Opcode to fuse
    DenseMap<unsigned*, unsigned>::iterator I =
      OpcodeTablePtr->find((unsigned*)MI->getOpcode());
    if (I != OpcodeTablePtr->end()) {
      if (isTwoAddrFold)
        NewMI = FuseTwoAddrInst(I->second, MOs, MI, *this);
      else
        NewMI = FuseInst(I->second, i, MOs, MI, *this);
      NewMI->copyKillDeadInfo(MI);
      return NewMI;
    }
  }

  // No fusion
  if (PrintFailedFusing)
    cerr << "We failed to fuse operand " << i << *MI;
  return NULL;
}
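// foldMemoryOperand - Fold a load from or a store to FrameIndex into MI.
// A TESTrr whose two (identical) register operands are both being folded is
// first rewritten as a CMPri against zero so the remaining register operand
// becomes foldable.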
MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
                                              MachineInstr *MI,
                                              SmallVectorImpl<unsigned> &Ops,
                                              int FrameIndex) const {
  // Check switch flag
  if (NoFusing) return NULL;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
  // FIXME: Move alignment requirement into tables?
  if (Alignment < 16) {
    switch (MI->getOpcode()) {
    default: break;
    // Not always safe to fold movsd into these instructions since their load
    // folding variants expect the address to be 16 byte aligned.
    case X86::FsANDNPDrr:
    case X86::FsANDNPSrr:
    case X86::FsANDPDrr:
    case X86::FsANDPSrr:
    case X86::FsORPDrr:
    case X86::FsORPSrr:
    case X86::FsXORPDrr:
    case X86::FsXORPSrr:
      return NULL;
    }
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
    }
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,4> MOs;
  MOs.push_back(MachineOperand::CreateFI(FrameIndex));
  return foldMemoryOperand(MI, Ops[0], MOs);
}

MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
                                              MachineInstr *MI,
                                              SmallVectorImpl<unsigned> &Ops,
                                              MachineInstr *LoadMI) const {
  // Check switch flag
  if (NoFusing) return NULL;

  unsigned Alignment = 0;
  for (unsigned i = 0, e = LoadMI->getNumMemOperands(); i != e; ++i) {
    const MachineMemOperand &MRO = LoadMI->getMemOperand(i);
    unsigned Align = MRO.getAlignment();
    if (Align > Alignment)
      Alignment = Align;
  }

  // FIXME: Move alignment requirement into tables?
  if (Alignment < 16) {
    switch (MI->getOpcode()) {
    default: break;
    // Not always safe to fold movsd into these instructions since their load
    // folding variants expect the address to be 16 byte aligned.
    case X86::FsANDNPDrr:
    case X86::FsANDNPSrr:
    case X86::FsANDPDrr:
    case X86::FsANDPSrr:
    case X86::FsORPDrr:
    case X86::FsORPSrr:
    case X86::FsXORPDrr:
    case X86::FsXORPSrr:
      return NULL;
    }
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
    }
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,4> MOs;
  unsigned NumOps = LoadMI->getDesc().getNumOperands();
  for (unsigned i = NumOps - 4; i != NumOps; ++i)
    MOs.push_back(LoadMI->getOperand(i));
  return foldMemoryOperand(MI, Ops[0], MOs);
}
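// canFoldMemoryOperand - Return true if foldMemoryOperand would succeed on
// this instruction and operand set, without actually constructing the folded
// instruction.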
bool X86InstrInfo::canFoldMemoryOperand(MachineInstr *MI,
                                        SmallVectorImpl<unsigned> &Ops) const {
  // Check switch flag
  if (NoFusing) return 0;

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    switch (MI->getOpcode()) {
    default: return false;
    case X86::TEST8rr:
    case X86::TEST16rr:
    case X86::TEST32rr:
    case X86::TEST64rr:
      return true;
    }
  }

  if (Ops.size() != 1)
    return false;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places.  It requires
  // replacing the *two* registers with the memory location.
  const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
  } else if (OpNum == 0) { // If operand 0
    switch (Opc) {
    case X86::MOV16r0:
    case X86::MOV32r0:
    case X86::MOV64r0:
    case X86::MOV8r0:
      return true;
    default: break;
    }
    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (OpNum == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (OpNum == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  if (OpcodeTablePtr) {
    // Find the Opcode to fuse
    DenseMap<unsigned*, unsigned>::iterator I =
      OpcodeTablePtr->find((unsigned*)Opc);
    if (I != OpcodeTablePtr->end())
      return true;
  }
  return false;
}
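// unfoldMemoryOperand - Split a folded instruction back into a load, a
// register-form data processing instruction, and/or a store. In each
// MemOp2RegOpTable entry, the low four bits of the value hold the memory
// operand index, bit 4 records a folded load, and bit 5 a folded store.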
bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)MI->getOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return false;
  UnfoldLoad &= FoldedLoad;
  if (UnfoldStore && !FoldedStore)
    return false;
  UnfoldStore &= FoldedStore;

  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
    ? getPointerRegClass() : RI.getRegClass(TOI.RegClass);
  SmallVector<MachineOperand,4> AddrOps;
  SmallVector<MachineOperand,2> BeforeOps;
  SmallVector<MachineOperand,2> AfterOps;
  SmallVector<MachineOperand,4> ImpOps;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (i >= Index && i < Index+4)
      AddrOps.push_back(Op);
    else if (Op.isRegister() && Op.isImplicit())
      ImpOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }

  // Emit the load instruction.
  if (UnfoldLoad) {
    loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
    if (UnfoldStore) {
      // Address operands cannot be marked isKill.
      for (unsigned i = 1; i != 5; ++i) {
        MachineOperand &MO = NewMIs[0]->getOperand(i);
        if (MO.isRegister())
          MO.setIsKill(false);
      }
    }
  }

  // Emit the data processing instruction.
  MachineInstr *DataMI = new MachineInstr(TID, true);
  MachineInstrBuilder MIB(DataMI);

  if (FoldedStore)
    MIB.addReg(Reg, true);
  for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, BeforeOps[i]);
  if (FoldedLoad)
    MIB.addReg(Reg);
  for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, AfterOps[i]);
  for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
    MachineOperand &MO = ImpOps[i];
    MIB.addReg(MO.getReg(), MO.isDef(), true, MO.isKill(), MO.isDead());
  }
  // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
  unsigned NewOpc = 0;
  switch (DataMI->getOpcode()) {
  default: break;
  case X86::CMP64ri32:
  case X86::CMP32ri:
  case X86::CMP16ri:
  case X86::CMP8ri: {
    MachineOperand &MO0 = DataMI->getOperand(0);
    MachineOperand &MO1 = DataMI->getOperand(1);
    if (MO1.getImm() == 0) {
      switch (DataMI->getOpcode()) {
      default: break;
      case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
      case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
      case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
      case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
      }
      DataMI->setDesc(get(NewOpc));
      MO1.ChangeToRegister(MO0.getReg(), false);
    }
  }
  }
  NewMIs.push_back(DataMI);

  // Emit the store instruction.
  if (UnfoldStore) {
    const TargetOperandInfo &DstTOI = TID.OpInfo[0];
    const TargetRegisterClass *DstRC = DstTOI.isLookupPtrRegClass()
      ? getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
    storeRegToAddr(MF, Reg, true, AddrOps, DstRC, NewMIs);
  }

  return true;
}
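// This is the SelectionDAG variant of unfoldMemoryOperand: it rebuilds the
// folded node as separate target load, data-processing, and store nodes.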
bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                  SmallVectorImpl<SDNode*> &NewNodes) const {
  if (!N->isTargetOpcode())
    return false;

  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)N->getTargetOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
    ? getPointerRegClass() : RI.getRegClass(TOI.RegClass);
  std::vector<SDOperand> AddrOps;
  std::vector<SDOperand> BeforeOps;
  std::vector<SDOperand> AfterOps;
  unsigned NumOps = N->getNumOperands();
  for (unsigned i = 0; i != NumOps-1; ++i) {
    SDOperand Op = N->getOperand(i);
    if (i >= Index && i < Index+4)
      AddrOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }
  SDOperand Chain = N->getOperand(NumOps-1);
  AddrOps.push_back(Chain);

  // Emit the load instruction.
  SDNode *Load = 0;
  if (FoldedLoad) {
    MVT::ValueType VT = *RC->vt_begin();
    Load = DAG.getTargetNode(getLoadRegOpcode(RC, RI.getStackAlignment()), VT,
                             MVT::Other, &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Load);
  }

  // Emit the data processing instruction.
  std::vector<MVT::ValueType> VTs;
  const TargetRegisterClass *DstRC = 0;
  if (TID.getNumDefs() > 0) {
    const TargetOperandInfo &DstTOI = TID.OpInfo[0];
    DstRC = DstTOI.isLookupPtrRegClass()
      ? getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
    VTs.push_back(*DstRC->vt_begin());
  }
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT::ValueType VT = N->getValueType(i);
    if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
      VTs.push_back(VT);
  }
  if (Load)
    BeforeOps.push_back(SDOperand(Load, 0));
  std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
  SDNode *NewNode = DAG.getTargetNode(Opc, VTs, &BeforeOps[0],
                                      BeforeOps.size());
  NewNodes.push_back(NewNode);

  // Emit the store instruction.
  if (FoldedStore) {
    AddrOps.pop_back();
    AddrOps.push_back(SDOperand(NewNode, 0));
    AddrOps.push_back(Chain);
    SDNode *Store =
      DAG.getTargetNode(getStoreRegOpcode(DstRC, RI.getStackAlignment()),
                        MVT::Other, &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Store);
  }

  return true;
}

unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)Opc);
  if (I == MemOp2RegOpTable.end())
    return 0;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return 0;
  if (UnfoldStore && !FoldedStore)
    return 0;
  return I->second.first;
}

bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::TCRETURNri:
  case X86::TCRETURNdi:
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP64r:  // Indirect branch (64-bit).
  case X86::JMP32m:  // Indirect branch through mem.
  case X86::JMP64m:  // Indirect branch through mem (64-bit).
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
  return false;
}

const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}

unsigned X86InstrInfo::sizeOfImm(const TargetInstrDesc *Desc) {
  switch (Desc->TSFlags & X86II::ImmMask) {
  case X86II::Imm8:   return 1;
  case X86II::Imm16:  return 2;
  case X86II::Imm32:  return 4;
  case X86II::Imm64:  return 8;
  default: assert(0 && "Immediate size not set!");
    return 0;
  }
}
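// For example (illustrative), an instruction like ADD32ri carries an Imm32
// immediate field, so sizeOfImm returns 4 for it.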
/// isX86_64ExtendedReg - Is the MachineOperand an x86-64 extended register?
/// e.g. r8, xmm8, etc.
bool X86InstrInfo::isX86_64ExtendedReg(const MachineOperand &MO) {
  if (!MO.isRegister()) return false;
  switch (MO.getReg()) {
  default: break;
  case X86::R8:    case X86::R9:    case X86::R10:   case X86::R11:
  case X86::R12:   case X86::R13:   case X86::R14:   case X86::R15:
  case X86::R8D:   case X86::R9D:   case X86::R10D:  case X86::R11D:
  case X86::R12D:  case X86::R13D:  case X86::R14D:  case X86::R15D:
  case X86::R8W:   case X86::R9W:   case X86::R10W:  case X86::R11W:
  case X86::R12W:  case X86::R13W:  case X86::R14W:  case X86::R15W:
  case X86::R8B:   case X86::R9B:   case X86::R10B:  case X86::R11B:
  case X86::R12B:  case X86::R13B:  case X86::R14B:  case X86::R15B:
  case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
    return true;
  }
  return false;
}


/// determineREX - Determine whether the MachineInstr has to be encoded with
/// an x86-64 REX prefix, which specifies 1) 64-bit instructions, 2)
/// non-default operand size, and 3) use of x86-64 extended registers.
unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
  unsigned REX = 0;
  const TargetInstrDesc &Desc = MI.getDesc();

  // Pseudo instructions do not need REX prefix byte.
  if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
    return 0;
  if (Desc.TSFlags & X86II::REX_W)
    REX |= 1 << 3;

  unsigned NumOps = Desc.getNumOperands();
  if (NumOps) {
    bool isTwoAddr = NumOps > 1 &&
      Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;

    // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
    unsigned i = isTwoAddr ? 1 : 0;
    for (unsigned e = NumOps; i != e; ++i) {
      const MachineOperand& MO = MI.getOperand(i);
      if (MO.isRegister()) {
        unsigned Reg = MO.getReg();
        if (isX86_64NonExtLowByteReg(Reg))
          REX |= 0x40;
      }
    }

    switch (Desc.TSFlags & X86II::FormMask) {
    case X86II::MRMInitReg:
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= (1 << 0) | (1 << 2);
      break;
    case X86II::MRMSrcReg: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 2;
      i = isTwoAddr ? 2 : 1;
      for (unsigned e = NumOps; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (isX86_64ExtendedReg(MO))
          REX |= 1 << 0;
      }
      break;
    }
    case X86II::MRMSrcMem: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 2;
      unsigned Bit = 0;
      i = isTwoAddr ? 2 : 1;
      for (; i != NumOps; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (MO.isRegister()) {
          if (isX86_64ExtendedReg(MO))
            REX |= 1 << Bit;
          Bit++;
        }
      }
      break;
    }
    case X86II::MRM0m: case X86II::MRM1m:
    case X86II::MRM2m: case X86II::MRM3m:
    case X86II::MRM4m: case X86II::MRM5m:
    case X86II::MRM6m: case X86II::MRM7m:
    case X86II::MRMDestMem: {
      unsigned e = isTwoAddr ? 5 : 4;
      i = isTwoAddr ? 1 : 0;
      if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
        REX |= 1 << 2;
      unsigned Bit = 0;
      for (; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (MO.isRegister()) {
          if (isX86_64ExtendedReg(MO))
            REX |= 1 << Bit;
          Bit++;
        }
      }
      break;
    }
    default: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 0;
      i = isTwoAddr ? 2 : 1;
      for (unsigned e = NumOps; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (isX86_64ExtendedReg(MO))
          REX |= 1 << 2;
      }
      break;
    }
    }
  }
  return REX;
}
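// In the REX value computed above, bit 3 is REX.W (64-bit operand size) and
// bits 2, 1, and 0 are REX.R, REX.X, and REX.B, which extend the ModR/M reg,
// SIB index, and ModR/M r/m (or SIB base) register fields respectively.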
/// sizePCRelativeBlockAddress - This method returns the size of a PC
/// relative block address instruction.
///
static unsigned sizePCRelativeBlockAddress() {
  return 4;
}

/// sizeGlobalAddress - Give the size of the emission of this global address.
///
static unsigned sizeGlobalAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeConstPoolAddress - Give the size of the emission of this constant
/// pool address.
///
static unsigned sizeConstPoolAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeExternalSymbolAddress - Give the size of the emission of this external
/// symbol.
///
static unsigned sizeExternalSymbolAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeJumpTableAddress - Give the size of the emission of this jump
/// table address.
///
static unsigned sizeJumpTableAddress(bool dword) {
  return dword ? 8 : 4;
}

static unsigned sizeConstant(unsigned Size) {
  return Size;
}

static unsigned sizeRegModRMByte() {
  return 1;
}

static unsigned sizeSIBByte() {
  return 1;
}

static unsigned getDisplacementFieldSize(const MachineOperand *RelocOp) {
  unsigned FinalSize = 0;
  // If this is a simple integer displacement that doesn't require a relocation.
  if (!RelocOp) {
    FinalSize += sizeConstant(4);
    return FinalSize;
  }

  // Otherwise, this is something that requires a relocation.
  if (RelocOp->isGlobalAddress()) {
    FinalSize += sizeGlobalAddress(false);
  } else if (RelocOp->isConstantPoolIndex()) {
    FinalSize += sizeConstPoolAddress(false);
  } else if (RelocOp->isJumpTableIndex()) {
    FinalSize += sizeJumpTableAddress(false);
  } else {
    assert(0 && "Unknown value to relocate!");
  }
  return FinalSize;
}

static unsigned getMemModRMByteSize(const MachineInstr &MI, unsigned Op,
                                    bool IsPIC, bool Is64BitMode) {
  const MachineOperand &Op3 = MI.getOperand(Op+3);
  int DispVal = 0;
  const MachineOperand *DispForReloc = 0;
  unsigned FinalSize = 0;

  // Figure out what sort of displacement we have to handle here.
  if (Op3.isGlobalAddress()) {
    DispForReloc = &Op3;
  } else if (Op3.isConstantPoolIndex()) {
    if (Is64BitMode || IsPIC) {
      DispForReloc = &Op3;
    } else {
      DispVal = 1;
    }
  } else if (Op3.isJumpTableIndex()) {
    if (Is64BitMode || IsPIC) {
      DispForReloc = &Op3;
    } else {
      DispVal = 1;
    }
  } else {
    DispVal = 1;
  }

  const MachineOperand &Base     = MI.getOperand(Op);
  const MachineOperand &IndexReg = MI.getOperand(Op+2);

  unsigned BaseReg = Base.getReg();

  // Is a SIB byte needed?
  if (IndexReg.getReg() == 0 &&
      (BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) {
    if (BaseReg == 0) {  // Just a displacement?
      // Emit special case [disp32] encoding
      ++FinalSize;
      FinalSize += getDisplacementFieldSize(DispForReloc);
    } else {
      unsigned BaseRegNo = X86RegisterInfo::getX86RegNum(BaseReg);
      if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) {
        // Emit simple indirect register encoding... [EAX] f.e.
        ++FinalSize;
        // Be pessimistic and assume it's a disp32, not a disp8
      } else {
        // Emit the most general non-SIB encoding: [REG+disp32]
        ++FinalSize;
        FinalSize += getDisplacementFieldSize(DispForReloc);
      }
    }

  } else {  // We need a SIB byte, so start by outputting the ModR/M byte first
    assert(IndexReg.getReg() != X86::ESP &&
           IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

    bool ForceDisp32 = false;
    if (BaseReg == 0 || DispForReloc) {
      // Emit the normal disp32 encoding.
      ++FinalSize;
      ForceDisp32 = true;
    } else {
      ++FinalSize;
    }

    FinalSize += sizeSIBByte();

    // Do we need to output a displacement?
    if (DispVal != 0 || ForceDisp32) {
      FinalSize += getDisplacementFieldSize(DispForReloc);
    }
  }
  return FinalSize;
}
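// GetInstSizeWithDesc - Conservatively estimate the encoded size of MI in
// bytes by summing the sizes of its prefixes, opcode, ModR/M and SIB bytes,
// displacement, and immediate fields.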
static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
                                    const TargetInstrDesc *Desc,
                                    bool IsPIC, bool Is64BitMode) {

  unsigned Opcode = Desc->Opcode;
  unsigned FinalSize = 0;

  // Emit the lock opcode prefix as needed.
  if (Desc->TSFlags & X86II::LOCK) ++FinalSize;

  // Emit the repeat opcode prefix as needed.
  if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) ++FinalSize;

  // Emit the operand size opcode prefix as needed.
  if (Desc->TSFlags & X86II::OpSize) ++FinalSize;

  // Emit the address size opcode prefix as needed.
  if (Desc->TSFlags & X86II::AdSize) ++FinalSize;

  bool Need0FPrefix = false;
  switch (Desc->TSFlags & X86II::Op0Mask) {
  case X86II::TB:  // Two-byte opcode prefix
  case X86II::T8:  // 0F 38
  case X86II::TA:  // 0F 3A
    Need0FPrefix = true;
    break;
  case X86II::REP: break; // already handled.
  case X86II::XS:   // F3 0F
    ++FinalSize;
    Need0FPrefix = true;
    break;
  case X86II::XD:   // F2 0F
    ++FinalSize;
    Need0FPrefix = true;
    break;
  case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
  case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
    ++FinalSize;
    break; // Two-byte opcode prefix
  default: assert(0 && "Invalid prefix!");
  case 0: break;  // No prefix!
  }

  if (Is64BitMode) {
    // REX prefix
    unsigned REX = X86InstrInfo::determineREX(MI);
    if (REX)
      ++FinalSize;
  }

  // 0x0F escape code must be emitted just before the opcode.
  if (Need0FPrefix)
    ++FinalSize;

  switch (Desc->TSFlags & X86II::Op0Mask) {
  case X86II::T8:  // 0F 38
    ++FinalSize;
    break;
  case X86II::TA:  // 0F 3A
    ++FinalSize;
    break;
  }

  // If this is a two-address instruction, skip one of the register operands.
  unsigned NumOps = Desc->getNumOperands();
  unsigned CurOp = 0;
  if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)
    CurOp++;

  switch (Desc->TSFlags & X86II::FormMask) {
  default: assert(0 && "Unknown FormMask value in X86 MachineCodeEmitter!");
  case X86II::Pseudo:
    // Remember the current PC offset, this is the PIC relocation
    // base address.
    switch (Opcode) {
    default:
      break;
    case TargetInstrInfo::INLINEASM: {
      const MachineFunction *MF = MI.getParent()->getParent();
      const char *AsmStr = MI.getOperand(0).getSymbolName();
      const TargetAsmInfo* AI = MF->getTarget().getTargetAsmInfo();
      FinalSize += AI->getInlineAsmLength(AsmStr);
      break;
    }
    case TargetInstrInfo::LABEL:
      break;
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case X86::DWARF_LOC:
    case X86::FP_REG_KILL:
      break;
    case X86::MOVPC32r: {
      // This emits the "call" portion of this pseudo instruction.
      ++FinalSize;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      break;
    }
    }
    CurOp = NumOps;
    break;
  case X86II::RawFrm:
    ++FinalSize;

    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      if (MO.isMachineBasicBlock()) {
        FinalSize += sizePCRelativeBlockAddress();
      } else if (MO.isGlobalAddress()) {
        FinalSize += sizeGlobalAddress(false);
      } else if (MO.isExternalSymbol()) {
        FinalSize += sizeExternalSymbolAddress(false);
      } else if (MO.isImmediate()) {
        FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      } else {
        assert(0 && "Unknown RawFrm operand!");
      }
    }
    break;

  case X86II::AddRegFrm:
    ++FinalSize;
    ++CurOp;

    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO1.isImmediate())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64ri)
          dword = true;
        if (MO1.isGlobalAddress()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO1.isExternalSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO1.isConstantPoolIndex())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO1.isJumpTableIndex())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;

  case X86II::MRMDestReg: {
    ++FinalSize;
    FinalSize += sizeRegModRMByte();
    CurOp += 2;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }
  case X86II::MRMDestMem: {
    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
    CurOp += 5;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }

  case X86II::MRMSrcReg:
    ++FinalSize;
    FinalSize += sizeRegModRMByte();
    CurOp += 2;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;

  case X86II::MRMSrcMem: {

    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
    CurOp += 5;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }

  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    ++FinalSize;
    ++CurOp;
    FinalSize += sizeRegModRMByte();

    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO1.isImmediate())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64ri32)
          dword = true;
        if (MO1.isGlobalAddress()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO1.isExternalSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO1.isConstantPoolIndex())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO1.isJumpTableIndex())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;

  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {

    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
    CurOp += 4;

    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO.isImmediate())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64mi32)
          dword = true;
        if (MO.isGlobalAddress()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO.isExternalSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO.isConstantPoolIndex())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO.isJumpTableIndex())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;
  }

  case X86II::MRMInitReg:
    ++FinalSize;
    // Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
    FinalSize += sizeRegModRMByte();
    ++CurOp;
    break;
  }

  if (!Desc->isVariadic() && CurOp != NumOps) {
    cerr << "Cannot determine size: ";
    MI.dump();
    cerr << '\n';
    abort();
  }


  return FinalSize;
}


unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  bool IsPIC = (TM.getRelocationModel() == Reloc::PIC_);
  bool Is64BitMode = ((X86Subtarget*)TM.getSubtargetImpl())->is64Bit();
  unsigned Size = GetInstSizeWithDesc(*MI, &Desc, IsPIC, Is64BitMode);
  if (Desc.getOpcode() == X86::MOVPC32r) {
    Size += GetInstSizeWithDesc(*MI, &get(X86::POP32r), IsPIC, Is64BitMode);
  }
  return Size;
}