X86InstrInfo.cpp revision 23066288fdf4867f53f208f9aaf2952b1c049394
//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetAsmInfo.h"

using namespace llvm;

namespace {
  cl::opt<bool>
  NoFusing("disable-spill-fusing",
           cl::desc("Disable fusing of spill code into instructions"));
  cl::opt<bool>
  PrintFailedFusing("print-failed-fuse-candidates",
                    cl::desc("Print instructions that the allocator wants to"
                             " fuse, but the X86 backend currently can't"),
                    cl::Hidden);
  cl::opt<bool>
  ReMatPICStubLoad("remat-pic-stub-load",
                   cl::desc("Re-materialize load from stub in PIC mode"),
                   cl::init(false), cl::Hidden);
}

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
  SmallVector<unsigned,16> AmbEntries;
  static const unsigned OpTbl2Addr[][2] = {
    { X86::ADC32ri, X86::ADC32mi },       { X86::ADC32ri8, X86::ADC32mi8 },
    { X86::ADC32rr, X86::ADC32mr },       { X86::ADC64ri32, X86::ADC64mi32 },
    { X86::ADC64ri8, X86::ADC64mi8 },     { X86::ADC64rr, X86::ADC64mr },
    { X86::ADD16ri, X86::ADD16mi },       { X86::ADD16ri8, X86::ADD16mi8 },
    { X86::ADD16rr, X86::ADD16mr },       { X86::ADD32ri, X86::ADD32mi },
    { X86::ADD32ri8, X86::ADD32mi8 },     { X86::ADD32rr, X86::ADD32mr },
    { X86::ADD64ri32, X86::ADD64mi32 },   { X86::ADD64ri8, X86::ADD64mi8 },
    { X86::ADD64rr, X86::ADD64mr },       { X86::ADD8ri, X86::ADD8mi },
    { X86::ADD8rr, X86::ADD8mr },         { X86::AND16ri, X86::AND16mi },
    { X86::AND16ri8, X86::AND16mi8 },     { X86::AND16rr, X86::AND16mr },
    { X86::AND32ri, X86::AND32mi },       { X86::AND32ri8, X86::AND32mi8 },
    { X86::AND32rr, X86::AND32mr },       { X86::AND64ri32, X86::AND64mi32 },
    { X86::AND64ri8, X86::AND64mi8 },     { X86::AND64rr, X86::AND64mr },
    { X86::AND8ri, X86::AND8mi },         { X86::AND8rr, X86::AND8mr },
    { X86::DEC16r, X86::DEC16m },         { X86::DEC32r, X86::DEC32m },
    { X86::DEC64_16r, X86::DEC64_16m },   { X86::DEC64_32r, X86::DEC64_32m },
    { X86::DEC64r, X86::DEC64m },         { X86::DEC8r, X86::DEC8m },
    { X86::INC16r, X86::INC16m },         { X86::INC32r, X86::INC32m },
    { X86::INC64_16r, X86::INC64_16m },   { X86::INC64_32r, X86::INC64_32m },
    { X86::INC64r, X86::INC64m },         { X86::INC8r, X86::INC8m },
    { X86::NEG16r, X86::NEG16m },         { X86::NEG32r, X86::NEG32m },
    { X86::NEG64r, X86::NEG64m },         { X86::NEG8r, X86::NEG8m },
    { X86::NOT16r, X86::NOT16m },         { X86::NOT32r, X86::NOT32m },
    { X86::NOT64r, X86::NOT64m },         { X86::NOT8r, X86::NOT8m },
    { X86::OR16ri, X86::OR16mi },         { X86::OR16ri8, X86::OR16mi8 },
    { X86::OR16rr, X86::OR16mr },         { X86::OR32ri, X86::OR32mi },
    { X86::OR32ri8, X86::OR32mi8 },       { X86::OR32rr, X86::OR32mr },
    { X86::OR64ri32, X86::OR64mi32 },     { X86::OR64ri8, X86::OR64mi8 },
    { X86::OR64rr, X86::OR64mr },         { X86::OR8ri, X86::OR8mi },
    { X86::OR8rr, X86::OR8mr },           { X86::ROL16r1, X86::ROL16m1 },
    { X86::ROL16rCL, X86::ROL16mCL },     { X86::ROL16ri, X86::ROL16mi },
    { X86::ROL32r1, X86::ROL32m1 },       { X86::ROL32rCL, X86::ROL32mCL },
    { X86::ROL32ri, X86::ROL32mi },       { X86::ROL64r1, X86::ROL64m1 },
    { X86::ROL64rCL, X86::ROL64mCL },     { X86::ROL64ri, X86::ROL64mi },
    { X86::ROL8r1, X86::ROL8m1 },         { X86::ROL8rCL, X86::ROL8mCL },
    { X86::ROL8ri, X86::ROL8mi },         { X86::ROR16r1, X86::ROR16m1 },
    { X86::ROR16rCL, X86::ROR16mCL },     { X86::ROR16ri, X86::ROR16mi },
    { X86::ROR32r1, X86::ROR32m1 },       { X86::ROR32rCL, X86::ROR32mCL },
    { X86::ROR32ri, X86::ROR32mi },       { X86::ROR64r1, X86::ROR64m1 },
    { X86::ROR64rCL, X86::ROR64mCL },     { X86::ROR64ri, X86::ROR64mi },
    { X86::ROR8r1, X86::ROR8m1 },         { X86::ROR8rCL, X86::ROR8mCL },
    { X86::ROR8ri, X86::ROR8mi },         { X86::SAR16r1, X86::SAR16m1 },
    { X86::SAR16rCL, X86::SAR16mCL },     { X86::SAR16ri, X86::SAR16mi },
    { X86::SAR32r1, X86::SAR32m1 },       { X86::SAR32rCL, X86::SAR32mCL },
    { X86::SAR32ri, X86::SAR32mi },       { X86::SAR64r1, X86::SAR64m1 },
    { X86::SAR64rCL, X86::SAR64mCL },     { X86::SAR64ri, X86::SAR64mi },
    { X86::SAR8r1, X86::SAR8m1 },         { X86::SAR8rCL, X86::SAR8mCL },
    { X86::SAR8ri, X86::SAR8mi },         { X86::SBB32ri, X86::SBB32mi },
    { X86::SBB32ri8, X86::SBB32mi8 },     { X86::SBB32rr, X86::SBB32mr },
    { X86::SBB64ri32, X86::SBB64mi32 },   { X86::SBB64ri8, X86::SBB64mi8 },
    { X86::SBB64rr, X86::SBB64mr },       { X86::SHL16rCL, X86::SHL16mCL },
    { X86::SHL16ri, X86::SHL16mi },       { X86::SHL32rCL, X86::SHL32mCL },
    { X86::SHL32ri, X86::SHL32mi },       { X86::SHL64rCL, X86::SHL64mCL },
    { X86::SHL64ri, X86::SHL64mi },       { X86::SHL8rCL, X86::SHL8mCL },
    { X86::SHL8ri, X86::SHL8mi },         { X86::SHLD16rrCL, X86::SHLD16mrCL },
    { X86::SHLD16rri8, X86::SHLD16mri8 }, { X86::SHLD32rrCL, X86::SHLD32mrCL },
    { X86::SHLD32rri8, X86::SHLD32mri8 }, { X86::SHLD64rrCL, X86::SHLD64mrCL },
    { X86::SHLD64rri8, X86::SHLD64mri8 }, { X86::SHR16r1, X86::SHR16m1 },
    { X86::SHR16rCL, X86::SHR16mCL },     { X86::SHR16ri, X86::SHR16mi },
    { X86::SHR32r1, X86::SHR32m1 },       { X86::SHR32rCL, X86::SHR32mCL },
    { X86::SHR32ri, X86::SHR32mi },       { X86::SHR64r1, X86::SHR64m1 },
    { X86::SHR64rCL, X86::SHR64mCL },     { X86::SHR64ri, X86::SHR64mi },
    { X86::SHR8r1, X86::SHR8m1 },         { X86::SHR8rCL, X86::SHR8mCL },
    { X86::SHR8ri, X86::SHR8mi },         { X86::SHRD16rrCL, X86::SHRD16mrCL },
    { X86::SHRD16rri8, X86::SHRD16mri8 }, { X86::SHRD32rrCL, X86::SHRD32mrCL },
    { X86::SHRD32rri8, X86::SHRD32mri8 }, { X86::SHRD64rrCL, X86::SHRD64mrCL },
    { X86::SHRD64rri8, X86::SHRD64mri8 }, { X86::SUB16ri, X86::SUB16mi },
    { X86::SUB16ri8, X86::SUB16mi8 },     { X86::SUB16rr, X86::SUB16mr },
    { X86::SUB32ri, X86::SUB32mi },       { X86::SUB32ri8, X86::SUB32mi8 },
    { X86::SUB32rr, X86::SUB32mr },       { X86::SUB64ri32, X86::SUB64mi32 },
    { X86::SUB64ri8, X86::SUB64mi8 },     { X86::SUB64rr, X86::SUB64mr },
    { X86::SUB8ri, X86::SUB8mi },         { X86::SUB8rr, X86::SUB8mr },
    { X86::XOR16ri, X86::XOR16mi },       { X86::XOR16ri8, X86::XOR16mi8 },
    { X86::XOR16rr, X86::XOR16mr },       { X86::XOR32ri, X86::XOR32mi },
    { X86::XOR32ri8, X86::XOR32mi8 },     { X86::XOR32rr, X86::XOR32mr },
    { X86::XOR64ri32, X86::XOR64mi32 },   { X86::XOR64ri8, X86::XOR64mi8 },
    { X86::XOR64rr, X86::XOR64mr },       { X86::XOR8ri, X86::XOR8mi },
    { X86::XOR8rr, X86::XOR8mr }
  };
  for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
    unsigned RegOp = OpTbl2Addr[i][0];
    unsigned MemOp = OpTbl2Addr[i][1];
    if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp,
                                                     MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 0 | (1 << 4) | (1 << 5); // Index 0, folded load and store
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp,
                                                               AuxInfo))).second)
      AmbEntries.push_back(MemOp);
  }
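  // A note on the AuxInfo encoding used by MemOp2RegOpTable (inferred from
  // its uses in this constructor): the low four bits hold the index of the
  // operand that was folded, bit 4 marks a folded load, and bit 5 a folded
  // store.  The value 0 | (1 << 4) | (1 << 5) above therefore decodes as
  // "operand 0, with both a load and a store folded", i.e. a
  // read-modify-write memory form.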
  // If the third value is 1, then it's folding either a load or a store.
  static const unsigned OpTbl0[][3] = {
    { X86::CALL32r, X86::CALL32m, 1 },       { X86::CALL64r, X86::CALL64m, 1 },
    { X86::CMP16ri, X86::CMP16mi, 1 },       { X86::CMP16ri8, X86::CMP16mi8, 1 },
    { X86::CMP16rr, X86::CMP16mr, 1 },       { X86::CMP32ri, X86::CMP32mi, 1 },
    { X86::CMP32ri8, X86::CMP32mi8, 1 },     { X86::CMP32rr, X86::CMP32mr, 1 },
    { X86::CMP64ri32, X86::CMP64mi32, 1 },   { X86::CMP64ri8, X86::CMP64mi8, 1 },
    { X86::CMP64rr, X86::CMP64mr, 1 },       { X86::CMP8ri, X86::CMP8mi, 1 },
    { X86::CMP8rr, X86::CMP8mr, 1 },         { X86::DIV16r, X86::DIV16m, 1 },
    { X86::DIV32r, X86::DIV32m, 1 },         { X86::DIV64r, X86::DIV64m, 1 },
    { X86::DIV8r, X86::DIV8m, 1 },           { X86::EXTRACTPSrr, X86::EXTRACTPSmr, 0 },
    { X86::FsMOVAPDrr, X86::MOVSDmr, 0 },    { X86::FsMOVAPSrr, X86::MOVSSmr, 0 },
    { X86::IDIV16r, X86::IDIV16m, 1 },       { X86::IDIV32r, X86::IDIV32m, 1 },
    { X86::IDIV64r, X86::IDIV64m, 1 },       { X86::IDIV8r, X86::IDIV8m, 1 },
    { X86::IMUL16r, X86::IMUL16m, 1 },       { X86::IMUL32r, X86::IMUL32m, 1 },
    { X86::IMUL64r, X86::IMUL64m, 1 },       { X86::IMUL8r, X86::IMUL8m, 1 },
    { X86::JMP32r, X86::JMP32m, 1 },         { X86::JMP64r, X86::JMP64m, 1 },
    { X86::MOV16ri, X86::MOV16mi, 0 },       { X86::MOV16rr, X86::MOV16mr, 0 },
    { X86::MOV16to16_, X86::MOV16_mr, 0 },   { X86::MOV32ri, X86::MOV32mi, 0 },
    { X86::MOV32rr, X86::MOV32mr, 0 },       { X86::MOV32to32_, X86::MOV32_mr, 0 },
    { X86::MOV64ri32, X86::MOV64mi32, 0 },   { X86::MOV64rr, X86::MOV64mr, 0 },
    { X86::MOV8ri, X86::MOV8mi, 0 },         { X86::MOV8rr, X86::MOV8mr, 0 },
    { X86::MOVAPDrr, X86::MOVAPDmr, 0 },     { X86::MOVAPSrr, X86::MOVAPSmr, 0 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, 0 },
    { X86::MOVPQIto64rr, X86::MOVPQI2QImr, 0 },
    { X86::MOVPS2SSrr, X86::MOVPS2SSmr, 0 }, { X86::MOVSDrr, X86::MOVSDmr, 0 },
    { X86::MOVSDto64rr, X86::MOVSDto64mr, 0 },
    { X86::MOVSS2DIrr, X86::MOVSS2DImr, 0 }, { X86::MOVSSrr, X86::MOVSSmr, 0 },
    { X86::MOVUPDrr, X86::MOVUPDmr, 0 },     { X86::MOVUPSrr, X86::MOVUPSmr, 0 },
    { X86::MUL16r, X86::MUL16m, 1 },         { X86::MUL32r, X86::MUL32m, 1 },
    { X86::MUL64r, X86::MUL64m, 1 },         { X86::MUL8r, X86::MUL8m, 1 },
    { X86::SETAEr, X86::SETAEm, 0 },         { X86::SETAr, X86::SETAm, 0 },
    { X86::SETBEr, X86::SETBEm, 0 },         { X86::SETBr, X86::SETBm, 0 },
    { X86::SETEr, X86::SETEm, 0 },           { X86::SETGEr, X86::SETGEm, 0 },
    { X86::SETGr, X86::SETGm, 0 },           { X86::SETLEr, X86::SETLEm, 0 },
    { X86::SETLr, X86::SETLm, 0 },           { X86::SETNEr, X86::SETNEm, 0 },
    { X86::SETNPr, X86::SETNPm, 0 },         { X86::SETNSr, X86::SETNSm, 0 },
    { X86::SETPr, X86::SETPm, 0 },           { X86::SETSr, X86::SETSm, 0 },
    { X86::TAILJMPr, X86::TAILJMPm, 1 },     { X86::TEST16ri, X86::TEST16mi, 1 },
    { X86::TEST32ri, X86::TEST32mi, 1 },     { X86::TEST64ri32, X86::TEST64mi32, 1 },
    { X86::TEST8ri, X86::TEST8mi, 1 }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
    unsigned RegOp = OpTbl0[i][0];
    unsigned MemOp = OpTbl0[i][1];
    if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned FoldedLoad = OpTbl0[i][2];
    // Index 0, folded load or store.
    unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp,
                                                                 AuxInfo))).second)
        AmbEntries.push_back(MemOp);
  }

  static const unsigned OpTbl1[][2] = {
    { X86::CMP16rr, X86::CMP16rm },           { X86::CMP32rr, X86::CMP32rm },
    { X86::CMP64rr, X86::CMP64rm },           { X86::CMP8rr, X86::CMP8rm },
    { X86::CVTSD2SSrr, X86::CVTSD2SSrm },     { X86::CVTSI2SD64rr, X86::CVTSI2SD64rm },
    { X86::CVTSI2SDrr, X86::CVTSI2SDrm },     { X86::CVTSI2SS64rr, X86::CVTSI2SS64rm },
    { X86::CVTSI2SSrr, X86::CVTSI2SSrm },     { X86::CVTSS2SDrr, X86::CVTSS2SDrm },
    { X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm },
    { X86::CVTTSD2SIrr, X86::CVTTSD2SIrm },
    { X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm },
    { X86::CVTTSS2SIrr, X86::CVTTSS2SIrm },
    { X86::FsMOVAPDrr, X86::MOVSDrm },        { X86::FsMOVAPSrr, X86::MOVSSrm },
    { X86::IMUL16rri, X86::IMUL16rmi },       { X86::IMUL16rri8, X86::IMUL16rmi8 },
    { X86::IMUL32rri, X86::IMUL32rmi },       { X86::IMUL32rri8, X86::IMUL32rmi8 },
    { X86::IMUL64rri32, X86::IMUL64rmi32 },   { X86::IMUL64rri8, X86::IMUL64rmi8 },
    { X86::Int_CMPSDrr, X86::Int_CMPSDrm },   { X86::Int_CMPSSrr, X86::Int_CMPSSrm },
    { X86::Int_COMISDrr, X86::Int_COMISDrm }, { X86::Int_COMISSrr, X86::Int_COMISSrm },
    { X86::Int_CVTDQ2PDrr, X86::Int_CVTDQ2PDrm },
    { X86::Int_CVTDQ2PSrr, X86::Int_CVTDQ2PSrm },
    { X86::Int_CVTPD2DQrr, X86::Int_CVTPD2DQrm },
    { X86::Int_CVTPD2PSrr, X86::Int_CVTPD2PSrm },
    { X86::Int_CVTPS2DQrr, X86::Int_CVTPS2DQrm },
    { X86::Int_CVTPS2PDrr, X86::Int_CVTPS2PDrm },
    { X86::Int_CVTSD2SI64rr, X86::Int_CVTSD2SI64rm },
    { X86::Int_CVTSD2SIrr, X86::Int_CVTSD2SIrm },
    { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm },
    { X86::Int_CVTSI2SD64rr, X86::Int_CVTSI2SD64rm },
    { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm },
    { X86::Int_CVTSI2SS64rr, X86::Int_CVTSI2SS64rm },
    { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm },
    { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm },
    { X86::Int_CVTSS2SI64rr, X86::Int_CVTSS2SI64rm },
    { X86::Int_CVTSS2SIrr, X86::Int_CVTSS2SIrm },
    { X86::Int_CVTTPD2DQrr, X86::Int_CVTTPD2DQrm },
    { X86::Int_CVTTPS2DQrr, X86::Int_CVTTPS2DQrm },
    { X86::Int_CVTTSD2SI64rr, X86::Int_CVTTSD2SI64rm },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm },
    { X86::Int_CVTTSS2SI64rr, X86::Int_CVTTSS2SI64rm },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm },
    { X86::Int_UCOMISDrr, X86::Int_UCOMISDrm },
    { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm },
    { X86::MOV16rr, X86::MOV16rm },           { X86::MOV16to16_, X86::MOV16_rm },
    { X86::MOV32rr, X86::MOV32rm },           { X86::MOV32to32_, X86::MOV32_rm },
    { X86::MOV64rr, X86::MOV64rm },           { X86::MOV64toPQIrr, X86::MOVQI2PQIrm },
    { X86::MOV64toSDrr, X86::MOV64toSDrm },   { X86::MOV8rr, X86::MOV8rm },
    { X86::MOVAPDrr, X86::MOVAPDrm },         { X86::MOVAPSrr, X86::MOVAPSrm },
    { X86::MOVDDUPrr, X86::MOVDDUPrm },       { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm },
    { X86::MOVDI2SSrr, X86::MOVDI2SSrm },     { X86::MOVSD2PDrr, X86::MOVSD2PDrm },
    { X86::MOVSDrr, X86::MOVSDrm },           { X86::MOVSHDUPrr, X86::MOVSHDUPrm },
    { X86::MOVSLDUPrr, X86::MOVSLDUPrm },     { X86::MOVSS2PSrr, X86::MOVSS2PSrm },
    { X86::MOVSSrr, X86::MOVSSrm },           { X86::MOVSX16rr8, X86::MOVSX16rm8 },
    { X86::MOVSX32rr16, X86::MOVSX32rm16 },   { X86::MOVSX32rr8, X86::MOVSX32rm8 },
    { X86::MOVSX64rr16, X86::MOVSX64rm16 },   { X86::MOVSX64rr32, X86::MOVSX64rm32 },
    { X86::MOVSX64rr8, X86::MOVSX64rm8 },     { X86::MOVUPDrr, X86::MOVUPDrm },
    { X86::MOVUPSrr, X86::MOVUPSrm },         { X86::MOVZDI2PDIrr, X86::MOVZDI2PDIrm },
    { X86::MOVZQI2PQIrr, X86::MOVZQI2PQIrm },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm },
    { X86::MOVZX16rr8, X86::MOVZX16rm8 },     { X86::MOVZX32rr16, X86::MOVZX32rm16 },
    { X86::MOVZX32rr8, X86::MOVZX32rm8 },     { X86::MOVZX64rr16, X86::MOVZX64rm16 },
    { X86::MOVZX64rr32, X86::MOVZX64rm32 },   { X86::MOVZX64rr8, X86::MOVZX64rm8 },
    { X86::PSHUFDri, X86::PSHUFDmi },         { X86::PSHUFHWri, X86::PSHUFHWmi },
    { X86::PSHUFLWri, X86::PSHUFLWmi },       { X86::RCPPSr, X86::RCPPSm },
    { X86::RCPPSr_Int, X86::RCPPSm_Int },     { X86::RSQRTPSr, X86::RSQRTPSm },
    { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int }, { X86::RSQRTSSr, X86::RSQRTSSm },
    { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int }, { X86::SQRTPDr, X86::SQRTPDm },
    { X86::SQRTPDr_Int, X86::SQRTPDm_Int },   { X86::SQRTPSr, X86::SQRTPSm },
    { X86::SQRTPSr_Int, X86::SQRTPSm_Int },   { X86::SQRTSDr, X86::SQRTSDm },
    { X86::SQRTSDr_Int, X86::SQRTSDm_Int },   { X86::SQRTSSr, X86::SQRTSSm },
    { X86::SQRTSSr_Int, X86::SQRTSSm_Int },   { X86::TEST16rr, X86::TEST16rm },
    { X86::TEST32rr, X86::TEST32rm },         { X86::TEST64rr, X86::TEST64rm },
    { X86::TEST8rr, X86::TEST8rm },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr, X86::UCOMISDrm },       { X86::UCOMISSrr, X86::UCOMISSrm }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
    unsigned RegOp = OpTbl1[i][0];
    unsigned MemOp = OpTbl1[i][1];
    if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 1 | (1 << 4); // Index 1, folded load
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp,
                                                                 AuxInfo))).second)
        AmbEntries.push_back(MemOp);
  }
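  // The FsMOVAPDrr/FsMOVAPSrr exclusions above and in the OpTbl0 loop keep
  // MemOp2RegOpTable unambiguous: MOVSDrm/MOVSSrm (and the mr store forms)
  // already map back to the plain MOVSDrr/MOVSSrr copies, so also
  // registering the Fs* pseudo-copies would create duplicate unfolding
  // keys and land in AmbEntries.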
  static const unsigned OpTbl2[][2] = {
    { X86::ADC32rr, X86::ADC32rm },         { X86::ADC64rr, X86::ADC64rm },
    { X86::ADD16rr, X86::ADD16rm },         { X86::ADD32rr, X86::ADD32rm },
    { X86::ADD64rr, X86::ADD64rm },         { X86::ADD8rr, X86::ADD8rm },
    { X86::ADDPDrr, X86::ADDPDrm },         { X86::ADDPSrr, X86::ADDPSrm },
    { X86::ADDSDrr, X86::ADDSDrm },         { X86::ADDSSrr, X86::ADDSSrm },
    { X86::ADDSUBPDrr, X86::ADDSUBPDrm },   { X86::ADDSUBPSrr, X86::ADDSUBPSrm },
    { X86::AND16rr, X86::AND16rm },         { X86::AND32rr, X86::AND32rm },
    { X86::AND64rr, X86::AND64rm },         { X86::AND8rr, X86::AND8rm },
    { X86::ANDNPDrr, X86::ANDNPDrm },       { X86::ANDNPSrr, X86::ANDNPSrm },
    { X86::ANDPDrr, X86::ANDPDrm },         { X86::ANDPSrr, X86::ANDPSrm },
    { X86::CMOVA16rr, X86::CMOVA16rm },     { X86::CMOVA32rr, X86::CMOVA32rm },
    { X86::CMOVA64rr, X86::CMOVA64rm },     { X86::CMOVAE16rr, X86::CMOVAE16rm },
    { X86::CMOVAE32rr, X86::CMOVAE32rm },   { X86::CMOVAE64rr, X86::CMOVAE64rm },
    { X86::CMOVB16rr, X86::CMOVB16rm },     { X86::CMOVB32rr, X86::CMOVB32rm },
    { X86::CMOVB64rr, X86::CMOVB64rm },     { X86::CMOVBE16rr, X86::CMOVBE16rm },
    { X86::CMOVBE32rr, X86::CMOVBE32rm },   { X86::CMOVBE64rr, X86::CMOVBE64rm },
    { X86::CMOVE16rr, X86::CMOVE16rm },     { X86::CMOVE32rr, X86::CMOVE32rm },
    { X86::CMOVE64rr, X86::CMOVE64rm },     { X86::CMOVG16rr, X86::CMOVG16rm },
    { X86::CMOVG32rr, X86::CMOVG32rm },     { X86::CMOVG64rr, X86::CMOVG64rm },
    { X86::CMOVGE16rr, X86::CMOVGE16rm },   { X86::CMOVGE32rr, X86::CMOVGE32rm },
    { X86::CMOVGE64rr, X86::CMOVGE64rm },   { X86::CMOVL16rr, X86::CMOVL16rm },
    { X86::CMOVL32rr, X86::CMOVL32rm },     { X86::CMOVL64rr, X86::CMOVL64rm },
    { X86::CMOVLE16rr, X86::CMOVLE16rm },   { X86::CMOVLE32rr, X86::CMOVLE32rm },
    { X86::CMOVLE64rr, X86::CMOVLE64rm },   { X86::CMOVNE16rr, X86::CMOVNE16rm },
    { X86::CMOVNE32rr, X86::CMOVNE32rm },   { X86::CMOVNE64rr, X86::CMOVNE64rm },
    { X86::CMOVNP16rr, X86::CMOVNP16rm },   { X86::CMOVNP32rr, X86::CMOVNP32rm },
    { X86::CMOVNP64rr, X86::CMOVNP64rm },   { X86::CMOVNS16rr, X86::CMOVNS16rm },
    { X86::CMOVNS32rr, X86::CMOVNS32rm },   { X86::CMOVNS64rr, X86::CMOVNS64rm },
    { X86::CMOVP16rr, X86::CMOVP16rm },     { X86::CMOVP32rr, X86::CMOVP32rm },
    { X86::CMOVP64rr, X86::CMOVP64rm },     { X86::CMOVS16rr, X86::CMOVS16rm },
    { X86::CMOVS32rr, X86::CMOVS32rm },     { X86::CMOVS64rr, X86::CMOVS64rm },
    { X86::CMPPDrri, X86::CMPPDrmi },       { X86::CMPPSrri, X86::CMPPSrmi },
    { X86::CMPSDrr, X86::CMPSDrm },         { X86::CMPSSrr, X86::CMPSSrm },
    { X86::DIVPDrr, X86::DIVPDrm },         { X86::DIVPSrr, X86::DIVPSrm },
    { X86::DIVSDrr, X86::DIVSDrm },         { X86::DIVSSrr, X86::DIVSSrm },
    { X86::FsANDNPDrr, X86::FsANDNPDrm },   { X86::FsANDNPSrr, X86::FsANDNPSrm },
    { X86::FsANDPDrr, X86::FsANDPDrm },     { X86::FsANDPSrr, X86::FsANDPSrm },
    { X86::FsORPDrr, X86::FsORPDrm },       { X86::FsORPSrr, X86::FsORPSrm },
    { X86::FsXORPDrr, X86::FsXORPDrm },     { X86::FsXORPSrr, X86::FsXORPSrm },
    { X86::HADDPDrr, X86::HADDPDrm },       { X86::HADDPSrr, X86::HADDPSrm },
    { X86::HSUBPDrr, X86::HSUBPDrm },       { X86::HSUBPSrr, X86::HSUBPSrm },
    { X86::IMUL16rr, X86::IMUL16rm },       { X86::IMUL32rr, X86::IMUL32rm },
    { X86::IMUL64rr, X86::IMUL64rm },       { X86::MAXPDrr, X86::MAXPDrm },
    { X86::MAXPDrr_Int, X86::MAXPDrm_Int }, { X86::MAXPSrr, X86::MAXPSrm },
    { X86::MAXPSrr_Int, X86::MAXPSrm_Int }, { X86::MAXSDrr, X86::MAXSDrm },
    { X86::MAXSDrr_Int, X86::MAXSDrm_Int }, { X86::MAXSSrr, X86::MAXSSrm },
    { X86::MAXSSrr_Int, X86::MAXSSrm_Int }, { X86::MINPDrr, X86::MINPDrm },
    { X86::MINPDrr_Int, X86::MINPDrm_Int }, { X86::MINPSrr, X86::MINPSrm },
    { X86::MINPSrr_Int, X86::MINPSrm_Int }, { X86::MINSDrr, X86::MINSDrm },
    { X86::MINSDrr_Int, X86::MINSDrm_Int }, { X86::MINSSrr, X86::MINSSrm },
    { X86::MINSSrr_Int, X86::MINSSrm_Int }, { X86::MULPDrr, X86::MULPDrm },
    { X86::MULPSrr, X86::MULPSrm },         { X86::MULSDrr, X86::MULSDrm },
    { X86::MULSSrr, X86::MULSSrm },         { X86::OR16rr, X86::OR16rm },
    { X86::OR32rr, X86::OR32rm },           { X86::OR64rr, X86::OR64rm },
    { X86::OR8rr, X86::OR8rm },             { X86::ORPDrr, X86::ORPDrm },
    { X86::ORPSrr, X86::ORPSrm },           { X86::PACKSSDWrr, X86::PACKSSDWrm },
    { X86::PACKSSWBrr, X86::PACKSSWBrm },   { X86::PACKUSWBrr, X86::PACKUSWBrm },
    { X86::PADDBrr, X86::PADDBrm },         { X86::PADDDrr, X86::PADDDrm },
    { X86::PADDQrr, X86::PADDQrm },         { X86::PADDSBrr, X86::PADDSBrm },
    { X86::PADDSWrr, X86::PADDSWrm },       { X86::PADDWrr, X86::PADDWrm },
    { X86::PANDNrr, X86::PANDNrm },         { X86::PANDrr, X86::PANDrm },
    { X86::PAVGBrr, X86::PAVGBrm },         { X86::PAVGWrr, X86::PAVGWrm },
    { X86::PCMPEQBrr, X86::PCMPEQBrm },     { X86::PCMPEQDrr, X86::PCMPEQDrm },
    { X86::PCMPEQWrr, X86::PCMPEQWrm },     { X86::PCMPGTBrr, X86::PCMPGTBrm },
    { X86::PCMPGTDrr, X86::PCMPGTDrm },     { X86::PCMPGTWrr, X86::PCMPGTWrm },
    { X86::PINSRWrri, X86::PINSRWrmi },     { X86::PMADDWDrr, X86::PMADDWDrm },
    { X86::PMAXSWrr, X86::PMAXSWrm },       { X86::PMAXUBrr, X86::PMAXUBrm },
    { X86::PMINSWrr, X86::PMINSWrm },       { X86::PMINUBrr, X86::PMINUBrm },
    { X86::PMULDQrr, X86::PMULDQrm },       { X86::PMULDQrr_int, X86::PMULDQrm_int },
    { X86::PMULHUWrr, X86::PMULHUWrm },     { X86::PMULHWrr, X86::PMULHWrm },
    { X86::PMULLDrr, X86::PMULLDrm },       { X86::PMULLDrr_int, X86::PMULLDrm_int },
    { X86::PMULLWrr, X86::PMULLWrm },       { X86::PMULUDQrr, X86::PMULUDQrm },
    { X86::PORrr, X86::PORrm },             { X86::PSADBWrr, X86::PSADBWrm },
    { X86::PSLLDrr, X86::PSLLDrm },         { X86::PSLLQrr, X86::PSLLQrm },
    { X86::PSLLWrr, X86::PSLLWrm },         { X86::PSRADrr, X86::PSRADrm },
    { X86::PSRAWrr, X86::PSRAWrm },         { X86::PSRLDrr, X86::PSRLDrm },
    { X86::PSRLQrr, X86::PSRLQrm },         { X86::PSRLWrr, X86::PSRLWrm },
    { X86::PSUBBrr, X86::PSUBBrm },         { X86::PSUBDrr, X86::PSUBDrm },
    { X86::PSUBSBrr, X86::PSUBSBrm },       { X86::PSUBSWrr, X86::PSUBSWrm },
    { X86::PSUBWrr, X86::PSUBWrm },         { X86::PUNPCKHBWrr, X86::PUNPCKHBWrm },
    { X86::PUNPCKHDQrr, X86::PUNPCKHDQrm },
    { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm },
    { X86::PUNPCKHWDrr, X86::PUNPCKHWDrm }, { X86::PUNPCKLBWrr, X86::PUNPCKLBWrm },
    { X86::PUNPCKLDQrr, X86::PUNPCKLDQrm },
    { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm },
    { X86::PUNPCKLWDrr, X86::PUNPCKLWDrm }, { X86::PXORrr, X86::PXORrm },
    { X86::SBB32rr, X86::SBB32rm },         { X86::SBB64rr, X86::SBB64rm },
    { X86::SHUFPDrri, X86::SHUFPDrmi },     { X86::SHUFPSrri, X86::SHUFPSrmi },
    { X86::SUB16rr, X86::SUB16rm },         { X86::SUB32rr, X86::SUB32rm },
    { X86::SUB64rr, X86::SUB64rm },         { X86::SUB8rr, X86::SUB8rm },
    { X86::SUBPDrr, X86::SUBPDrm },         { X86::SUBPSrr, X86::SUBPSrm },
    { X86::SUBSDrr, X86::SUBSDrm },         { X86::SUBSSrr, X86::SUBSSrm },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr, X86::UNPCKHPDrm },   { X86::UNPCKHPSrr, X86::UNPCKHPSrm },
    { X86::UNPCKLPDrr, X86::UNPCKLPDrm },   { X86::UNPCKLPSrr, X86::UNPCKLPSrm },
    { X86::XOR16rr, X86::XOR16rm },         { X86::XOR32rr, X86::XOR32rm },
    { X86::XOR64rr, X86::XOR64rm },         { X86::XOR8rr, X86::XOR8rm },
    { X86::XORPDrr, X86::XORPDrm },         { X86::XORPSrr, X86::XORPSrm }
  };
  for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
    unsigned RegOp = OpTbl2[i][0];
    unsigned MemOp = OpTbl2[i][1];
    if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 2 | (1 << 4); // Index 2, folded load
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp,
                                                               AuxInfo))).second)
      AmbEntries.push_back(MemOp);
  }

  // Remove ambiguous entries.
  assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}
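// Worked example of the folding maps built above: OpTbl2 maps ADD32rr to
// ADD32rm with AuxInfo 2 | (1 << 4), so a later unfolding of ADD32rm can
// recover both the original register opcode (ADD32rr) and the fact that
// operand 2 was a folded load.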
bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case X86::MOV8rr:
  case X86::MOV16rr:
  case X86::MOV32rr:
  case X86::MOV64rr:
  case X86::MOV16to16_:
  case X86::MOV32to32_:
  case X86::MOVSSrr:
  case X86::MOVSDrr:

  // FP Stack register class copies
  case X86::MOV_Fp3232: case X86::MOV_Fp6464: case X86::MOV_Fp8080:
  case X86::MOV_Fp3264: case X86::MOV_Fp3280:
  case X86::MOV_Fp6432: case X86::MOV_Fp8032:

  case X86::FsMOVAPSrr:
  case X86::FsMOVAPDrr:
  case X86::MOVAPSrr:
  case X86::MOVAPDrr:
  case X86::MOVSS2PSrr:
  case X86::MOVSD2PDrr:
  case X86::MOVPS2SSrr:
  case X86::MOVPD2SDrr:
  case X86::MMX_MOVD64rr:
  case X86::MMX_MOVQ64rr:
    assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "invalid register-register move instruction");
    sourceReg = MI.getOperand(1).getReg();
    destReg = MI.getOperand(0).getReg();
    return true;
  }
}

unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(4).isImm() &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
        MI->getOperand(2).isReg() && MI->getOperand(3).isImm() &&
        MI->getOperand(1).getImm() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}
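// The operand checks in the two functions above follow the X86 memory
// operand layout used in this revision: a load is (dst, base, scale,
// index, disp) and a store is (base, scale, index, disp, src).  A stack
// slot access is therefore recognized as a frame-index base with scale 1,
// no index register, and displacement 0.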
/// regIsPICBase - Return true if register is a PIC base (i.e. defined by
/// X86::MOVPC32r).
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  bool isPICBase = false;
  for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
         E = MRI.def_end(); I != E; ++I) {
    MachineInstr *DefMI = I.getOperand().getParent();
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

/// isGVStub - Return true if the GV requires an extra load to get the
/// real address.
static inline bool isGVStub(GlobalValue *GV, X86TargetMachine &TM) {
  return TM.getSubtarget<X86Subtarget>().GVRequiresExtraLoad(GV, TM, false);
}

bool
X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI->getOperand(1).isReg() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        (MI->getOperand(4).isCPI() ||
         (MI->getOperand(4).isGlobal() &&
          isGVStub(MI->getOperand(4).getGlobal(), TM)))) {
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of PIC load.
      if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
        return false;
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        !MI->getOperand(4).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI->getOperand(1).isReg())
        return true;
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}
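// For instance, a constant-pool load such as "movsd LCPI1_0, %xmm0"
// (illustrative syntax) can simply be re-executed at any point where its
// value is needed, which is what makes it trivially rematerializable; a
// load through a PIC stub is only rematerialized when the
// -remat-pic-stub-load flag above is set.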
/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
/// that would clobber the EFLAGS condition register. Note the result may be
/// conservative. If it cannot definitely determine the safety after visiting
/// two instructions, it assumes it's not safe.
static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  // It's always safe to clobber EFLAGS at the end of a block.
  if (I == MBB.end())
    return true;

  // For compile time consideration, if we are not able to determine the
  // safety after visiting 2 instructions, we will assume it's not safe.
  for (unsigned i = 0; i < 2; ++i) {
    bool SeenDef = false;
    for (unsigned j = 0, e = I->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = I->getOperand(j);
      if (!MO.isReg())
        continue;
      if (MO.getReg() == X86::EFLAGS) {
        if (MO.isUse())
          return false;
        SeenDef = true;
      }
    }

    if (SeenDef)
      // This instruction defines EFLAGS, no need to look any further.
      return true;
    ++I;

    // If we make it to the end of the block, it's safe to clobber EFLAGS.
    if (I == MBB.end())
      return true;
  }

  // Conservative answer.
  return false;
}
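// The MOV*r0 pseudo-instructions handled below are normally emitted as a
// self-xor ("xorl %reg, %reg"), which is smaller than a move of zero but
// clobbers EFLAGS; when the flags cannot safely be clobbered at the
// rematerialization point, an explicit "movl $0, %reg" is emitted instead.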
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg,
                                 const MachineInstr *Orig) const {
  unsigned SubIdx = Orig->getOperand(0).isReg()
    ? Orig->getOperand(0).getSubReg() : 0;
  bool ChangeSubIdx = SubIdx != 0;
  if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
    DestReg = RI.getSubReg(DestReg, SubIdx);
    SubIdx = 0;
  }

  // MOV32r0 etc. are implemented with xor, which clobbers the condition
  // code. Re-materialize them as movri instructions to avoid side effects.
  bool Emitted = false;
  switch (Orig->getOpcode()) {
  default: break;
  case X86::MOV8r0:
  case X86::MOV16r0:
  case X86::MOV32r0:
  case X86::MOV64r0: {
    if (!isSafeToClobberEFLAGS(MBB, I)) {
      unsigned Opc = 0;
      switch (Orig->getOpcode()) {
      default: break;
      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
      case X86::MOV16r0: Opc = X86::MOV16ri; break;
      case X86::MOV32r0: Opc = X86::MOV32ri; break;
      case X86::MOV64r0: Opc = X86::MOV64ri32; break;
      }
      BuildMI(MBB, I, get(Opc), DestReg).addImm(0);
      Emitted = true;
    }
    break;
  }
  }

  if (!Emitted) {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->getOperand(0).setReg(DestReg);
    MBB.insert(I, MI);
  }

  if (ChangeSubIdx) {
    MachineInstr *NewMI = prior(I);
    NewMI->getOperand(0).setSubReg(SubIdx);
  }
}

/// isInvariantLoad - Return true if the specified instruction (which is marked
/// mayLoad) is loading from a location whose value is invariant across the
/// function. For example, loading a value from the constant pool or from the
/// argument area of a function, if it does not change. This should only return
/// true if *all* loads the instruction does are invariant (if it does multiple
/// loads).
bool X86InstrInfo::isInvariantLoad(MachineInstr *MI) const {
  // This code cares about loads from three cases: constant pool entries,
  // invariant argument slots, and global stubs. In order to handle these cases
  // for all of the myriad of X86 instructions, we just scan for a CP/FI/GV
  // operand and base our analysis on it. This is safe because the address of
  // none of these three cases is ever used as anything other than a load base
  // and X86 doesn't have any instructions that load from multiple places.

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    // Loads from constant pools are trivially invariant.
    if (MO.isCPI())
      return true;

    if (MO.isGlobal())
      return isGVStub(MO.getGlobal(), TM);

    // If this is a load from an invariant stack slot, the load is a constant.
    if (MO.isFI()) {
      const MachineFrameInfo &MFI =
        *MI->getParent()->getParent()->getFrameInfo();
      int Idx = MO.getIndex();
      return MFI.isFixedObjectIndex(Idx) && MFI.isImmutableObjectIndex(Idx);
    }
  }

  // All other instances of these instructions are presumed to have other
  // issues.
  return false;
}

/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}
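// An illustration of what the LEA conversion below buys: under the x86
// two-address constraint, "b = a + c" lowers to "movl %eax, %ebx" followed
// by "addl %ecx, %ebx", whereas "leal (%eax,%ecx), %ebx" computes the sum
// directly into an arbitrary destination register with no extra copy.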
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  // All input instructions are two-address instructions. Get the known
  // operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  bool isDead = MI->getOperand(0).isDead();
  bool isKill = MI->getOperand(1).isKill();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  unsigned MIOpc = MI->getOpcode();
  switch (MIOpc) {
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    if (B != C) return 0;
    unsigned A = MI->getOperand(0).getReg();
    unsigned M = MI->getOperand(3).getImm();
    NewMI = BuildMI(MF, get(X86::PSHUFDri))
      .addReg(A, true, false, false, isDead)
      .addReg(B, false, false, isKill).addImm(M);
    break;
  }
  case X86::SHL64ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(MF, get(X86::LEA64r))
      .addReg(Dest, true, false, false, isDead)
      .addReg(0).addImm(1 << ShAmt)
      .addReg(Src, false, false, isKill).addImm(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(MF, get(Opc))
      .addReg(Dest, true, false, false, isDead)
      .addReg(0).addImm(1 << ShAmt)
      .addReg(Src, false, false, isKill).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    if (DisableLEA16) {
      // If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
      MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
      unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
        ? X86::LEA64_32r : X86::LEA32r;
      unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);

      // Build and insert into an implicit UNDEF value. This is OK because
      // we'll be shifting and then extracting the lower 16 bits.
      BuildMI(*MFI, MBBI, get(X86::IMPLICIT_DEF), leaInReg);
      MachineInstr *InsMI =
        BuildMI(*MFI, MBBI, get(X86::INSERT_SUBREG), leaInReg)
        .addReg(leaInReg).addReg(Src, false, false, isKill)
        .addImm(X86::SUBREG_16BIT);

      NewMI = BuildMI(*MFI, MBBI, get(Opc), leaOutReg)
        .addReg(0).addImm(1 << ShAmt)
        .addReg(leaInReg, false, false, true).addImm(0);

      MachineInstr *ExtMI = BuildMI(*MFI, MBBI, get(X86::EXTRACT_SUBREG))
        .addReg(Dest, true, false, false, isDead)
        .addReg(leaOutReg, false, false, true).addImm(X86::SUBREG_16BIT);
      if (LV) {
        // Update live variables.
        LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
        LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
        if (isKill)
          LV->replaceKillInstruction(Src, MI, InsMI);
        if (isDead)
          LV->replaceKillInstruction(Dest, MI, ExtMI);
      }
      return ExtMI;
    } else {
      NewMI = BuildMI(MF, get(X86::LEA16r))
        .addReg(Dest, true, false, false, isDead)
        .addReg(0).addImm(1 << ShAmt)
        .addReg(Src, false, false, isKill).addImm(0);
    }
    break;
  }
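  // As the shift cases above show, only shift amounts of 1, 2, or 3 can be
  // converted: an LEA scale must be 1, 2, 4, or 8, so e.g. "shll $3, %reg"
  // becomes "leal (,%reg,8), %reg" (illustrative syntax), while larger
  // shift amounts are rejected.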
  default: {
    // The following opcodes also set the condition code register(s). Only
    // convert them to an equivalent LEA if the condition code register defs
    // are dead!
    if (hasLiveCondCodeDef(MI))
      return 0;

    bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
    switch (MIOpc) {
    default: return 0;
    case X86::INC64r:
    case X86::INC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(MF, get(Opc))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, 1);
      break;
    }
    case X86::INC16r:
    case X86::INC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      NewMI = addRegOffset(BuildMI(MF, get(X86::LEA16r))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, 1);
      break;
    case X86::DEC64r:
    case X86::DEC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(MF, get(Opc))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, -1);
      break;
    }
    case X86::DEC16r:
    case X86::DEC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      NewMI = addRegOffset(BuildMI(MF, get(X86::LEA16r))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, -1);
      break;
    case X86::ADD64rr:
    case X86::ADD32rr: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      unsigned Src2 = MI->getOperand(2).getReg();
      bool isKill2 = MI->getOperand(2).isKill();
      NewMI = addRegReg(BuildMI(MF, get(Opc))
                        .addReg(Dest, true, false, false, isDead),
                        Src, isKill, Src2, isKill2);
      if (LV && isKill2)
        LV->replaceKillInstruction(Src2, MI, NewMI);
      break;
    }
    case X86::ADD16rr: {
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Src2 = MI->getOperand(2).getReg();
      bool isKill2 = MI->getOperand(2).isKill();
      NewMI = addRegReg(BuildMI(MF, get(X86::LEA16r))
                        .addReg(Dest, true, false, false, isDead),
                        Src, isKill, Src2, isKill2);
      if (LV && isKill2)
        LV->replaceKillInstruction(Src2, MI, NewMI);
      break;
    }
    case X86::ADD64ri32:
    case X86::ADD64ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImm())
        NewMI = addRegOffset(BuildMI(MF, get(X86::LEA64r))
                             .addReg(Dest, true, false, false, isDead),
                             Src, isKill, MI->getOperand(2).getImm());
      break;
    case X86::ADD32ri:
    case X86::ADD32ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImm()) {
        unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
        NewMI = addRegOffset(BuildMI(MF, get(Opc))
                             .addReg(Dest, true, false, false, isDead),
                             Src, isKill, MI->getOperand(2).getImm());
      }
      break;
    case X86::ADD16ri:
    case X86::ADD16ri8:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImm())
        NewMI = addRegOffset(BuildMI(MF, get(X86::LEA16r))
                             .addReg(Dest, true, false, false, isDead),
                             Src, isKill, MI->getOperand(2).getImm());
      break;
    case X86::SHL16ri:
      if (DisableLEA16) return 0;
    case X86::SHL32ri:
    case X86::SHL64ri: {
      assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImm() &&
             "Unknown shl instruction!");
      unsigned ShAmt = MI->getOperand(2).getImm();
      if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
        X86AddressMode AM;
        AM.Scale = 1 << ShAmt;
        AM.IndexReg = Src;
        unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
          : (MIOpc == X86::SHL32ri
             ? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
        NewMI = addFullAddress(BuildMI(MF, get(Opc))
                               .addReg(Dest, true, false, false, isDead), AM);
        if (isKill)
          NewMI->getOperand(3).setIsKill(true);
      }
      break;
    }
    }
  }
  }

  if (!NewMI) return 0;

  if (LV) {  // Update live variables
    if (isKill)
      LV->replaceKillInstruction(Src, MI, NewMI);
    if (isDead)
      LV->replaceKillInstruction(Dest, MI, NewMI);
  }

  MFI->insert(MBBI, NewMI);  // Insert the new inst
  return NewMI;
}
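// A concrete instance of the SHLD/SHRD commutation handled below: for the
// 16-bit forms, "A = SHRD B, C, 5" produces the same value as
// "A = SHLD C, B, 11", because both double-shifts select the same 16
// contiguous bits from the concatenation of the two source registers.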
/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *
X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImm();
    if (NewMI) {
      MachineFunction &MF = *MI->getParent()->getParent();
      MI = MF.CloneMachineInstr(MI);
      NewMI = false;
    }
    MI->setDesc(get(Opc));
    MI->getOperand(3).setImm(Size-Amt);
    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
  }
  case X86::CMOVB16rr:  case X86::CMOVB32rr:  case X86::CMOVB64rr:
  case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
  case X86::CMOVE16rr:  case X86::CMOVE32rr:  case X86::CMOVE64rr:
  case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr:
  case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr:
  case X86::CMOVA16rr:  case X86::CMOVA32rr:  case X86::CMOVA64rr:
  case X86::CMOVL16rr:  case X86::CMOVL32rr:  case X86::CMOVL64rr:
  case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr:
  case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr:
  case X86::CMOVG16rr:  case X86::CMOVG32rr:  case X86::CMOVG64rr:
  case X86::CMOVS16rr:  case X86::CMOVS32rr:  case X86::CMOVS64rr:
  case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr:
  case X86::CMOVP16rr:  case X86::CMOVP32rr:  case X86::CMOVP64rr:
  case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr: {
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr:  Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr:  Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr:  Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr:  Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr:  Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr:  Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr:  Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr:  Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr:  Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr:  Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr:  Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr:  Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr:  Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr:  Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr:  Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr:  Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr:  Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr:  Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr:  Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr:  Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr:  Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    }
    if (NewMI) {
      MachineFunction &MF = *MI->getParent()->getParent();
      MI = MF.CloneMachineInstr(MI);
      NewMI = false;
    }
    MI->setDesc(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
  }
}

static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE:  return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL:  return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG:  return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB:  return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA:  return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS:  return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP:  return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO:  return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L:  return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G:  return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B:  return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A:  return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S:  return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P:  return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O:  return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L:  return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G:  return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B:  return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A:  return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S:  return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P:  return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O:  return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isTerminator()) return false;

  // Conditional branch is a special case.
  if (TID.isBranch() && !TID.isBarrier())
    return true;
  if (!TID.isPredicable())
    return true;
  return !isPredicated(MI);
}
// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
                                               const X86InstrInfo &TII) {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  return TII.isUnpredicatedTerminator(MI);
}

bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond) const {
  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    // Working from the bottom, when we see a non-terminator
    // instruction, we're done.
    if (!isBrAnalysisUnpredicatedTerminator(I, *this))
      break;
    // A terminator that isn't a branch can't easily be handled
    // by this analysis.
    if (!I->getDesc().isBranch())
      return true;
    // Handle unconditional branches.
    if (I->getOpcode() == X86::JMP) {
      // If the block has any instructions after a JMP, delete them.
      while (next(I) != MBB.end())
        next(I)->eraseFromParent();
      Cond.clear();
      FBB = 0;
      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }
      // TBB is used to indicate the unconditional destination.
      TBB = I->getOperand(0).getMBB();
      continue;
    }
    // Handle conditional branches.
    X86::CondCode BranchCode = GetCondFromBranchOpc(I->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.
    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(BranchCode));
      continue;
    }
    // Handle subsequent conditional branches. Only handle the case
    // where all conditional branches branch to the same destination
    // and their condition opcodes fit one of the special
    // multi-branch idioms.
    assert(Cond.size() == 1);
    assert(TBB);
    // Only handle the case where all conditional branches branch to
    // the same destination.
    if (TBB != I->getOperand(0).getMBB())
      return true;
    X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
    // If the conditions are the same, we can leave them alone.
    if (OldBranchCode == BranchCode)
      continue;
    // If they differ, see if they fit one of the known patterns.
    // Theoretically we could handle more patterns here, but
    // we shouldn't expect to see them if instruction selection
    // has done a reasonable job.
    if ((OldBranchCode == X86::COND_NP &&
         BranchCode == X86::COND_E) ||
        (OldBranchCode == X86::COND_E &&
         BranchCode == X86::COND_NP))
      BranchCode = X86::COND_NP_OR_E;
    else if ((OldBranchCode == X86::COND_P &&
              BranchCode == X86::COND_NE) ||
             (OldBranchCode == X86::COND_NE &&
              BranchCode == X86::COND_P))
      BranchCode = X86::COND_NE_OR_P;
    else
      return true;
    // Update the MachineOperand.
    Cond[0].setImm(BranchCode);
  }

  return false;
}
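// The NP_OR_E / NE_OR_P idioms recognized above are typically what SSE
// floating-point equality compiles to: ucomiss/ucomisd set PF on an
// unordered result, so "x == y" branches on E and NP while "x != y"
// branches on NE or P.  AnalyzeBranch folds the two-branch pattern into a
// single pseudo condition code, which InsertBranch below re-expands into
// the JNP+JE or JNE+JP pair.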
    Cond[0].setImm(BranchCode);
  }

  return false;
}

unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->getOpcode() != X86::JMP &&
        GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
                                                     const MachineOperand &MO) {
  if (MO.isReg())
    MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
                     MO.isKill(), MO.isDead(), MO.getSubReg());
  else if (MO.isImm())
    MIB = MIB.addImm(MO.getImm());
  else if (MO.isFI())
    MIB = MIB.addFrameIndex(MO.getIndex());
  else if (MO.isGlobal())
    MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
  else if (MO.isCPI())
    MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
  else if (MO.isJTI())
    MIB = MIB.addJumpTableIndex(MO.getIndex());
  else if (MO.isSymbol())
    MIB = MIB.addExternalSymbol(MO.getSymbolName());
  else
    assert(0 && "Unknown operand for X86InstrAddOperand!");

  return MIB;
}

unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
  switch (CC) {
  case X86::COND_NP_OR_E:
    // Synthesize NP_OR_E with two branches.
    BuildMI(&MBB, get(X86::JNP)).addMBB(TBB);
    ++Count;
    BuildMI(&MBB, get(X86::JE)).addMBB(TBB);
    ++Count;
    break;
  case X86::COND_NE_OR_P:
    // Synthesize NE_OR_P with two branches.
    BuildMI(&MBB, get(X86::JNE)).addMBB(TBB);
    ++Count;
    BuildMI(&MBB, get(X86::JP)).addMBB(TBB);
    ++Count;
    break;
  default: {
    unsigned Opc = GetCondBranchFromCond(CC);
    BuildMI(&MBB, get(Opc)).addMBB(TBB);
    ++Count;
  }
  }
  if (FBB) {
    // Two-way conditional branch. Insert the second branch.
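    // The conditional branches above all target TBB; this unconditional
    // JMP routes the false case to FBB explicitly.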
    BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const {
  if (DestRC == SrcRC) {
    unsigned Opc;
    if (DestRC == &X86::GR64RegClass) {
      Opc = X86::MOV64rr;
    } else if (DestRC == &X86::GR32RegClass) {
      Opc = X86::MOV32rr;
    } else if (DestRC == &X86::GR16RegClass) {
      Opc = X86::MOV16rr;
    } else if (DestRC == &X86::GR8RegClass) {
      Opc = X86::MOV8rr;
    } else if (DestRC == &X86::GR32_RegClass) {
      Opc = X86::MOV32_rr;
    } else if (DestRC == &X86::GR16_RegClass) {
      Opc = X86::MOV16_rr;
    } else if (DestRC == &X86::RFP32RegClass) {
      Opc = X86::MOV_Fp3232;
    } else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
      Opc = X86::MOV_Fp6464;
    } else if (DestRC == &X86::RFP80RegClass) {
      Opc = X86::MOV_Fp8080;
    } else if (DestRC == &X86::FR32RegClass) {
      Opc = X86::FsMOVAPSrr;
    } else if (DestRC == &X86::FR64RegClass) {
      Opc = X86::FsMOVAPDrr;
    } else if (DestRC == &X86::VR128RegClass) {
      Opc = X86::MOVAPSrr;
    } else if (DestRC == &X86::VR64RegClass) {
      Opc = X86::MMX_MOVQ64rr;
    } else {
      return false;
    }
    BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
    return true;
  }

  // Moving EFLAGS to / from another register requires a push and a pop.
  if (SrcRC == &X86::CCRRegClass) {
    if (SrcReg != X86::EFLAGS)
      return false;
    if (DestRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, get(X86::PUSHFQ));
      BuildMI(MBB, MI, get(X86::POP64r), DestReg);
      return true;
    } else if (DestRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, get(X86::PUSHFD));
      BuildMI(MBB, MI, get(X86::POP32r), DestReg);
      return true;
    }
  } else if (DestRC == &X86::CCRRegClass) {
    if (DestReg != X86::EFLAGS)
      return false;
    if (SrcRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
      BuildMI(MBB, MI, get(X86::POPFQ));
      return true;
    } else if (SrcRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
      BuildMI(MBB, MI, get(X86::POPFD));
      return true;
    }
  }

  // Moving from ST(0) turns into FpGET_ST0_32 etc.
  if (SrcRC == &X86::RSTRegClass) {
    // Copying from ST(0)/ST(1).
    if (SrcReg != X86::ST0 && SrcReg != X86::ST1)
      // Can only copy from ST(0)/ST(1) right now
      return false;
    bool isST0 = SrcReg == X86::ST0;
    unsigned Opc;
    if (DestRC == &X86::RFP32RegClass)
      Opc = isST0 ? X86::FpGET_ST0_32 : X86::FpGET_ST1_32;
    else if (DestRC == &X86::RFP64RegClass)
      Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
    else {
      if (DestRC != &X86::RFP80RegClass)
        return false;
      Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
    }
    BuildMI(MBB, MI, get(Opc), DestReg);
    return true;
  }

  // Moving to ST(0) turns into FpSET_ST0_32 etc.
  if (DestRC == &X86::RSTRegClass) {
    // Copying to ST(0).  FIXME: handle ST(1) also
    if (DestReg != X86::ST0)
      // Can only copy to TOS right now
      return false;
    unsigned Opc;
    if (SrcRC == &X86::RFP32RegClass)
      Opc = X86::FpSET_ST0_32;
    else if (SrcRC == &X86::RFP64RegClass)
      Opc = X86::FpSET_ST0_64;
    else {
      if (SrcRC != &X86::RFP80RegClass)
        return false;
      Opc = X86::FpSET_ST0_80;
    }
    BuildMI(MBB, MI, get(Opc)).addReg(SrcReg);
    return true;
  }

  // Not yet supported!
  return false;
}

static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
                                  bool isStackAligned) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_mr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_mr;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::ST_FpP80m;   // pops
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::ST_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::ST_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    // If stack is realigned we can use aligned stores.
    Opc = isStackAligned ? X86::MOVAPSmr : X86::MOVUPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill, int FrameIdx,
                                       const TargetRegisterClass *RC) const {
  const MachineFunction &MF = *MBB.getParent();
  bool isAligned = (RI.getStackAlignment() >= 16) ||
    RI.needsStackRealignment(MF);
  unsigned Opc = getStoreRegOpcode(RC, isAligned);
  addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
    .addReg(SrcReg, false, false, isKill);
}

void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  bool isAligned = (RI.getStackAlignment() >= 16) ||
    RI.needsStackRealignment(MF);
  unsigned Opc = getStoreRegOpcode(RC, isAligned);
  MachineInstrBuilder MIB = BuildMI(MF, get(Opc));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  MIB.addReg(SrcReg, false, false, isKill);
  NewMIs.push_back(MIB);
}

static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
                                 bool isStackAligned) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rm;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rm;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::LD_Fp80m;
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::LD_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::LD_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    // If stack is realigned we can use aligned loads.
    Opc = isStackAligned ? X86::MOVAPSrm : X86::MOVUPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        unsigned DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC) const {
  const MachineFunction &MF = *MBB.getParent();
  bool isAligned = (RI.getStackAlignment() >= 16) ||
    RI.needsStackRealignment(MF);
  unsigned Opc = getLoadRegOpcode(RC, isAligned);
  addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
}

void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
  bool isAligned = (RI.getStackAlignment() >= 16) ||
    RI.needsStackRealignment(MF);
  unsigned Opc = getLoadRegOpcode(RC, isAligned);
  MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  NewMIs.push_back(MIB);
}

bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  unsigned SlotSize = is64Bit ? 8 : 4;

  MachineFunction &MF = *MBB.getParent();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  X86FI->setCalleeSavedFrameSize(CSI.size() * SlotSize);

  unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    BuildMI(MBB, MI, get(Opc)).addReg(Reg);
  }
  return true;
}

bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

  unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    BuildMI(MBB, MI, get(Opc), Reg);
  }
  return true;
}

static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
                                     const SmallVector<MachineOperand,4> &MOs,
                                     MachineInstr *MI, const TargetInstrInfo &TII) {
  // Create the base instruction with the memory operand as the first part.
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), true);
  MachineInstrBuilder MIB(NewMI);
  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB = X86InstrAddOperand(MIB, MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    MIB.addImm(1).addReg(0).addImm(0);

  // Loop over the rest of the ri operands, converting them over.
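  // The tied def/use register pair has already been replaced by the memory
  // reference above, so copying starts at operand 2; any trailing operands
  // (such as the immediate of an ...ri form) carry over unchanged.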
  unsigned NumOps = MI->getDesc().getNumOperands()-2;
  for (unsigned i = 0; i != NumOps; ++i) {
    MachineOperand &MO = MI->getOperand(i+2);
    MIB = X86InstrAddOperand(MIB, MO);
  }
  for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    MIB = X86InstrAddOperand(MIB, MO);
  }
  return MIB;
}

static MachineInstr *FuseInst(MachineFunction &MF,
                              unsigned Opcode, unsigned OpNo,
                              const SmallVector<MachineOperand,4> &MOs,
                              MachineInstr *MI, const TargetInstrInfo &TII) {
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), true);
  MachineInstrBuilder MIB(NewMI);

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
      assert(MO.isReg() && "Expected to fold into reg operand!");
      unsigned NumAddrOps = MOs.size();
      for (unsigned i = 0; i != NumAddrOps; ++i)
        MIB = X86InstrAddOperand(MIB, MOs[i]);
      if (NumAddrOps < 4)  // FrameIndex only
        MIB.addImm(1).addReg(0).addImm(0);
    } else {
      MIB = X86InstrAddOperand(MIB, MO);
    }
  }
  return MIB;
}

static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                const SmallVector<MachineOperand,4> &MOs,
                                MachineInstr *MI) {
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineInstrBuilder MIB = BuildMI(MF, TII.get(Opcode));

  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB = X86InstrAddOperand(MIB, MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    MIB.addImm(1).addReg(0).addImm(0);
  return MIB.addImm(0);
}

MachineInstr*
X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
                                MachineInstr *MI, unsigned i,
                                const SmallVector<MachineOperand,4> &MOs) const {
  const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
  bool isTwoAddrFold = false;
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  MachineInstr *NewMI = NULL;
  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places.  It requires
  // replacing the *two* registers with the memory location.
  if (isTwoAddr && NumOps >= 2 && i < 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isReg() &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
    isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
    if (MI->getOpcode() == X86::MOV16r0)
      NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV32r0)
      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV64r0)
      NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
    else if (MI->getOpcode() == X86::MOV8r0)
      NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
    if (NewMI)
      return NewMI;

    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (i == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (i == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  // If table selected...
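  // Each table maps a register-form opcode to the memory-form opcode for
  // folding at that operand index (or at the tied def/use pair).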
  if (OpcodeTablePtr) {
    // Find the opcode to fuse.
    DenseMap<unsigned*, unsigned>::iterator I =
      OpcodeTablePtr->find((unsigned*)MI->getOpcode());
    if (I != OpcodeTablePtr->end()) {
      if (isTwoAddrFold)
        NewMI = FuseTwoAddrInst(MF, I->second, MOs, MI, *this);
      else
        NewMI = FuseInst(MF, I->second, i, MOs, MI, *this);
      return NewMI;
    }
  }

  // No fusion
  if (PrintFailedFusing)
    cerr << "We failed to fuse operand " << i << *MI;
  return NULL;
}


MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
                                              MachineInstr *MI,
                                              const SmallVectorImpl<unsigned> &Ops,
                                              int FrameIndex) const {
  // Check switch flag
  if (NoFusing) return NULL;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
  // FIXME: Move alignment requirement into tables?
  if (Alignment < 16) {
    switch (MI->getOpcode()) {
    default: break;
    // Not always safe to fold movsd into these instructions since their load
    // folding variants expect the address to be 16-byte aligned.
    case X86::FsANDNPDrr:
    case X86::FsANDNPSrr:
    case X86::FsANDPDrr:
    case X86::FsANDPSrr:
    case X86::FsORPDrr:
    case X86::FsORPSrr:
    case X86::FsXORPDrr:
    case X86::FsXORPSrr:
      return NULL;
    }
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
    }
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,4> MOs;
  MOs.push_back(MachineOperand::CreateFI(FrameIndex));
  return foldMemoryOperand(MF, MI, Ops[0], MOs);
}

MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
                                              MachineInstr *MI,
                                              const SmallVectorImpl<unsigned> &Ops,
                                              MachineInstr *LoadMI) const {
  // Check switch flag
  if (NoFusing) return NULL;

  // Determine the alignment of the load.
  unsigned Alignment = 0;
  if (LoadMI->hasOneMemOperand())
    Alignment = LoadMI->memoperands_begin()->getAlignment();

  // FIXME: Move alignment requirement into tables?
  if (Alignment < 16) {
    switch (MI->getOpcode()) {
    default: break;
    // Not always safe to fold movsd into these instructions since their load
    // folding variants expect the address to be 16-byte aligned.
    case X86::FsANDNPDrr:
    case X86::FsANDNPSrr:
    case X86::FsANDPDrr:
    case X86::FsANDPSrr:
    case X86::FsORPDrr:
    case X86::FsORPSrr:
    case X86::FsXORPDrr:
    case X86::FsXORPSrr:
      return NULL;
    }
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
    }
    // Change to CMPXXri r, 0 first.
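    // TEST r, r sets the same flags as CMP r, 0, and the CMP form has only
    // a single register use, which is what makes the fold possible.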
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,4> MOs;
  unsigned NumOps = LoadMI->getDesc().getNumOperands();
  for (unsigned i = NumOps - 4; i != NumOps; ++i)
    MOs.push_back(LoadMI->getOperand(i));
  return foldMemoryOperand(MF, MI, Ops[0], MOs);
}


bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops) const {
  // Check switch flag
  if (NoFusing) return false;

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    switch (MI->getOpcode()) {
    default: return false;
    case X86::TEST8rr:
    case X86::TEST16rr:
    case X86::TEST32rr:
    case X86::TEST64rr:
      return true;
    }
  }

  if (Ops.size() != 1)
    return false;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places.  It requires
  // replacing the *two* registers with the memory location.
  const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
  } else if (OpNum == 0) { // If operand 0
    switch (Opc) {
    case X86::MOV16r0:
    case X86::MOV32r0:
    case X86::MOV64r0:
    case X86::MOV8r0:
      return true;
    default: break;
    }
    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (OpNum == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (OpNum == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  if (OpcodeTablePtr) {
    // Find the opcode to fuse.
    DenseMap<unsigned*, unsigned>::iterator I =
      OpcodeTablePtr->find((unsigned*)Opc);
    if (I != OpcodeTablePtr->end())
      return true;
  }
  return false;
}

bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)MI->getOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  // The table entry packs the folded operand index into the low 4 bits and
  // the folded-load / folded-store flags into bits 4 and 5.
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return false;
  UnfoldLoad &= FoldedLoad;
  if (UnfoldStore && !FoldedStore)
    return false;
  UnfoldStore &= FoldedStore;

  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
    ? getPointerRegClass() : RI.getRegClass(TOI.RegClass);
  SmallVector<MachineOperand,4> AddrOps;
  SmallVector<MachineOperand,2> BeforeOps;
  SmallVector<MachineOperand,2> AfterOps;
  SmallVector<MachineOperand,4> ImpOps;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (i >= Index && i < Index+4)
      AddrOps.push_back(Op);
    else if (Op.isReg() && Op.isImplicit())
      ImpOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }

  // Emit the load instruction.
  if (UnfoldLoad) {
    loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
    if (UnfoldStore) {
      // Address operands cannot be marked isKill.
      for (unsigned i = 1; i != 5; ++i) {
        MachineOperand &MO = NewMIs[0]->getOperand(i);
        if (MO.isReg())
          MO.setIsKill(false);
      }
    }
  }

  // Emit the data processing instruction.
  MachineInstr *DataMI = MF.CreateMachineInstr(TID, true);
  MachineInstrBuilder MIB(DataMI);

  if (FoldedStore)
    MIB.addReg(Reg, true);
  for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, BeforeOps[i]);
  if (FoldedLoad)
    MIB.addReg(Reg);
  for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, AfterOps[i]);
  for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
    MachineOperand &MO = ImpOps[i];
    MIB.addReg(MO.getReg(), MO.isDef(), true, MO.isKill(), MO.isDead());
  }
  // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
  unsigned NewOpc = 0;
  switch (DataMI->getOpcode()) {
  default: break;
  case X86::CMP64ri32:
  case X86::CMP32ri:
  case X86::CMP16ri:
  case X86::CMP8ri: {
    MachineOperand &MO0 = DataMI->getOperand(0);
    MachineOperand &MO1 = DataMI->getOperand(1);
    if (MO1.getImm() == 0) {
      switch (DataMI->getOpcode()) {
      default: break;
      case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
      case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
      case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
      case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
      }
      DataMI->setDesc(get(NewOpc));
      MO1.ChangeToRegister(MO0.getReg(), false);
    }
  }
  }
  NewMIs.push_back(DataMI);

  // Emit the store instruction.
  if (UnfoldStore) {
    const TargetOperandInfo &DstTOI = TID.OpInfo[0];
    const TargetRegisterClass *DstRC = DstTOI.isLookupPtrRegClass()
      ? getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
    storeRegToAddr(MF, Reg, true, AddrOps, DstRC, NewMIs);
  }

  return true;
}

bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                  SmallVectorImpl<SDNode*> &NewNodes) const {
  if (!N->isMachineOpcode())
    return false;

  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)N->getMachineOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
    ? getPointerRegClass() : RI.getRegClass(TOI.RegClass);
  std::vector<SDValue> AddrOps;
  std::vector<SDValue> BeforeOps;
  std::vector<SDValue> AfterOps;
  unsigned NumOps = N->getNumOperands();
  for (unsigned i = 0; i != NumOps-1; ++i) {
    SDValue Op = N->getOperand(i);
    if (i >= Index && i < Index+4)
      AddrOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }
  SDValue Chain = N->getOperand(NumOps-1);
  AddrOps.push_back(Chain);

  // Emit the load instruction.
  SDNode *Load = 0;
  const MachineFunction &MF = DAG.getMachineFunction();
  if (FoldedLoad) {
    MVT VT = *RC->vt_begin();
    bool isAligned = (RI.getStackAlignment() >= 16) ||
      RI.needsStackRealignment(MF);
    Load = DAG.getTargetNode(getLoadRegOpcode(RC, isAligned),
                             VT, MVT::Other,
                             &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Load);
  }

  // Emit the data processing instruction.
  std::vector<MVT> VTs;
  const TargetRegisterClass *DstRC = 0;
  if (TID.getNumDefs() > 0) {
    const TargetOperandInfo &DstTOI = TID.OpInfo[0];
    DstRC = DstTOI.isLookupPtrRegClass()
      ? getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
    VTs.push_back(*DstRC->vt_begin());
  }
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
      VTs.push_back(VT);
  }
  if (Load)
    BeforeOps.push_back(SDValue(Load, 0));
  std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
  SDNode *NewNode = DAG.getTargetNode(Opc, VTs, &BeforeOps[0], BeforeOps.size());
  NewNodes.push_back(NewNode);

  // Emit the store instruction.
  if (FoldedStore) {
    AddrOps.pop_back();
    AddrOps.push_back(SDValue(NewNode, 0));
    AddrOps.push_back(Chain);
    bool isAligned = (RI.getStackAlignment() >= 16) ||
      RI.needsStackRealignment(MF);
    SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(DstRC, isAligned),
                                      MVT::Other, &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Store);
  }

  return true;
}

unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)Opc);
  if (I == MemOp2RegOpTable.end())
    return 0;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return 0;
  if (UnfoldStore && !FoldedStore)
    return 0;
  return I->second.first;
}

bool X86InstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::TCRETURNri:
  case X86::TCRETURNdi:
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP64r:  // Indirect branch (64-bit).
  case X86::JMP32m:  // Indirect branch through mem.
  case X86::JMP64m:  // Indirect branch through mem (64-bit).
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
  if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
    return true;
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

bool X86InstrInfo::
IgnoreRegisterClassBarriers(const TargetRegisterClass *RC) const {
  // FIXME: Ignore barriers of x87 stack registers for now. We can't
  // allow any loads of these registers before FpGET_ST0_80.
  return RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass ||
    RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass;
}

const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}

unsigned X86InstrInfo::sizeOfImm(const TargetInstrDesc *Desc) {
  switch (Desc->TSFlags & X86II::ImmMask) {
  case X86II::Imm8:   return 1;
  case X86II::Imm16:  return 2;
  case X86II::Imm32:  return 4;
  case X86II::Imm64:  return 8;
  default: assert(0 && "Immediate size not set!");
    return 0;
  }
}

/// isX86_64ExtendedReg - Is the MachineOperand an x86-64 extended register?
/// e.g. r8, xmm8, etc.
bool X86InstrInfo::isX86_64ExtendedReg(const MachineOperand &MO) {
  if (!MO.isReg()) return false;
  switch (MO.getReg()) {
  default: break;
  case X86::R8:    case X86::R9:    case X86::R10:   case X86::R11:
  case X86::R12:   case X86::R13:   case X86::R14:   case X86::R15:
  case X86::R8D:   case X86::R9D:   case X86::R10D:  case X86::R11D:
  case X86::R12D:  case X86::R13D:  case X86::R14D:  case X86::R15D:
  case X86::R8W:   case X86::R9W:   case X86::R10W:  case X86::R11W:
  case X86::R12W:  case X86::R13W:  case X86::R14W:  case X86::R15W:
  case X86::R8B:   case X86::R9B:   case X86::R10B:  case X86::R11B:
  case X86::R12B:  case X86::R13B:  case X86::R14B:  case X86::R15B:
  case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
    return true;
  }
  return false;
}


/// determineREX - Determine if the MachineInstr has to be encoded with an
/// X86-64 REX prefix which specifies 1) 64-bit instructions, 2) non-default
/// operand size, and 3) use of X86-64 extended registers.
unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
  unsigned REX = 0;
  const TargetInstrDesc &Desc = MI.getDesc();

  // Pseudo instructions do not need REX prefix byte.
  if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
    return 0;
  if (Desc.TSFlags & X86II::REX_W)
    REX |= 1 << 3;  // REX.W: 64-bit operand size.

  unsigned NumOps = Desc.getNumOperands();
  if (NumOps) {
    bool isTwoAddr = NumOps > 1 &&
      Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;

    // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
    unsigned i = isTwoAddr ? 1 : 0;
    for (unsigned e = NumOps; i != e; ++i) {
      const MachineOperand& MO = MI.getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (isX86_64NonExtLowByteReg(Reg))
          REX |= 0x40;
      }
    }

    switch (Desc.TSFlags & X86II::FormMask) {
    case X86II::MRMInitReg:
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= (1 << 0) | (1 << 2);
      break;
    case X86II::MRMSrcReg: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 2;
      i = isTwoAddr ? 2 : 1;
      for (unsigned e = NumOps; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (isX86_64ExtendedReg(MO))
          REX |= 1 << 0;
      }
      break;
    }
    case X86II::MRMSrcMem: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 2;
      unsigned Bit = 0;
      i = isTwoAddr ? 2 : 1;
      for (; i != NumOps; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (MO.isReg()) {
          if (isX86_64ExtendedReg(MO))
            REX |= 1 << Bit;
          Bit++;
        }
      }
      break;
    }
    case X86II::MRM0m: case X86II::MRM1m:
    case X86II::MRM2m: case X86II::MRM3m:
    case X86II::MRM4m: case X86II::MRM5m:
    case X86II::MRM6m: case X86II::MRM7m:
    case X86II::MRMDestMem: {
      unsigned e = isTwoAddr ? 5 : 4;
      i = isTwoAddr ? 1 : 0;
      if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
        REX |= 1 << 2;
      unsigned Bit = 0;
      for (; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (MO.isReg()) {
          if (isX86_64ExtendedReg(MO))
            REX |= 1 << Bit;
          Bit++;
        }
      }
      break;
    }
    default: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 0;
      i = isTwoAddr ? 2 : 1;
      for (unsigned e = NumOps; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (isX86_64ExtendedReg(MO))
          REX |= 1 << 2;
      }
      break;
    }
    }
  }
  return REX;
}

/// sizePCRelativeBlockAddress - This method returns the size of a PC
/// relative block address instruction
///
static unsigned sizePCRelativeBlockAddress() {
  return 4;
}

/// sizeGlobalAddress - Give the size of the emission of this global address
///
static unsigned sizeGlobalAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeConstPoolAddress - Give the size of the emission of this constant
/// pool address
///
static unsigned sizeConstPoolAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeExternalSymbolAddress - Give the size of the emission of this external
/// symbol
///
static unsigned sizeExternalSymbolAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeJumpTableAddress - Give the size of the emission of this jump
/// table address
///
static unsigned sizeJumpTableAddress(bool dword) {
  return dword ? 8 : 4;
}

static unsigned sizeConstant(unsigned Size) {
  return Size;
}

static unsigned sizeRegModRMByte() {
  return 1;
}

static unsigned sizeSIBByte() {
  return 1;
}

static unsigned getDisplacementFieldSize(const MachineOperand *RelocOp) {
  unsigned FinalSize = 0;
  // A simple integer displacement doesn't require a relocation.
  if (!RelocOp) {
    FinalSize += sizeConstant(4);
    return FinalSize;
  }

  // Otherwise, this is something that requires a relocation.
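  // All the relocated forms below are counted as 32-bit fields; the dword
  // (8-byte) variants of these helpers are used only for the MOV64 cases
  // in GetInstSizeWithDesc.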
  if (RelocOp->isGlobal()) {
    FinalSize += sizeGlobalAddress(false);
  } else if (RelocOp->isCPI()) {
    FinalSize += sizeConstPoolAddress(false);
  } else if (RelocOp->isJTI()) {
    FinalSize += sizeJumpTableAddress(false);
  } else {
    assert(0 && "Unknown value to relocate!");
  }
  return FinalSize;
}

static unsigned getMemModRMByteSize(const MachineInstr &MI, unsigned Op,
                                    bool IsPIC, bool Is64BitMode) {
  const MachineOperand &Op3 = MI.getOperand(Op+3);
  int DispVal = 0;
  const MachineOperand *DispForReloc = 0;
  unsigned FinalSize = 0;

  // Figure out what sort of displacement we have to handle here.
  if (Op3.isGlobal()) {
    DispForReloc = &Op3;
  } else if (Op3.isCPI()) {
    if (Is64BitMode || IsPIC) {
      DispForReloc = &Op3;
    } else {
      DispVal = 1;
    }
  } else if (Op3.isJTI()) {
    if (Is64BitMode || IsPIC) {
      DispForReloc = &Op3;
    } else {
      DispVal = 1;
    }
  } else {
    DispVal = 1;
  }

  const MachineOperand &Base     = MI.getOperand(Op);
  const MachineOperand &IndexReg = MI.getOperand(Op+2);

  unsigned BaseReg = Base.getReg();

  // Is a SIB byte needed?
  if (IndexReg.getReg() == 0 &&
      (BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) {
    if (BaseReg == 0) {  // Just a displacement?
      // Emit special case [disp32] encoding
      ++FinalSize;
      FinalSize += getDisplacementFieldSize(DispForReloc);
    } else {
      unsigned BaseRegNo = X86RegisterInfo::getX86RegNum(BaseReg);
      if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) {
        // Emit simple indirect register encoding, e.g. [EAX]
        ++FinalSize;
        // Be pessimistic and assume it's a disp32, not a disp8
      } else {
        // Emit the most general non-SIB encoding: [REG+disp32]
        ++FinalSize;
        FinalSize += getDisplacementFieldSize(DispForReloc);
      }
    }

  } else {  // We need a SIB byte, so start by outputting the ModR/M byte first
    assert(IndexReg.getReg() != X86::ESP &&
           IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

    bool ForceDisp32 = false;
    if (BaseReg == 0 || DispForReloc) {
      // Emit the normal disp32 encoding.
      ++FinalSize;
      ForceDisp32 = true;
    } else {
      ++FinalSize;
    }

    FinalSize += sizeSIBByte();

    // Do we need to output a displacement?
    if (DispVal != 0 || ForceDisp32) {
      FinalSize += getDisplacementFieldSize(DispForReloc);
    }
  }
  return FinalSize;
}


static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
                                    const TargetInstrDesc *Desc,
                                    bool IsPIC, bool Is64BitMode) {

  unsigned Opcode = Desc->Opcode;
  unsigned FinalSize = 0;

  // Emit the lock opcode prefix as needed.
  if (Desc->TSFlags & X86II::LOCK) ++FinalSize;

  // Emit segment override opcode prefix as needed.
  switch (Desc->TSFlags & X86II::SegOvrMask) {
  case X86II::FS:
  case X86II::GS:
    ++FinalSize;
    break;
  default: assert(0 && "Invalid segment!");
  case 0: break;  // No segment override!
  }

  // Emit the repeat opcode prefix as needed.
  if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) ++FinalSize;

  // Emit the operand size opcode prefix as needed.
  if (Desc->TSFlags & X86II::OpSize) ++FinalSize;

  // Emit the address size opcode prefix as needed.
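  // (Like the other legacy prefixes counted above, this is a single byte.)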
  if (Desc->TSFlags & X86II::AdSize) ++FinalSize;

  bool Need0FPrefix = false;
  switch (Desc->TSFlags & X86II::Op0Mask) {
  case X86II::TB:  // Two-byte opcode prefix
  case X86II::T8:  // 0F 38
  case X86II::TA:  // 0F 3A
    Need0FPrefix = true;
    break;
  case X86II::REP: break; // already handled.
  case X86II::XS:  // F3 0F
    ++FinalSize;
    Need0FPrefix = true;
    break;
  case X86II::XD:  // F2 0F
    ++FinalSize;
    Need0FPrefix = true;
    break;
  case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
  case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
    ++FinalSize;
    break; // Two-byte opcode prefix
  default: assert(0 && "Invalid prefix!");
  case 0: break;  // No prefix!
  }

  if (Is64BitMode) {
    // REX prefix
    unsigned REX = X86InstrInfo::determineREX(MI);
    if (REX)
      ++FinalSize;
  }

  // 0x0F escape code must be emitted just before the opcode.
  if (Need0FPrefix)
    ++FinalSize;

  switch (Desc->TSFlags & X86II::Op0Mask) {
  case X86II::T8:  // 0F 38
    ++FinalSize;
    break;
  case X86II::TA:  // 0F 3A
    ++FinalSize;
    break;
  }

  // If this is a two-address instruction, skip one of the register operands.
  unsigned NumOps = Desc->getNumOperands();
  unsigned CurOp = 0;
  if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)
    CurOp++;

  switch (Desc->TSFlags & X86II::FormMask) {
  default: assert(0 && "Unknown FormMask value in X86 MachineCodeEmitter!");
  case X86II::Pseudo:
    // Remember the current PC offset; this is the PIC relocation
    // base address.
    switch (Opcode) {
    default:
      break;
    case TargetInstrInfo::INLINEASM: {
      const MachineFunction *MF = MI.getParent()->getParent();
      const char *AsmStr = MI.getOperand(0).getSymbolName();
      const TargetAsmInfo* AI = MF->getTarget().getTargetAsmInfo();
      FinalSize += AI->getInlineAsmLength(AsmStr);
      break;
    }
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      break;
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case X86::DWARF_LOC:
    case X86::FP_REG_KILL:
      break;
    case X86::MOVPC32r: {
      // This emits the "call" portion of this pseudo instruction.
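      // MOVPC32r expands to a CALL (one opcode byte plus a pc-relative
      // immediate); the size of the matching POP32r is added separately
      // in GetInstSizeInBytes below.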
      ++FinalSize;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      break;
    }
    case X86::TLS_tp:
    case X86::TLS_gs_ri:
      FinalSize += 2;
      FinalSize += sizeGlobalAddress(false);
      break;
    }
    CurOp = NumOps;
    break;
  case X86II::RawFrm:
    ++FinalSize;

    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      if (MO.isMBB()) {
        FinalSize += sizePCRelativeBlockAddress();
      } else if (MO.isGlobal()) {
        FinalSize += sizeGlobalAddress(false);
      } else if (MO.isSymbol()) {
        FinalSize += sizeExternalSymbolAddress(false);
      } else if (MO.isImm()) {
        FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      } else {
        assert(0 && "Unknown RawFrm operand!");
      }
    }
    break;

  case X86II::AddRegFrm:
    ++FinalSize;
    ++CurOp;

    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO1.isImm())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64ri)
          dword = true;
        if (MO1.isGlobal()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO1.isSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO1.isCPI())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO1.isJTI())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;

  case X86II::MRMDestReg: {
    ++FinalSize;
    FinalSize += sizeRegModRMByte();
    CurOp += 2;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }
  case X86II::MRMDestMem: {
    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
    CurOp += 5;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }

  case X86II::MRMSrcReg:
    ++FinalSize;
    FinalSize += sizeRegModRMByte();
    CurOp += 2;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;

  case X86II::MRMSrcMem: {
    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
    CurOp += 5;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }

  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    ++FinalSize;
    ++CurOp;
    FinalSize += sizeRegModRMByte();

    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO1.isImm())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64ri32)
          dword = true;
        if (MO1.isGlobal()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO1.isSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO1.isCPI())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO1.isJTI())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;

  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {
    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
    CurOp += 4;

    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO.isImm())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64mi32)
          dword = true;
        if (MO.isGlobal()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO.isSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO.isCPI())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO.isJTI())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;
  }

  case X86II::MRMInitReg:
    ++FinalSize;
    // Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
    FinalSize += sizeRegModRMByte();
    ++CurOp;
    break;
  }

  if (!Desc->isVariadic() && CurOp != NumOps) {
    cerr << "Cannot determine size: ";
    MI.dump();
    cerr << '\n';
    abort();
  }

  return FinalSize;
}


unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  bool IsPIC = (TM.getRelocationModel() == Reloc::PIC_);
  bool Is64BitMode = TM.getSubtargetImpl()->is64Bit();
  unsigned Size = GetInstSizeWithDesc(*MI, &Desc, IsPIC, Is64BitMode);
  if (Desc.getOpcode() == X86::MOVPC32r) {
    Size += GetInstSizeWithDesc(*MI, &get(X86::POP32r), IsPIC, Is64BitMode);
  }
  return Size;
}

/// getGlobalBaseReg - Return a virtual register initialized with the
/// global base register value. Output instructions required to
/// initialize the register in the function entry block, if necessary.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
         "X86-64 PIC uses RIP relative addressing");

  X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
  unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // Insert the set of GlobalBaseReg into the first MBB of the function
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);

  const TargetInstrInfo *TII = TM.getInstrInfo();
  // Operand of MovePCtoStack is completely ignored by asm printer. It's
  // only used in JIT code emission as a displacement to the PC.
  BuildMI(FirstMBB, MBBI, TII->get(X86::MOVPC32r), PC).addImm(0);

  // If we're using vanilla 'GOT' PIC style, we should use relative addressing
  // not to the PC, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
  if (TM.getRelocationModel() == Reloc::PIC_ &&
      TM.getSubtarget<X86Subtarget>().isPICStyleGOT()) {
    GlobalBaseReg =
      RegInfo.createVirtualRegister(X86::GR32RegisterClass);
    BuildMI(FirstMBB, MBBI, TII->get(X86::ADD32ri), GlobalBaseReg)
      .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
  } else {
    GlobalBaseReg = PC;
  }

  X86FI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}