AArch64MCCodeEmitter.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class AArch64MCCodeEmitter : public MCCodeEmitter {
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}

  ~AArch64MCCodeEmitter() {}

  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  template<int MemSize>
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const {
    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, STI, MemSize);
  }

  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI,
                                  int MemSize) const;

  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups,
                                   const MCSubtargetInfo &STI) const;
  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups,
                                   const MCSubtargetInfo &STI) const;

  unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  // Labels are handled mostly the same way: a symbol is needed, and
  // just gets some fixup attached.
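  // Which fixup is attached comes from the fixupDesired template parameter
  // on getLabelOpValue below, so a single implementation serves every
  // PC-relative label form.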
  template<AArch64::Fixups fixupDesired>
  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  unsigned getAddressWithFixup(const MCOperand &MO,
                               unsigned FixupKind,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != 4; ++i) {
      EmitByte(Val & 0xff, OS);
      Val >>= 8;
    }
  }

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
                                       unsigned FixupKind,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  if (!MO.isExpr()) {
    // This can occur for manually decoded or constructed MCInsts, but neither
    // the assembly-parser nor instruction selection will currently produce an
    // MCInst that's not a symbol reference.
    assert(MO.isImm() && "Unexpected address requested");
    return MO.getImm();
  }

  const MCExpr *Expr = MO.getExpr();
  MCFixupKind Kind = MCFixupKind(FixupKind);
  Fixups.push_back(MCFixup::Create(0, Expr, Kind));

  return 0;
}
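// Note on the zero return above: it leaves the target field of the
// instruction word empty so that the recorded MCFixup can later be applied
// over it. Resolution happens in the assembler backend (AArch64AsmBackend at
// this revision), either by patching the bits directly once the symbol's
// value is known, or by emitting an ELF relocation when it isn't known at
// assembly time.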
unsigned AArch64MCCodeEmitter::
getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                       SmallVectorImpl<MCFixup> &Fixups,
                       const MCSubtargetInfo &STI,
                       int MemSize) const {
  const MCOperand &ImmOp = MI.getOperand(OpIdx);
  if (ImmOp.isImm())
    return ImmOp.getImm();

  assert(ImmOp.isExpr() && "Unexpected operand type");
  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
  unsigned FixupKind;

  switch (Expr->getKind()) {
  default: llvm_unreachable("Unexpected operand modifier");
  case AArch64MCExpr::VK_AARCH64_LO12: {
    static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
                                             AArch64::fixup_a64_ldst16_lo12,
                                             AArch64::fixup_a64_ldst32_lo12,
                                             AArch64::fixup_a64_ldst64_lo12,
                                             AArch64::fixup_a64_ldst128_lo12 };
    assert(MemSize <= 16 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12,
      AArch64::fixup_a64_ldst16_dtprel_lo12,
      AArch64::fixup_a64_ldst32_dtprel_lo12,
      AArch64::fixup_a64_ldst64_dtprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst64_dtprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12,
      AArch64::fixup_a64_ldst16_tprel_lo12,
      AArch64::fixup_a64_ldst32_tprel_lo12,
      AArch64::fixup_a64_ldst64_tprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12_nc,
      AArch64::fixup_a64_ldst16_tprel_lo12_nc,
      AArch64::fixup_a64_ldst32_tprel_lo12_nc,
      AArch64::fixup_a64_ldst64_tprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
    break;
  }

  return getAddressWithFixup(ImmOp, FixupKind, Fixups, STI);
}
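// Worked example (informal): for "ldr x0, [x1, #:lo12:var]" the access is
// 8 bytes wide, so MemSize == 8 and Log2_32(8) == 3 selects
// fixup_a64_ldst64_lo12 from the table above. Only the plain :lo12: case
// tolerates MemSize == 16, which covers the 128-bit FP/SIMD loads and
// stores; the TLS variants stop at 8 bytes.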
unsigned
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned FixupKind = 0;
  switch (cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
  default: llvm_unreachable("Invalid expression modifier");
  case AArch64MCExpr::VK_AARCH64_LO12:
    FixupKind = AArch64::fixup_a64_add_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}

unsigned
AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
    Modifier = Expr->getKind();

  unsigned FixupKind = 0;
  switch (Modifier) {
  case AArch64MCExpr::VK_AARCH64_None:
    FixupKind = AArch64::fixup_a64_adr_prel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOT:
    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC:
    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
    break;
  default:
    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
  }

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}

unsigned
AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  // "lsl Rd, Rn, #s" is an alias of "ubfm Rd, Rn, #((32 - s) mod 32),
  // #(31 - s)": immr fills the low 6 bits of the operand, imms the next 6.
  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
}

unsigned
AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  // 64-bit counterpart of the above: immr = (64 - s) mod 64, imms = 63 - s.
  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
}
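// The NEON shift-by-immediate helpers below implement biased encodings: the
// assembly-level shift amount is stored offset by the element width (right
// shifts as width - shift, left shifts as shift - width), with the
// element-size marker bits supplied by the TableGen'erated instruction
// patterns. This description is inferred from the arithmetic below rather
// than stated in the original source.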
unsigned AArch64MCCodeEmitter::getShiftRightImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 8 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 16 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 32 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 64 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 8;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 16;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 32;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 64;
}

template<AArch64::Fixups fixupDesired> unsigned
AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
                                      unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isExpr())
    return getAddressWithFixup(MO, fixupDesired, Fixups, STI);

  assert(MO.isImm());
  return MO.getImm();
}

unsigned
AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
                                             unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr());

  unsigned FixupKind;
  if (isa<AArch64MCExpr>(MO.getExpr())) {
    assert(cast<AArch64MCExpr>(MO.getExpr())->getKind()
               == AArch64MCExpr::VK_AARCH64_GOTTPREL
           && "Invalid symbol modifier for literal load");
    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
  } else {
    FixupKind = AArch64::fixup_a64_ld_prel;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}

unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                        const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
  } else if (MO.isImm()) {
    return static_cast<unsigned>(MO.getImm());
  }

  llvm_unreachable("Unable to encode MCOperand!");
  return 0;
}
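// For the move-wide form below, the (imm16, shift) operand pair is packed
// into one value: the shift operand (the instruction's "hw" field) sits
// above bit 16 and the 16-bit immediate, or a fixup standing in for it,
// fills the low half. The source forms this handles look like the usual
// ELF move-wide idiom, e.g.:
//   movz x0, #:abs_g1:sym      // bits [31:16] of sym's address
//   movk x0, #:abs_g0_nc:sym   // bits [15:0], no overflow check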
unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);

  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;

  if (UImm16MO.isImm()) {
    Result |= UImm16MO.getImm();
    return Result;
  }

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  AArch64::Fixups requestedFixup;
  switch (A64E->getKind()) {
  default: llvm_unreachable("unexpected expression modifier");
  case AArch64MCExpr::VK_AARCH64_ABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G3:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
  }

  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups, STI);
}

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}
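// Informal note on the masks above: 0x001F0000 forces the Rs field
// (bits 20:16) to 0b11111, and 0x00007C00 does the same for Rt2
// (bits 14:10). Exclusive loads and stores that take no status register or
// second data register are specified with those fields all-ones, so a plain
// "ldxr x0, [x1]" needs both patched in.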
unsigned
AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                              const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  switch (A64E->getKind()) {
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    return EncodedValue & ~(1u << 30);
  default:
    // Nothing to do for an unsigned fixup.
    return EncodedValue;
  }
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(Ctx);
}

void AArch64MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
    const MCExpr *Expr;
    Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
    return;
  }

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);

  EmitInstruction(Binary, OS);
}

#include "AArch64GenMCCodeEmitter.inc"
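// How this file is wired in (a sketch, not part of this translation unit):
// AArch64MCTargetDesc.cpp registers the factory above with the
// TargetRegistry so the MC layer can construct emitters on demand, along
// the lines of
//
//   extern "C" void LLVMInitializeAArch64TargetMC() {
//     ...
//     TargetRegistry::RegisterMCCodeEmitter(TheAArch64leTarget,
//                                           createAArch64MCCodeEmitter);
//   }
//
// The exact target object name (TheAArch64leTarget vs. TheAArch64Target)
// depends on whether this revision predates big-endian support.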