// AArch64MCCodeEmitter.cpp revision a39058aaed4540fc37681cad728b99546595b2e8
1//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the AArch64MCCodeEmitter class.
11//
12//===----------------------------------------------------------------------===//
13
14#define DEBUG_TYPE "mccodeemitter"
15#include "MCTargetDesc/AArch64FixupKinds.h"
16#include "MCTargetDesc/AArch64MCExpr.h"
17#include "MCTargetDesc/AArch64MCTargetDesc.h"
18#include "Utils/AArch64BaseInfo.h"
19#include "llvm/MC/MCCodeEmitter.h"
20#include "llvm/MC/MCContext.h"
21#include "llvm/MC/MCInst.h"
22#include "llvm/MC/MCInstrInfo.h"
23#include "llvm/MC/MCRegisterInfo.h"
24#include "llvm/MC/MCSubtargetInfo.h"
25#include "llvm/Support/ErrorHandling.h"
26#include "llvm/Support/raw_ostream.h"
27
28using namespace llvm;
29
namespace {
/// AArch64MCCodeEmitter - Converts AArch64 MCInsts into their 32-bit binary
/// encodings, recording MCFixups for any operands that are still symbolic
/// (labels, :lo12:-style modifiers, TLS references, ...).
class AArch64MCCodeEmitter : public MCCodeEmitter {
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  const MCInstrInfo &MCII;
  const MCSubtargetInfo &STI;
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
                       MCContext &ctx)
    : MCII(mcii), STI(sti), Ctx(ctx) {
  }

  ~AArch64MCCodeEmitter() {}

  // Encode the 12-bit immediate of an ADD/SUB instruction, attaching an
  // appropriate :lo12:/TLS fixup when the operand is still an expression.
  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  // Encode the page-relative label operand of an ADRP instruction.
  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  // Encode the scaled unsigned 12-bit offset of a load/store. MemSize is the
  // access size in bytes and selects the correctly-scaled fixup kind.
  template<int MemSize>
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups) const {
    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
  }

  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    int MemSize) const;

  // Encode the shift amount of the 32/64-bit "LSL" alias of UBFM as the
  // underlying immr/imms field pair.
  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;


  // Labels are handled mostly the same way: a symbol is needed, and
  // just gets some fixup attached.
  template<AArch64::Fixups fixupDesired>
  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups) const;

  // Encode the label of a load-literal instruction; a :gottprel:-modified
  // symbol gets a dedicated TLS fixup instead of the generic one.
  unsigned  getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;


  // Encode the 16-bit immediate plus shift of a MOVZ/MOVN/MOVK-family
  // instruction, attaching a :abs_gN:/TLS fixup for symbolic operands.
  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups) const;


  // Common helper: immediates are returned directly, expressions become a
  // fixup of the requested kind with a placeholder value of 0.
  unsigned getAddressWithFixup(const MCOperand &MO,
                               unsigned FixupKind,
                               SmallVectorImpl<MCFixup> &Fixups) const;


  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups) const;


  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != 4; ++i) {
      EmitByte(Val & 0xff, OS);
      Val >>= 8;
    }
  }


  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;

  // Post-encoder hooks (referenced from TableGen patterns): each patches
  // fields of an already-encoded instruction.
  unsigned fixFCMPImm(const MCInst &MI, unsigned EncodedValue) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;


};

} // end anonymous namespace
127
128unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
129                                       unsigned FixupKind,
130                                       SmallVectorImpl<MCFixup> &Fixups) const {
131  if (!MO.isExpr()) {
132    // This can occur for manually decoded or constructed MCInsts, but neither
133    // the assembly-parser nor instruction selection will currently produce an
134    // MCInst that's not a symbol reference.
135    assert(MO.isImm() && "Unexpected address requested");
136    return MO.getImm();
137  }
138
139  const MCExpr *Expr = MO.getExpr();
140  MCFixupKind Kind = MCFixupKind(FixupKind);
141  Fixups.push_back(MCFixup::Create(0, Expr, Kind));
142
143  return 0;
144}
145
146unsigned AArch64MCCodeEmitter::
147getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
148                       SmallVectorImpl<MCFixup> &Fixups,
149                       int MemSize) const {
150  const MCOperand &ImmOp = MI.getOperand(OpIdx);
151  if (ImmOp.isImm())
152    return ImmOp.getImm();
153
154  assert(ImmOp.isExpr() && "Unexpected operand type");
155  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
156  unsigned FixupKind;
157
158
159  switch (Expr->getKind()) {
160  default: llvm_unreachable("Unexpected operand modifier");
161  case AArch64MCExpr::VK_AARCH64_LO12: {
162    unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
163                                AArch64::fixup_a64_ldst16_lo12,
164                                AArch64::fixup_a64_ldst32_lo12,
165                                AArch64::fixup_a64_ldst64_lo12,
166                                AArch64::fixup_a64_ldst128_lo12 };
167    assert(MemSize <= 16 && "Invalid fixup for operation");
168    FixupKind = FixupsBySize[Log2_32(MemSize)];
169    break;
170  }
171  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
172    assert(MemSize == 8 && "Invalid fixup for operation");
173    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
174    break;
175  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:  {
176    unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12,
177                                AArch64::fixup_a64_ldst16_dtprel_lo12,
178                                AArch64::fixup_a64_ldst32_dtprel_lo12,
179                                AArch64::fixup_a64_ldst64_dtprel_lo12 };
180    assert(MemSize <= 8 && "Invalid fixup for operation");
181    FixupKind = FixupsBySize[Log2_32(MemSize)];
182    break;
183  }
184  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
185    unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
186                                AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
187                                AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
188                                AArch64::fixup_a64_ldst64_dtprel_lo12_nc };
189    assert(MemSize <= 8 && "Invalid fixup for operation");
190    FixupKind = FixupsBySize[Log2_32(MemSize)];
191    break;
192  }
193  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
194    assert(MemSize == 8 && "Invalid fixup for operation");
195    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
196    break;
197  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{
198    unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12,
199                                AArch64::fixup_a64_ldst16_tprel_lo12,
200                                AArch64::fixup_a64_ldst32_tprel_lo12,
201                                AArch64::fixup_a64_ldst64_tprel_lo12 };
202    assert(MemSize <= 8 && "Invalid fixup for operation");
203    FixupKind = FixupsBySize[Log2_32(MemSize)];
204    break;
205  }
206  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
207    unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12_nc,
208                                AArch64::fixup_a64_ldst16_tprel_lo12_nc,
209                                AArch64::fixup_a64_ldst32_tprel_lo12_nc,
210                                AArch64::fixup_a64_ldst64_tprel_lo12_nc };
211    assert(MemSize <= 8 && "Invalid fixup for operation");
212    FixupKind = FixupsBySize[Log2_32(MemSize)];
213    break;
214  }
215  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
216    assert(MemSize == 8 && "Invalid fixup for operation");
217    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
218    break;
219  }
220
221  return getAddressWithFixup(ImmOp, FixupKind, Fixups);
222}
223
224unsigned
225AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
226                                       SmallVectorImpl<MCFixup> &Fixups) const {
227  const MCOperand &MO = MI.getOperand(OpIdx);
228  if (MO.isImm())
229    return static_cast<unsigned>(MO.getImm());
230
231  assert(MO.isExpr());
232
233  unsigned FixupKind = 0;
234  switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
235  default: llvm_unreachable("Invalid expression modifier");
236  case AArch64MCExpr::VK_AARCH64_LO12:
237    FixupKind = AArch64::fixup_a64_add_lo12; break;
238  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
239    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
240  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
241    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
242  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
243    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
244  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
245    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
246  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
247    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
248  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
249    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
250  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
251    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
252  }
253
254  return getAddressWithFixup(MO, FixupKind, Fixups);
255}
256
257unsigned
258AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
259                                       SmallVectorImpl<MCFixup> &Fixups) const {
260
261  const MCOperand &MO = MI.getOperand(OpIdx);
262  if (MO.isImm())
263    return static_cast<unsigned>(MO.getImm());
264
265  assert(MO.isExpr());
266
267  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
268  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
269    Modifier = Expr->getKind();
270
271  unsigned FixupKind = 0;
272  switch(Modifier) {
273  case AArch64MCExpr::VK_AARCH64_None:
274    FixupKind = AArch64::fixup_a64_adr_prel_page;
275    break;
276  case AArch64MCExpr::VK_AARCH64_GOT:
277    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
278    break;
279  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
280    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
281    break;
282  case AArch64MCExpr::VK_AARCH64_TLSDESC:
283    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
284    break;
285  default:
286    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
287  }
288
289  return getAddressWithFixup(MO, FixupKind, Fixups);
290}
291
292unsigned
293AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
294                                       SmallVectorImpl<MCFixup> &Fixups) const {
295
296  const MCOperand &MO = MI.getOperand(OpIdx);
297  assert(MO.isImm() && "Only immediate expected for shift");
298
299  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
300}
301
302unsigned
303AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
304                                       SmallVectorImpl<MCFixup> &Fixups) const {
305
306  const MCOperand &MO = MI.getOperand(OpIdx);
307  assert(MO.isImm() && "Only immediate expected for shift");
308
309  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
310}
311
312
313template<AArch64::Fixups fixupDesired> unsigned
314AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
315                                      unsigned OpIdx,
316                                      SmallVectorImpl<MCFixup> &Fixups) const {
317  const MCOperand &MO = MI.getOperand(OpIdx);
318
319  if (MO.isExpr())
320    return getAddressWithFixup(MO, fixupDesired, Fixups);
321
322  assert(MO.isImm());
323  return MO.getImm();
324}
325
326unsigned
327AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
328                                       unsigned OpIdx,
329                                       SmallVectorImpl<MCFixup> &Fixups) const {
330  const MCOperand &MO = MI.getOperand(OpIdx);
331
332  if (MO.isImm())
333    return MO.getImm();
334
335  assert(MO.isExpr());
336
337  unsigned FixupKind;
338  if (isa<AArch64MCExpr>(MO.getExpr())) {
339    assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
340           == AArch64MCExpr::VK_AARCH64_GOTTPREL
341           && "Invalid symbol modifier for literal load");
342    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
343  } else {
344    FixupKind = AArch64::fixup_a64_ld_prel;
345  }
346
347  return getAddressWithFixup(MO, FixupKind, Fixups);
348}
349
350
351unsigned
352AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
353                                       const MCOperand &MO,
354                                       SmallVectorImpl<MCFixup> &Fixups) const {
355  if (MO.isReg()) {
356    return Ctx.getRegisterInfo().getEncodingValue(MO.getReg());
357  } else if (MO.isImm()) {
358    return static_cast<unsigned>(MO.getImm());
359  }
360
361  llvm_unreachable("Unable to encode MCOperand!");
362  return 0;
363}
364
unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  // Encode a MOVZ/MOVN/MOVK-style operand pair: OpIdx is the 16-bit
  // immediate, OpIdx+1 the hw shift selector (0-3, placed in bits 17-16 of
  // the returned value). Symbolic immediates select a :abs_gN:/TLS movw
  // fixup from their modifier; the immediate field is then left for the
  // fixup to fill in.
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);

  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;

  if (UImm16MO.isImm()) {
    Result |= UImm16MO.getImm();
    return Result;
  }

  // Straight modifier -> fixup-kind mapping; any other modifier on a
  // move-wide immediate is invalid.
  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  AArch64::Fixups requestedFixup;
  switch (A64E->getKind()) {
  default: llvm_unreachable("unexpected expression modifier");
  case AArch64MCExpr::VK_AARCH64_ABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G3:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
  }

  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
}
430
431unsigned AArch64MCCodeEmitter::fixFCMPImm(const MCInst &MI,
432                                          unsigned EncodedValue) const {
433    // For FCMP[E] Rn, #0.0, the Rm field has a canonical representation
434    // with 0s, but is architecturally ignored
435    EncodedValue &= ~0x1f0000u;
436
437    return EncodedValue;
438}
439
440template<int hasRs, int hasRt2> unsigned
441AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
442                                            unsigned EncodedValue) const {
443  if (!hasRs) EncodedValue |= 0x001F0000;
444  if (!hasRt2) EncodedValue |= 0x00007C00;
445
446  return EncodedValue;
447}
448
449unsigned
450AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
451  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
452  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
453  // job to ensure that any bits possibly affected by this are 0. This means we
454  // must zero out bit 30 (essentially emitting a MOVN).
455  MCOperand UImm16MO = MI.getOperand(1);
456
457  // Nothing to do if there's no fixup.
458  if (UImm16MO.isImm())
459    return EncodedValue;
460
461  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
462  switch (A64E->getKind()) {
463  case AArch64MCExpr::VK_AARCH64_SABS_G0:
464  case AArch64MCExpr::VK_AARCH64_SABS_G1:
465  case AArch64MCExpr::VK_AARCH64_SABS_G2:
466  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
467  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
468  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
469  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
470  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
471  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
472  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
473    return EncodedValue & ~(1u << 30);
474  default:
475    // Nothing to do for an unsigned fixup.
476    return EncodedValue;
477  }
478
479  llvm_unreachable("Should have returned by now");
480}
481
482unsigned
483AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
484                                 unsigned EncodedValue) const {
485  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
486  // (i.e. all bits 1) but is ignored by the processor.
487  EncodedValue |= 0x1f << 10;
488  return EncodedValue;
489}
490
// Factory entry point registered with the MC layer; MRI is part of the
// generic factory signature but is not needed by this emitter.
MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, STI, Ctx);
}
497
498void AArch64MCCodeEmitter::
499EncodeInstruction(const MCInst &MI, raw_ostream &OS,
500                  SmallVectorImpl<MCFixup> &Fixups) const {
501  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
502    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
503    // following (BLR) instruction. It doesn't emit any code itself so it
504    // doesn't go through the normal TableGenerated channels.
505    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
506    const MCExpr *Expr;
507    Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
508    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
509    return;
510  }
511
512  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
513
514  EmitInstruction(Binary, OS);
515}
516
517
518#include "AArch64GenMCCodeEmitter.inc"
519