AArch64MCCodeEmitter.cpp revision f67c7d7e8c5949037e85dd233876989c1fea7099
//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class AArch64MCCodeEmitter : public MCCodeEmitter {
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}

  ~AArch64MCCodeEmitter() {}

  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

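  // The instruction definitions instantiate this with the memory access size
  // (in bytes) as a template parameter; it just forwards to the non-template
  // implementation below, which selects a fixup kind based on that size.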
  template<int MemSize>
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups) const {
    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
  }

  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  int MemSize) const;

  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;

  // Labels are handled mostly the same way: a symbol is needed, and
  // just gets some fixup attached.
  template<AArch64::Fixups fixupDesired>
  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getAddressWithFixup(const MCOperand &MO,
                               unsigned FixupKind,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups) const;

  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != 4; ++i) {
      EmitByte(Val & 0xff, OS);
      Val >>= 8;
    }
  }

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
};

} // end anonymous namespace

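// If the operand is already a plain immediate it needs no relocation;
// otherwise record a fixup of the requested kind against the expression and
// return 0, leaving the affected bits for the assembler backend or linker.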
unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
                                       unsigned FixupKind,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  if (!MO.isExpr()) {
    // This can occur for manually decoded or constructed MCInsts, but neither
    // the assembly-parser nor instruction selection will currently produce an
    // MCInst that's not a symbol reference.
    assert(MO.isImm() && "Unexpected address requested");
    return MO.getImm();
  }

  const MCExpr *Expr = MO.getExpr();
  MCFixupKind Kind = MCFixupKind(FixupKind);
  Fixups.push_back(MCFixup::Create(0, Expr, Kind));

  return 0;
}

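// Unsigned 12-bit offsets may also be given symbolically (e.g. via a :lo12:
// modifier). Since the offset is scaled by the access size, the fixup kind
// depends on both the symbol modifier and MemSize.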
unsigned AArch64MCCodeEmitter::
getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                       SmallVectorImpl<MCFixup> &Fixups,
                       int MemSize) const {
  const MCOperand &ImmOp = MI.getOperand(OpIdx);
  if (ImmOp.isImm())
    return ImmOp.getImm();

  assert(ImmOp.isExpr() && "Unexpected operand type");
  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
  unsigned FixupKind;

  switch (Expr->getKind()) {
  default: llvm_unreachable("Unexpected operand modifier");
  case AArch64MCExpr::VK_AARCH64_LO12: {
    static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
                                             AArch64::fixup_a64_ldst16_lo12,
                                             AArch64::fixup_a64_ldst32_lo12,
                                             AArch64::fixup_a64_ldst64_lo12,
                                             AArch64::fixup_a64_ldst128_lo12 };
    assert(MemSize <= 16 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12,
      AArch64::fixup_a64_ldst16_dtprel_lo12,
      AArch64::fixup_a64_ldst32_dtprel_lo12,
      AArch64::fixup_a64_ldst64_dtprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst64_dtprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12,
      AArch64::fixup_a64_ldst16_tprel_lo12,
      AArch64::fixup_a64_ldst32_tprel_lo12,
      AArch64::fixup_a64_ldst64_tprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12_nc,
      AArch64::fixup_a64_ldst16_tprel_lo12_nc,
      AArch64::fixup_a64_ldst32_tprel_lo12_nc,
      AArch64::fixup_a64_ldst64_tprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
    break;
  }

  return getAddressWithFixup(ImmOp, FixupKind, Fixups);
}

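// ADD/SUB (immediate) also accepts :lo12:-style symbol modifiers. Unlike
// loads and stores, the immediate is not scaled, so a single fixup kind per
// modifier suffices.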
unsigned
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned FixupKind = 0;
  switch (cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
  default: llvm_unreachable("Invalid expression modifier");
  case AArch64MCExpr::VK_AARCH64_LO12:
    FixupKind = AArch64::fixup_a64_add_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}

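// ADRP computes the 4KB page address of a symbol; the symbol modifier (GOT,
// GOTTPREL, TLSDESC or none) determines which page-relative fixup is needed.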
unsigned
AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
    Modifier = Expr->getKind();

  unsigned FixupKind = 0;
  switch (Modifier) {
  case AArch64MCExpr::VK_AARCH64_None:
    FixupKind = AArch64::fixup_a64_adr_prel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOT:
    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC:
    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
    break;
  default:
    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}

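// LSL #imm is an alias of UBFM: for the 32-bit form immr = (32 - imm) mod 32
// and imms = 31 - imm. Both fields are packed into a single operand value
// here, immr in bits 0-5 and imms in bits 6-11; the 64-bit variant below is
// analogous, working mod 64.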
unsigned
AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
}

unsigned
AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
}

template<AArch64::Fixups fixupDesired> unsigned
AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
                                      unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isExpr())
    return getAddressWithFixup(MO, fixupDesired, Fixups);

  assert(MO.isImm());
  return MO.getImm();
}

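// Load-literal (a PC-relative LDR) uses a 19-bit, 4-byte-scaled offset. A
// plain label gets the generic ld_prel fixup; the only modifier the assert
// below permits is :gottprel:, for loading a TLS variable's GOT entry.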
unsigned
AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
                                       unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr());

  unsigned FixupKind;
  if (isa<AArch64MCExpr>(MO.getExpr())) {
    assert(cast<AArch64MCExpr>(MO.getExpr())->getKind()
           == AArch64MCExpr::VK_AARCH64_GOTTPREL
           && "Invalid symbol modifier for literal load");
    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
  } else {
    FixupKind = AArch64::fixup_a64_ld_prel;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}

unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                       const MCOperand &MO,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  if (MO.isReg()) {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
  } else if (MO.isImm()) {
    return static_cast<unsigned>(MO.getImm());
  }

  llvm_unreachable("Unable to encode MCOperand!");
}

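// Move-wide immediates come as an (imm16, shift) operand pair. The shift is
// packed above bit 16 so the TableGen'erated code can slice both fields back
// out of the single value returned; a symbolic imm16 instead requests one of
// the :abs_gN:/TLS MOVW fixups.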
unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);

  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;

  if (UImm16MO.isImm()) {
    Result |= UImm16MO.getImm();
    return Result;
  }

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  AArch64::Fixups requestedFixup;
  switch (A64E->getKind()) {
  default: llvm_unreachable("unexpected expression modifier");
  case AArch64MCExpr::VK_AARCH64_ABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G3:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
  }

  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
}

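// Post-encoder hook: load/store exclusive forms that lack a status register
// (Rs, bits 20-16) or a second transfer register (Rt2, bits 14-10) must
// encode the missing field as all-ones, i.e. register 31.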
template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned
AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  switch (A64E->getKind()) {
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    return EncodedValue & ~(1u << 30);
  default:
    // Nothing to do for an unsigned fixup.
    return EncodedValue;
  }

  llvm_unreachable("Should have returned by now");
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

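// Creation function with the standard signature expected by the target
// registry; only the MCContext is actually needed by this emitter.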
MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(Ctx);
}

void AArch64MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
    const MCExpr *Expr =
        AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
    return;
  }

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);

  EmitInstruction(Binary, OS);
}

#include "AArch64GenMCCodeEmitter.inc"