AArch64MCCodeEmitter.cpp revision 19fdc268c316b3b0bdcb2b558449819f4f402d6a
//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class AArch64MCCodeEmitter : public MCCodeEmitter {
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}

  ~AArch64MCCodeEmitter() {}

  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  template<int MemSize>
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups) const {
    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
  }

  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  int MemSize) const;

  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;

  // Labels are handled mostly the same way: a symbol is needed, and it just
  // gets an appropriate fixup attached.
  template<AArch64::Fixups fixupDesired>
  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getAddressWithFixup(const MCOperand &MO,
                               unsigned FixupKind,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups) const;

  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
    // Output the constant in little-endian byte order.
    for (unsigned i = 0; i != 4; ++i) {
      EmitByte(Val & 0xff, OS);
      Val >>= 8;
    }
  }
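
  // For example, Val == 0x11223344 is written to OS as the byte sequence
  // 0x44, 0x33, 0x22, 0x11.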

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
};

} // end anonymous namespace

unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
                                       unsigned FixupKind,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  if (!MO.isExpr()) {
    // This can occur for manually decoded or constructed MCInsts, but neither
    // the assembly parser nor instruction selection will currently produce an
    // operand that's not a symbol reference.
    assert(MO.isImm() && "Unexpected address requested");
    return MO.getImm();
  }

  const MCExpr *Expr = MO.getExpr();
  MCFixupKind Kind = MCFixupKind(FixupKind);
  Fixups.push_back(MCFixup::Create(0, Expr, Kind));

  return 0;
}
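
// For example, encoding "b target" before layout leaves the immediate field
// zero and records an MCFixup at offset 0 of the instruction; the assembler
// backend later resolves the fixup against "target", or emits a relocation if
// it can't.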

unsigned AArch64MCCodeEmitter::
getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                       SmallVectorImpl<MCFixup> &Fixups,
                       int MemSize) const {
  const MCOperand &ImmOp = MI.getOperand(OpIdx);
  if (ImmOp.isImm())
    return ImmOp.getImm();

  assert(ImmOp.isExpr() && "Unexpected operand type");
  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
  unsigned FixupKind;

  switch (Expr->getKind()) {
  default: llvm_unreachable("Unexpected operand modifier");
  case AArch64MCExpr::VK_AARCH64_LO12: {
    static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
                                             AArch64::fixup_a64_ldst16_lo12,
                                             AArch64::fixup_a64_ldst32_lo12,
                                             AArch64::fixup_a64_ldst64_lo12,
                                             AArch64::fixup_a64_ldst128_lo12 };
    assert(MemSize <= 16 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12,
      AArch64::fixup_a64_ldst16_dtprel_lo12,
      AArch64::fixup_a64_ldst32_dtprel_lo12,
      AArch64::fixup_a64_ldst64_dtprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst64_dtprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12,
      AArch64::fixup_a64_ldst16_tprel_lo12,
      AArch64::fixup_a64_ldst32_tprel_lo12,
      AArch64::fixup_a64_ldst64_tprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12_nc,
      AArch64::fixup_a64_ldst16_tprel_lo12_nc,
      AArch64::fixup_a64_ldst32_tprel_lo12_nc,
      AArch64::fixup_a64_ldst64_tprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
    break;
  }

  return getAddressWithFixup(ImmOp, FixupKind, Fixups);
}
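
// For example, "ldr x0, [x1, #:lo12:var]" is an 8-byte (MemSize == 8) access,
// so Log2_32(8) == 3 selects fixup_a64_ldst64_lo12: the low twelve bits of
// var's address, scaled down by the access size, land in the uimm12 offset
// field.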

unsigned
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned FixupKind = 0;
  switch (cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
  default: llvm_unreachable("Invalid expression modifier");
  case AArch64MCExpr::VK_AARCH64_LO12:
    FixupKind = AArch64::fixup_a64_add_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}
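
// This immediate is the second half of the usual small-code-model address
// materialisation sequence:
//   adrp x0, var             (page address of var)
//   add  x0, x0, #:lo12:var  (fixup_a64_add_lo12 fills in the page offset)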

unsigned
AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
    Modifier = Expr->getKind();

  unsigned FixupKind = 0;
  switch (Modifier) {
  case AArch64MCExpr::VK_AARCH64_None:
    FixupKind = AArch64::fixup_a64_adr_prel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOT:
    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC:
    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
    break;
  default:
    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}
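
// A plain "adrp x0, var" gets fixup_a64_adr_prel_page, the 4KB page of var
// relative to the page containing this instruction; "adrp x0, :got:var"
// instead targets the page of var's GOT entry.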

unsigned
AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((32 - MO.getImm()) & 0x1f) | ((31 - MO.getImm()) << 6);
}

unsigned
AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((64 - MO.getImm()) & 0x3f) | ((63 - MO.getImm()) << 6);
}
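
// LSL #shift is an alias of UBFM Rd, Rn, #(-shift mod size), #(size-1-shift).
// For example, "lsl x0, x1, #3" encodes immr == (64 - 3) & 0x3f == 61 in the
// low six bits of the combined operand and imms == 63 - 3 == 60 in the next
// six.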

unsigned AArch64MCCodeEmitter::getShiftRightImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 8 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 16 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 32 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 64 - MI.getOperand(Op).getImm();
}
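
// The AdvSIMD shift-right-by-immediate encodings store 2*esize - shift in
// immh:immb, and the immh bits that select the element size are fixed by each
// instruction pattern, leaving esize - shift for these helpers. For example,
// "sshr v0.2d, v1.2d, #8" yields 64 - 8 == 56 for the variable bits.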

template<AArch64::Fixups fixupDesired> unsigned
AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
                                      unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isExpr())
    return getAddressWithFixup(MO, fixupDesired, Fixups);

  assert(MO.isImm());
  return MO.getImm();
}

unsigned
AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
                                       unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr());

  unsigned FixupKind;
  if (isa<AArch64MCExpr>(MO.getExpr())) {
    assert(cast<AArch64MCExpr>(MO.getExpr())->getKind() ==
               AArch64MCExpr::VK_AARCH64_GOTTPREL &&
           "Invalid symbol modifier for literal load");
    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
  } else {
    FixupKind = AArch64::fixup_a64_ld_prel;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}
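
// A plain literal load such as "ldr x0, some_label" takes the generic
// fixup_a64_ld_prel (a 19-bit, 4-byte-scaled PC-relative offset); only the
// :gottprel: modifier, which loads a thread-pointer offset via the GOT, needs
// the specialised fixup above.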

unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                       const MCOperand &MO,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  if (MO.isReg()) {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
  } else if (MO.isImm()) {
    return static_cast<unsigned>(MO.getImm());
  }

  llvm_unreachable("Unable to encode MCOperand!");
}

unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);

  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;

  if (UImm16MO.isImm()) {
    Result |= UImm16MO.getImm();
    return Result;
  }

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  AArch64::Fixups requestedFixup;
  switch (A64E->getKind()) {
  default: llvm_unreachable("unexpected expression modifier");
  case AArch64MCExpr::VK_AARCH64_ABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G3:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
  }

  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
}
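
// The shift operand is packed above the 16-bit immediate field, so e.g.
// "movz x0, #:abs_g1:var" leaves the low sixteen bits zero, records
// fixup_a64_movw_uabs_g1, and relies on that fixup to insert bits [31:16] of
// var's address.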

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}
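
// 0x001F0000 sets bits 20-16 (the Rs field) and 0x00007C00 sets bits 14-10
// (the Rt2 field) to 0b11111: register fields that a particular load/store
// exclusive variant doesn't use are specified to be assembled as all-ones.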

unsigned
AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  switch (A64E->getKind()) {
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    return EncodedValue & ~(1u << 30);
  default:
    // Nothing to do for an unsigned fixup.
    return EncodedValue;
  }

  llvm_unreachable("Should have returned by now");
}
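
// Bit 30 is the opc bit distinguishing MOVZ (opc == 10) from MOVN (opc == 00).
// With a signed modifier such as #:tprel_g0: the right choice depends on the
// sign of the final value, so it is deferred to whoever applies the fixup;
// clearing the bit here gives that code a known baseline.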

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(Ctx);
}

void AArch64MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself, so it
    // doesn't go through the normal TableGen'erated channels.
    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
    const MCExpr *Expr =
        AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
    return;
  }

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);

  EmitInstruction(Binary, OS);
}

#include "AArch64GenMCCodeEmitter.inc"