/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_ARM64_ARM64_LIR_H_
#define ART_COMPILER_DEX_QUICK_ARM64_ARM64_LIR_H_

#include "dex/compiler_internals.h"

namespace art {

/*
 * TODO(Arm64): the comments below are outdated.
 *
 * Runtime register usage conventions.
 *
 * r0-r3: Argument registers in both Dalvik and C/C++ conventions.
 *        However, for Dalvik->Dalvik calls we'll pass the target's Method*
 *        pointer in r0 as a hidden arg0. Otherwise used as codegen scratch
 *        registers.
 * r0-r1: As in C/C++, r0 is the 32-bit return register and r0/r1 the 64-bit
 *        return pair.
 * r4   : (rA64_SUSPEND) is reserved (suspend check/debugger assist)
 * r5   : Callee save (promotion target)
 * r6   : Callee save (promotion target)
 * r7   : Callee save (promotion target)
 * r8   : Callee save (promotion target)
 * r9   : (rA64_SELF) is reserved (pointer to thread-local storage)
 * r10  : Callee save (promotion target)
 * r11  : Callee save (promotion target)
 * r12  : Scratch, may be trashed by linkage stubs
 * r13  : (sp) is reserved
 * r14  : (lr) is reserved
 * r15  : (pc) is reserved
 *
 * 5 core temps that codegen can use (r0, r1, r2, r3, r12)
 * 7 core registers that can be used for promotion
 *
 * Floating point registers
 * s0-s31
 * d0-d15, where d0={s0,s1}, d1={s2,s3}, ... , d15={s30,s31}
 *
 * s16-s31 (d8-d15) preserved across C calls
 * s0-s15 (d0-d7) trashed across C calls
 *
 * s0-s15/d0-d7 used as codegen temp/scratch
 * s16-s31/d8-d15 can be used for promotion.
 *
 * Calling convention
 *     o On a call to a Dalvik method, pass target's Method* in r0
 *     o r1-r3 will be used for up to the first 3 words of arguments
 *     o Arguments past the first 3 words will be placed in appropriate
 *       out slots by the caller.
 *     o If a 64-bit argument would span the register/memory argument
 *       boundary, it will instead be fully passed in the frame.
 *     o Maintain a 16-byte stack alignment
 *
 *  Stack frame diagram (stack grows down, higher addresses at top):
 *
 * +------------------------+
 * | IN[ins-1]              |  {Note: resides in caller's frame}
 * |       .                |
 * | IN[0]                  |
 * | caller's Method*       |
 * +========================+  {Note: start of callee's frame}
 * | spill region           |  {variable sized - will include lr if non-leaf.}
 * +------------------------+
 * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
 * +------------------------+
 * | V[locals-1]            |
 * | V[locals-2]            |
 * |      .                 |
 * |      .                 |
 * | V[1]                   |
 * | V[0]                   |
 * +------------------------+
 * |  0 to 3 words padding  |
 * +------------------------+
 * | OUT[outs-1]            |
 * | OUT[outs-2]            |
 * |       .                |
 * | OUT[0]                 |
 * | cur_method*            | <<== sp w/ 16-byte alignment
 * +========================+
 */
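
/*
 * A sketch of the alignment rule above (illustrative only; the real
 * computation lives in the frame-layout code, and the 4-byte cur_method*
 * slot size is an assumption, not something this file defines): the padding
 * region is sized so that sp stays 16-byte aligned:
 *
 *   frame_size = RoundUp(spill_bytes + locals_bytes + outs_bytes + 4, 16);
 */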

// First FP callee save.
#define A64_FP_CALLEE_SAVE_BASE 8

// Temporary macros, used to mark code which wants to distinguish between zr/sp.
#define A64_REG_IS_SP(reg_num) ((reg_num) == rwsp || (reg_num) == rsp)
#define A64_REG_IS_ZR(reg_num) ((reg_num) == rwzr || (reg_num) == rxzr)

enum Arm64ResourceEncodingPos {
  kArm64GPReg0   = 0,
  kArm64RegLR    = 30,
  kArm64RegSP    = 31,
  kArm64FPReg0   = 32,
  kArm64RegEnd   = 64,
};

#define IS_SIGNED_IMM(size, value) \
  ((value) >= -(1 << ((size) - 1)) && (value) < (1 << ((size) - 1)))
#define IS_SIGNED_IMM7(value) IS_SIGNED_IMM(7, value)
#define IS_SIGNED_IMM9(value) IS_SIGNED_IMM(9, value)
#define IS_SIGNED_IMM12(value) IS_SIGNED_IMM(12, value)
#define IS_SIGNED_IMM19(value) IS_SIGNED_IMM(19, value)
#define IS_SIGNED_IMM21(value) IS_SIGNED_IMM(21, value)
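
// Usage sketch (illustrative only): the unscaled load/store forms declared
// below (kA64Ldur3rXd, kA64Stur3rXd) carry a signed 9-bit displacement, so an
// emitter would typically guard on it, e.g.:
//   if (IS_SIGNED_IMM9(displacement)) {
//     // Encode the displacement directly in a single ldur/stur.
//   } else {
//     // Materialize the address in a scratch register first.
//   }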

// Quick macro used to define the registers.
#define A64_REGISTER_CODE_LIST(R) \
  R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7) \
  R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15) \
  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
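
// For illustration: A64_REGISTER_CODE_LIST(R) is an X-macro that expands R(n)
// once for each register code n in 0..31; each user macro below then stamps
// out one set of definitions per code. E.g. A64_DEFINE_REGISTERS yields
//   rw0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
//   rx0 = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
// and so on up to code 31.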

// Register (integer) values.
enum A64NativeRegisterPool {
#  define A64_DEFINE_REGISTERS(nr) \
    rw##nr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | nr, \
    rx##nr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | nr, \
    rf##nr = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | nr, \
    rd##nr = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | nr,
  A64_REGISTER_CODE_LIST(A64_DEFINE_REGISTERS)
#undef A64_DEFINE_REGISTERS

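  // Note: the zero registers are given the otherwise-impossible register
  // number 0x3f instead of 31, so that they can never be confused with sp/wsp
  // (which really are number 31); the assembler is expected to map 0x3f back
  // to the hardware encoding 31 when emitting instructions.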
  rwzr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0x3f,
  rxzr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0x3f,
  rwsp = rw31,
  rsp = rx31,
  rA64_SUSPEND = rx19,
  rA64_SELF = rx18,
  rA64_SP = rx31,
  rA64_LR = rx30,
  /*
   * FIXME: It's a bit awkward to define both 32- and 64-bit views of these - we'll only ever use
   * the 64-bit view. However, for now we'll define a 32-bit view to keep these from being
   * allocated as 32-bit temp registers.
   */
  rA32_SUSPEND = rw19,
  rA32_SELF = rw18,
  rA32_SP = rw31,
  rA32_LR = rw30
};

#define A64_DEFINE_REGSTORAGES(nr) \
  constexpr RegStorage rs_w##nr(RegStorage::kValid | rw##nr); \
  constexpr RegStorage rs_x##nr(RegStorage::kValid | rx##nr); \
  constexpr RegStorage rs_f##nr(RegStorage::kValid | rf##nr); \
  constexpr RegStorage rs_d##nr(RegStorage::kValid | rd##nr);
A64_REGISTER_CODE_LIST(A64_DEFINE_REGSTORAGES)
#undef A64_DEFINE_REGSTORAGES

constexpr RegStorage rs_wzr(RegStorage::kValid | rwzr);
constexpr RegStorage rs_xzr(RegStorage::kValid | rxzr);
constexpr RegStorage rs_rA64_SUSPEND(RegStorage::kValid | rA64_SUSPEND);
constexpr RegStorage rs_rA64_SELF(RegStorage::kValid | rA64_SELF);
constexpr RegStorage rs_rA64_SP(RegStorage::kValid | rA64_SP);
constexpr RegStorage rs_rA64_LR(RegStorage::kValid | rA64_LR);
// TODO: eliminate the need for these.
constexpr RegStorage rs_rA32_SUSPEND(RegStorage::kValid | rA32_SUSPEND);
constexpr RegStorage rs_rA32_SELF(RegStorage::kValid | rA32_SELF);
constexpr RegStorage rs_rA32_SP(RegStorage::kValid | rA32_SP);
constexpr RegStorage rs_rA32_LR(RegStorage::kValid | rA32_LR);
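
// Usage sketch (illustrative; LoadValueDirectFixed is assumed to come from
// the shared Mir2Lir layer): these constants are the RegStorage handles the
// code generator passes around, e.g. forcing a value into w0:
//   LoadValueDirectFixed(rl_src, rs_w0);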

// RegisterLocation templates for return values (following the hard-float calling convention).
const RegLocation arm_loc_c_return =
    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_w0, INVALID_SREG, INVALID_SREG};
const RegLocation arm_loc_c_return_wide =
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_x0, INVALID_SREG, INVALID_SREG};
const RegLocation arm_loc_c_return_float =
    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_f0, INVALID_SREG, INVALID_SREG};
const RegLocation arm_loc_c_return_double =
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_d0, INVALID_SREG, INVALID_SREG};

/**
 * @brief Shift-type to be applied to a register via EncodeShift().
 */
enum A64ShiftEncodings {
  kA64Lsl = 0x0,
  kA64Lsr = 0x1,
  kA64Asr = 0x2,
  kA64Ror = 0x3
};

/**
 * @brief Extend-type to be applied to a register via EncodeExtend().
 */
enum A64RegExtEncodings {
  kA64Uxtb = 0x0,
  kA64Uxth = 0x1,
  kA64Uxtw = 0x2,
  kA64Uxtx = 0x3,
  kA64Sxtb = 0x4,
  kA64Sxth = 0x5,
  kA64Sxtw = 0x6,
  kA64Sxtx = 0x7
};

#define ENCODE_NO_SHIFT (EncodeShift(kA64Lsl, 0))
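
// Usage sketch (illustrative; EncodeShift()/EncodeExtend() live in the Arm64
// codegen, and NewLIR4 is the usual LIR constructor - both assumptions here,
// with rd/rn/rm standing in for real register operands):
//   // rd = rn | (rm << 2), via the shifted-register form of orr:
//   NewLIR4(kA64Orr4rrro, rd, rn, rm, EncodeShift(kA64Lsl, 2));
//   // Plain register operand, no shift:
//   NewLIR4(kA64Orr4rrro, rd, rn, rm, ENCODE_NO_SHIFT);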

/*
 * The following enum defines the list of A64 instructions supported by the
 * assembler. Their corresponding EncodingMap positions will be defined in
 * assemble_arm64.cc.
 */
enum ArmOpcode {
  kA64First = 0,
  kA64Adc3rrr = kA64First,  // adc [00011010000] rm[20-16] [000000] rn[9-5] rd[4-0].
  kA64Add4RRdT,      // add [s001000100] imm_12[21-10] rn[9-5] rd[4-0].
  kA64Add4rrro,      // add [00001011000] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
  kA64Adr2xd,        // adr [0] immlo[30-29] [10000] immhi[23-5] rd[4-0].
  kA64And3Rrl,       // and [00010010] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
  kA64And4rrro,      // and [00001010] shift[23-22] [N=0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
  kA64Asr3rrd,       // asr [0001001100] immr[21-16] imms[15-10] rn[9-5] rd[4-0].
  kA64Asr3rrr,       // asr alias of "sbfm arg0, arg1, arg2, #{31/63}".
  kA64B2ct,          // b.cond [01010100] imm_19[23-5] [0] cond[3-0].
  kA64Blr1x,         // blr [1101011000111111000000] rn[9-5] [00000].
  kA64Br1x,          // br  [1101011000011111000000] rn[9-5] [00000].
  kA64Brk1d,         // brk [11010100001] imm_16[20-5] [00000].
  kA64B1t,           // b   [00010100] offset_26[25-0].
  kA64Cbnz2rt,       // cbnz[00110101] imm_19[23-5] rt[4-0].
  kA64Cbz2rt,        // cbz [00110100] imm_19[23-5] rt[4-0].
  kA64Cmn3rro,       // cmn [s0101011] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] [11111].
  kA64Cmn3Rre,       // cmn [s0101011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] [11111].
  kA64Cmn3RdT,       // cmn [00110001] shift[23-22] imm_12[21-10] rn[9-5] [11111].
  kA64Cmp3rro,       // cmp [s1101011] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] [11111].
  kA64Cmp3Rre,       // cmp [s1101011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] [11111].
  kA64Cmp3RdT,       // cmp [01110001] shift[23-22] imm_12[21-10] rn[9-5] [11111].
  kA64Csel4rrrc,     // csel[s0011010100] rm[20-16] cond[15-12] [00] rn[9-5] rd[4-0].
  kA64Csinc4rrrc,    // csinc [s0011010100] rm[20-16] cond[15-12] [01] rn[9-5] rd[4-0].
  kA64Csneg4rrrc,    // csneg [s1011010100] rm[20-16] cond[15-12] [01] rn[9-5] rd[4-0].
  kA64Dmb1B,         // dmb [11010101000000110011] CRm[11-8] [10111111].
  kA64Eor3Rrl,       // eor [s10100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
  kA64Eor4rrro,      // eor [s1001010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
  kA64Extr4rrrd,     // extr[s00100111N0] rm[20-16] imm_s[15-10] rn[9-5] rd[4-0].
  kA64Fabs2ff,       // fabs[000111100s100000110000] rn[9-5] rd[4-0].
  kA64Fadd3fff,      // fadd[000111100s1] rm[20-16] [001010] rn[9-5] rd[4-0].
  kA64Fcmp1f,        // fcmp[000111100s100000001000] rn[9-5] [01000].
  kA64Fcmp2ff,       // fcmp[000111100s1] rm[20-16] [001000] rn[9-5] [00000].
  kA64Fcvtzs2wf,     // fcvtzs [000111100s111000000000] rn[9-5] rd[4-0].
  kA64Fcvtzs2xf,     // fcvtzs [100111100s111000000000] rn[9-5] rd[4-0].
  kA64Fcvt2Ss,       // fcvt   [0001111000100010110000] rn[9-5] rd[4-0].
  kA64Fcvt2sS,       // fcvt   [0001111001100010010000] rn[9-5] rd[4-0].
  kA64Fdiv3fff,      // fdiv[000111100s1] rm[20-16] [000110] rn[9-5] rd[4-0].
  kA64Fmov2ff,       // fmov[000111100s100000010000] rn[9-5] rd[4-0].
  kA64Fmov2fI,       // fmov[000111100s1] imm_8[20-13] [10000000] rd[4-0].
  kA64Fmov2sw,       // fmov[0001111000100111000000] rn[9-5] rd[4-0].
  kA64Fmov2Sx,       // fmov[1001111001100111000000] rn[9-5] rd[4-0].
  kA64Fmov2ws,       // fmov[0001111000100110000000] rn[9-5] rd[4-0].
  kA64Fmov2xS,       // fmov[1001111001100110000000] rn[9-5] rd[4-0].
  kA64Fmul3fff,      // fmul[000111100s1] rm[20-16] [000010] rn[9-5] rd[4-0].
  kA64Fneg2ff,       // fneg[000111100s100001010000] rn[9-5] rd[4-0].
  kA64Frintz2ff,     // frintz [000111100s100101110000] rn[9-5] rd[4-0].
  kA64Fsqrt2ff,      // fsqrt[000111100s100001110000] rn[9-5] rd[4-0].
  kA64Fsub3fff,      // fsub[000111100s1] rm[20-16] [001110] rn[9-5] rd[4-0].
  kA64Ldrb3wXd,      // ldrb[0011100101] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Ldrb3wXx,      // ldrb[00111000011] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64Ldrsb3rXd,     // ldrsb[001110011s] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Ldrsb3rXx,     // ldrsb[001110001s1] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64Ldrh3wXF,      // ldrh[0111100101] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Ldrh4wXxd,     // ldrh[01111000011] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64Ldrsh3rXF,     // ldrsh[011110011s] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Ldrsh4rXxd,    // ldrsh[011110001s1] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64Ldr2fp,        // ldr [0s011100] imm_19[23-5] rt[4-0].
  kA64Ldr2rp,        // ldr [0s011000] imm_19[23-5] rt[4-0].
  kA64Ldr3fXD,       // ldr [1s11110101] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Ldr3rXD,       // ldr [1s11100101] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Ldr4fXxG,      // ldr [1s111100011] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64Ldr4rXxG,      // ldr [1s111000011] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64LdrPost3rXd,   // ldr [1s111000010] imm_9[20-12] [01] rn[9-5] rt[4-0].
  kA64Ldp4ffXD,      // ldp [0s10110101] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
  kA64Ldp4rrXD,      // ldp [s010100101] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
  kA64LdpPost4rrXD,  // ldp [s010100011] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
  kA64Ldur3fXd,      // ldur[1s111100010] imm_9[20-12] [00] rn[9-5] rt[4-0].
  kA64Ldur3rXd,      // ldur[1s111000010] imm_9[20-12] [00] rn[9-5] rt[4-0].
  kA64Ldxr2rX,       // ldxr[1s00100001011111011111] rn[9-5] rt[4-0].
  kA64Lsl3rrr,       // lsl [s0011010110] rm[20-16] [001000] rn[9-5] rd[4-0].
  kA64Lsr3rrd,       // lsr alias of "ubfm arg0, arg1, arg2, #{31/63}".
  kA64Lsr3rrr,       // lsr [s0011010110] rm[20-16] [001001] rn[9-5] rd[4-0].
  kA64Movk3rdM,      // movk [011100101] hw[22-21] imm_16[20-5] rd[4-0].
  kA64Movn3rdM,      // movn [000100101] hw[22-21] imm_16[20-5] rd[4-0].
  kA64Movz3rdM,      // movz [010100101] hw[22-21] imm_16[20-5] rd[4-0].
  kA64Mov2rr,        // mov [00101010000] rm[20-16] [000000] [11111] rd[4-0].
  kA64Mvn2rr,        // mvn [00101010001] rm[20-16] [000000] [11111] rd[4-0].
  kA64Mul3rrr,       // mul [00011011000] rm[20-16] [011111] rn[9-5] rd[4-0].
  kA64Msub4rrrr,     // msub[s0011011000] rm[20-16] [1] ra[14-10] rn[9-5] rd[4-0].
  kA64Neg3rro,       // neg alias of "sub arg0, rzr, arg1, arg2".
  kA64Orr3Rrl,       // orr [s01100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
  kA64Orr4rrro,      // orr [s0101010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
  kA64Ret,           // ret [11010110010111110000001111000000].
  kA64Rev2rr,        // rev [s10110101100000000001x] rn[9-5] rd[4-0].
  kA64Rev162rr,      // rev16[s101101011000000000001] rn[9-5] rd[4-0].
  kA64Ror3rrr,       // ror [s0011010110] rm[20-16] [001011] rn[9-5] rd[4-0].
  kA64Sbc3rrr,       // sbc [s0011010000] rm[20-16] [000000] rn[9-5] rd[4-0].
  kA64Sbfm4rrdd,     // sbfm[0001001100] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
  kA64Scvtf2fw,      // scvtf  [000111100s100010000000] rn[9-5] rd[4-0].
  kA64Scvtf2fx,      // scvtf  [100111100s100010000000] rn[9-5] rd[4-0].
  kA64Sdiv3rrr,      // sdiv[s0011010110] rm[20-16] [000011] rn[9-5] rd[4-0].
  kA64Smaddl4xwwx,   // smaddl [10011011001] rm[20-16] [0] ra[14-10] rn[9-5] rd[4-0].
  kA64Stp4ffXD,      // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
  kA64Stp4rrXD,      // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
  kA64StpPost4rrXD,  // stp [s010100010] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
  kA64StpPre4rrXD,   // stp [s010100110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
  kA64Str3fXD,       // str [1s11110100] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Str4fXxG,      // str [1s111100001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64Str3rXD,       // str [1s11100100] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Str4rXxG,      // str [1s111000001] rm[20-16] option[15-13] S[12] [10] rn[9-5] rt[4-0].
  kA64Strb3wXd,      // strb[0011100100] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Strb3wXx,      // strb[00111000001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64Strh3wXF,      // strh[0111100100] imm_12[21-10] rn[9-5] rt[4-0].
  kA64Strh4wXxd,     // strh[01111000001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
  kA64StrPost3rXd,   // str [1s111000000] imm_9[20-12] [01] rn[9-5] rt[4-0].
  kA64Stur3fXd,      // stur[1s111100000] imm_9[20-12] [00] rn[9-5] rt[4-0].
  kA64Stur3rXd,      // stur[1s111000000] imm_9[20-12] [00] rn[9-5] rt[4-0].
  kA64Stxr3wrX,      // stxr[11001000000] rs[20-16] [011111] rn[9-5] rt[4-0].
  kA64Sub4RRdT,      // sub [s101000100] imm_12[21-10] rn[9-5] rd[4-0].
  kA64Sub4rrro,      // sub [s1001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
  kA64Subs3rRd,      // subs[s111000100] imm_12[21-10] rn[9-5] rd[4-0].
  kA64Tst3rro,       // tst alias of "ands rzr, arg0, arg1, arg2".
  kA64Ubfm4rrdd,     // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
  kA64Last,
  kA64NotWide = 0,   // Flag used to select the first instruction variant.
  kA64Wide = 0x1000  // Flag used to select the second instruction variant.
};

/*
 * The A64 instruction set provides two variants for many instructions. For example, "mov wN, wM"
 * and "mov xN, xM" or - for floating point instructions - "mov sN, sM" and "mov dN, dM".
 * It definitely makes sense to exploit these symmetries of the instruction set. We do this via
 * the WIDE, UNWIDE macros. For opcodes that allow it, the wide variant can be obtained by
 * applying the WIDE macro to the non-wide opcode. E.g. WIDE(kA64Sub4RRdT).
 */

// Return the wide and non-wide variants of the given opcode.
#define WIDE(op) ((ArmOpcode)((op) | kA64Wide))
#define UNWIDE(op) ((ArmOpcode)((op) & ~kA64Wide))

// Whether the given opcode is wide.
#define IS_WIDE(op) (((op) & kA64Wide) != 0)
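
// For example (sketch): kA64Add4RRdT selects "add wD, wN, #imm", while
// WIDE(kA64Add4RRdT) selects "add xD, xN, #imm"; UNWIDE() recovers the
// original opcode, e.g. for EncodingMap table lookups.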

/*
 * Floating point variants. These are just aliases of the macros above which we use for floating
 * point instructions, purely for readability reasons.
 * TODO(Arm64): should we remove these and use the original macros?
 */
#define FWIDE WIDE
#define FUNWIDE UNWIDE
#define IS_FWIDE IS_WIDE

enum ArmOpDmbOptions {
  kSY = 0xf,
  kST = 0xe,
  kISH = 0xb,
  kISHST = 0xa,
  kNSH = 0x7,
  kNSHST = 0x6
};
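
// Usage sketch (assuming the usual NewLIR1 helper from the LIR layer): these
// values fill the CRm field of kA64Dmb1B, so a full inner-shareable barrier
// would be emitted as:
//   NewLIR1(kA64Dmb1B, kISH);  // dmb ish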

// Instruction assembly field_loc kind.
enum ArmEncodingKind {
  // All the formats below are encoded in the same way (as a kFmtBitBlt).
  // These are grouped together, for fast handling (e.g. "if (LIKELY(fmt <= kFmtBitBlt)) ...").
  kFmtRegW = 0,  // Word register (w) or wzr.
  kFmtRegX,      // Extended word register (x) or xzr.
  kFmtRegR,      // Register with same width as the instruction or zr.
  kFmtRegWOrSp,  // Word register (w) or wsp.
  kFmtRegXOrSp,  // Extended word register (x) or sp.
  kFmtRegROrSp,  // Register with same width as the instruction or sp.
  kFmtRegS,      // Single FP reg.
  kFmtRegD,      // Double FP reg.
  kFmtRegF,      // Single/double FP reg depending on the instruction width.
  kFmtBitBlt,    // Bit string using end/start.

  // Less likely formats.
  kFmtUnused,    // Unused field and marks end of formats.
  kFmtImm21,     // Sign-extended immediate using [23..5,30..29].
  kFmtShift,     // Register shift, 9-bit at [23..21, 15..10].
  kFmtExtend,    // Register extend, 9-bit at [23..21, 15..10].
  kFmtSkip,      // Unused field, but continue to next.
};

// TODO(Arm64): should we get rid of kFmtExtend?
//   Note: the only instructions that use it (the extended-register variants of cmp/cmn) are
//   themselves currently unused.

// Struct used to define the snippet positions for each A64 opcode.
struct ArmEncodingMap {
  uint32_t wskeleton;
  uint32_t xskeleton;
  struct {
    ArmEncodingKind kind;
    int end;         // end for kFmtBitBlt, 1-bit slice end for FP regs.
    int start;       // start for kFmtBitBlt, 4-bit slice end for FP regs.
  } field_loc[4];
  ArmOpcode opcode;  // can be WIDE()-ned to indicate it has a wide variant.
  uint64_t flags;
  const char* name;
  const char* fmt;
  int size;          // Note: size is in bytes.
  FixupKind fixup;
};
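
// Illustrative only: an entry in the EncodingMap (see assemble_arm64.cc) is
// expected to look roughly like the following - the exact ENCODING_MAP helper
// and flag names are assumptions based on the other Quick back-ends:
//   ENCODING_MAP(WIDE(kA64Adc3rrr), SF_VARIANTS(0x1a000000),
//                kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
//                kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
//                "adc", "!0r, !1r, !2r", kFixupNone),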

#if 0
// TODO(Arm64): try the following alternative, which fits exactly in one cache line (64 bytes).
struct ArmEncodingMap {
  uint32_t wskeleton;
  uint32_t xskeleton;
  uint64_t flags;
  const char* name;
  const char* fmt;
  struct {
    uint8_t kind;
    int8_t end;         // end for kFmtBitBlt, 1-bit slice end for FP regs.
    int8_t start;       // start for kFmtBitBlt, 4-bit slice end for FP regs.
  } field_loc[4];
  uint32_t fixup;
  uint32_t opcode;         // can be WIDE()-ned to indicate it has a wide variant.
  uint32_t padding[3];
};
#endif

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_ARM64_ARM64_LIR_H_