arm_lir.h revision c777e0de83cdffdb2e240d439c5595a4836553e8
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_ARM_ARM_LIR_H_
#define ART_COMPILER_DEX_QUICK_ARM_ARM_LIR_H_

#include "dex/compiler_internals.h"

namespace art {

/*
 * Runtime register usage conventions.
 *
 * r0-r3: Argument registers in both Dalvik and C/C++ conventions.
 *        However, for Dalvik->Dalvik calls we'll pass the target's Method*
 *        pointer in r0 as a hidden arg0.  Otherwise used as codegen scratch
 *        registers.
 * r0-r1: As in C/C++, r0 is the 32-bit return register and r0/r1 is the 64-bit return pair.
 * r4   : (rARM_SUSPEND) is reserved (suspend check/debugger assist).
 * r5   : Callee save (promotion target).
 * r6   : Callee save (promotion target).
 * r7   : Callee save (promotion target).
 * r8   : Callee save (promotion target).
 * r9   : (rARM_SELF) is reserved (pointer to thread-local storage).
 * r10  : Callee save (promotion target).
 * r11  : Callee save (promotion target).
 * r12  : Scratch, may be trashed by linkage stubs.
 * r13  : (sp) is reserved.
 * r14  : (lr) is reserved.
 * r15  : (pc) is reserved.
 *
 * 5 core temps that codegen can use (r0, r1, r2, r3, r12).
 * 7 core registers that can be used for promotion.
 *
 * Floating point registers
 *   s0-s31
 *   d0-d15, where d0={s0,s1}, d1={s2,s3}, ... , d15={s30,s31}
 *
 *   s16-s31 (d8-d15) are preserved across C calls.
 *   s0-s15 (d0-d7) are trashed across C calls.
 *
 *   s0-s15/d0-d7 are used as codegen temp/scratch.
 *   s16-s31/d8-d15 can be used for promotion.
 *
 * Calling convention
 *   o On a call to a Dalvik method, pass the target's Method* in r0.
 *   o r1-r3 will be used for up to the first 3 words of arguments.
 *   o Arguments past the first 3 words will be placed in appropriate
 *     out slots by the caller.
 *   o If a 64-bit argument would span the register/memory argument
 *     boundary, it will instead be fully passed in the frame.
 *   o Maintain a 16-byte stack alignment.
 *
 * Stack frame diagram (stack grows down, higher addresses at top):
 *
 * +------------------------+
 * | IN[ins-1]              |  {Note: resides in caller's frame}
 * |       .                |
 * | IN[0]                  |
 * | caller's Method*       |
 * +========================+  {Note: start of callee's frame}
 * | spill region           |  {variable sized - will include lr if non-leaf}
 * +------------------------+
 * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
 * +------------------------+
 * | V[locals-1]            |
 * | V[locals-2]            |
 * |       .                |
 * |       .                |
 * | V[1]                   |
 * | V[0]                   |
 * +------------------------+
 * | 0 to 3 words padding   |
 * +------------------------+
 * | OUT[outs-1]            |
 * | OUT[outs-2]            |
 * |       .                |
 * | OUT[0]                 |
 * | cur_method*            |  <<== sp w/ 16-byte alignment
 * +========================+
 */
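
// Editorial illustration only (not part of this header's API): one way the 16-byte
// sp alignment described in the diagram above can be satisfied for hypothetical
// spill/local/out word counts.  The helper name and the flat word-based accounting
// are assumptions made for this sketch; the real frame layout is computed by the
// backend, not here.
static inline unsigned ExampleArmFrameSizeInBytes(unsigned spill_words,
                                                  unsigned local_words,
                                                  unsigned out_words) {
  // Spill region + locals + outs + one word for cur_method*, counted in 32-bit words.
  unsigned raw_bytes = (spill_words + local_words + out_words + 1u) * 4u;
  // The "0 to 3 words padding" slot rounds the total up to the required 16-byte
  // alignment of sp.
  return (raw_bytes + 15u) & ~15u;
}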

// Offset to distinguish FP regs.
#define ARM_FP_REG_OFFSET 32
// Offset to distinguish DP FP regs.
#define ARM_FP_DOUBLE 64
// First FP callee save.
#define ARM_FP_CALLEE_SAVE_BASE 16
// Reg types.
#define ARM_REGTYPE(x) (x & (ARM_FP_REG_OFFSET | ARM_FP_DOUBLE))
#define ARM_FPREG(x) ((x & ARM_FP_REG_OFFSET) == ARM_FP_REG_OFFSET)
#define ARM_LOWREG(x) ((x & 0x7) == x)
#define ARM_DOUBLEREG(x) ((x & ARM_FP_DOUBLE) == ARM_FP_DOUBLE)
#define ARM_SINGLEREG(x) (ARM_FPREG(x) && !ARM_DOUBLEREG(x))

/*
 * Note: the low register of a floating point pair is sufficient to
 * create the name of a double, but both names are required to be passed to
 * allow asserts to verify that the pair is consecutive if significant
 * rework is done in this area.  Also, it is a good reminder in the calling
 * code that reg locations always describe doubles as a pair of singles.
 */
#define ARM_S2D(x, y) ((x) | ARM_FP_DOUBLE)
// Mask to strip off fp flags.
#define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET - 1)

enum ArmResourceEncodingPos {
  kArmGPReg0  = 0,
  kArmRegSP   = 13,
  kArmRegLR   = 14,
  kArmRegPC   = 15,
  kArmFPReg0  = 16,
  kArmFPReg16 = 32,
  kArmRegEnd  = 48,
};

#define ENCODE_ARM_REG_LIST(N)      (static_cast<uint64_t>(N))
#define ENCODE_ARM_REG_SP           (1ULL << kArmRegSP)
#define ENCODE_ARM_REG_LR           (1ULL << kArmRegLR)
#define ENCODE_ARM_REG_PC           (1ULL << kArmRegPC)
#define ENCODE_ARM_REG_FPCS_LIST(N) (static_cast<uint64_t>(N) << kArmFPReg16)

enum ArmNativeRegisterPool {
  r0 = 0,
  r1 = 1,
  r2 = 2,
  r3 = 3,
  rARM_SUSPEND = 4,
  r5 = 5,
  r6 = 6,
  r7 = 7,
  r8 = 8,
  rARM_SELF = 9,
  r10 = 10,
  r11 = 11,
  r12 = 12,
  r13sp = 13,
  rARM_SP = 13,
  r14lr = 14,
  rARM_LR = 14,
  r15pc = 15,
  rARM_PC = 15,
  fr0 = 0 + ARM_FP_REG_OFFSET,
  fr1 = 1 + ARM_FP_REG_OFFSET,
  fr2 = 2 + ARM_FP_REG_OFFSET,
  fr3 = 3 + ARM_FP_REG_OFFSET,
  fr4 = 4 + ARM_FP_REG_OFFSET,
  fr5 = 5 + ARM_FP_REG_OFFSET,
  fr6 = 6 + ARM_FP_REG_OFFSET,
  fr7 = 7 + ARM_FP_REG_OFFSET,
  fr8 = 8 + ARM_FP_REG_OFFSET,
  fr9 = 9 + ARM_FP_REG_OFFSET,
  fr10 = 10 + ARM_FP_REG_OFFSET,
  fr11 = 11 + ARM_FP_REG_OFFSET,
  fr12 = 12 + ARM_FP_REG_OFFSET,
  fr13 = 13 + ARM_FP_REG_OFFSET,
  fr14 = 14 + ARM_FP_REG_OFFSET,
  fr15 = 15 + ARM_FP_REG_OFFSET,
  fr16 = 16 + ARM_FP_REG_OFFSET,
  fr17 = 17 + ARM_FP_REG_OFFSET,
  fr18 = 18 + ARM_FP_REG_OFFSET,
  fr19 = 19 + ARM_FP_REG_OFFSET,
  fr20 = 20 + ARM_FP_REG_OFFSET,
  fr21 = 21 + ARM_FP_REG_OFFSET,
  fr22 = 22 + ARM_FP_REG_OFFSET,
  fr23 = 23 + ARM_FP_REG_OFFSET,
  fr24 = 24 + ARM_FP_REG_OFFSET,
  fr25 = 25 + ARM_FP_REG_OFFSET,
  fr26 = 26 + ARM_FP_REG_OFFSET,
  fr27 = 27 + ARM_FP_REG_OFFSET,
  fr28 = 28 + ARM_FP_REG_OFFSET,
  fr29 = 29 + ARM_FP_REG_OFFSET,
  fr30 = 30 + ARM_FP_REG_OFFSET,
  fr31 = 31 + ARM_FP_REG_OFFSET,
  dr0 = fr0 + ARM_FP_DOUBLE,
  dr1 = fr2 + ARM_FP_DOUBLE,
  dr2 = fr4 + ARM_FP_DOUBLE,
  dr3 = fr6 + ARM_FP_DOUBLE,
  dr4 = fr8 + ARM_FP_DOUBLE,
  dr5 = fr10 + ARM_FP_DOUBLE,
  dr6 = fr12 + ARM_FP_DOUBLE,
  dr7 = fr14 + ARM_FP_DOUBLE,
  dr8 = fr16 + ARM_FP_DOUBLE,
  dr9 = fr18 + ARM_FP_DOUBLE,
  dr10 = fr20 + ARM_FP_DOUBLE,
  dr11 = fr22 + ARM_FP_DOUBLE,
  dr12 = fr24 + ARM_FP_DOUBLE,
  dr13 = fr26 + ARM_FP_DOUBLE,
  dr14 = fr28 + ARM_FP_DOUBLE,
  dr15 = fr30 + ARM_FP_DOUBLE,
};

// TODO: clean this up; reduce use of or eliminate macros.
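
// Illustrative compile-time checks (an editorial sketch, assuming C++11 static_assert
// is usable in this translation unit): they merely restate how the flag bits above
// classify register names and are not required by the backend.
static_assert(ARM_FPREG(fr0) && ARM_SINGLEREG(fr0), "fr0 is a single-precision FP reg");
static_assert(ARM_FPREG(dr0) && ARM_DOUBLEREG(dr0), "dr0 carries both FP and double flags");
static_assert(ARM_S2D(fr0, fr1) == dr0, "pairing s0/s1 yields the name of d0");
static_assert((fr17 & ARM_FP_REG_MASK) == 17, "ARM_FP_REG_MASK strips the FP flag bits");
static_assert(ARM_LOWREG(r7) && !ARM_LOWREG(r8), "only r0-r7 are Thumb low registers");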

const RegStorage rs_r0(RegStorage::k32BitSolo, r0);
const RegStorage rs_r1(RegStorage::k32BitSolo, r1);
const RegStorage rs_r2(RegStorage::k32BitSolo, r2);
const RegStorage rs_r3(RegStorage::k32BitSolo, r3);
const RegStorage rs_rARM_SUSPEND(RegStorage::k32BitSolo, rARM_SUSPEND);
const RegStorage rs_r5(RegStorage::k32BitSolo, r5);
const RegStorage rs_r6(RegStorage::k32BitSolo, r6);
const RegStorage rs_r7(RegStorage::k32BitSolo, r7);
const RegStorage rs_r8(RegStorage::k32BitSolo, r8);
const RegStorage rs_rARM_SELF(RegStorage::k32BitSolo, rARM_SELF);
const RegStorage rs_r10(RegStorage::k32BitSolo, r10);
const RegStorage rs_r11(RegStorage::k32BitSolo, r11);
const RegStorage rs_r12(RegStorage::k32BitSolo, r12);
const RegStorage rs_r13sp(RegStorage::k32BitSolo, r13sp);
const RegStorage rs_rARM_SP(RegStorage::k32BitSolo, rARM_SP);
const RegStorage rs_r14lr(RegStorage::k32BitSolo, r14lr);
const RegStorage rs_rARM_LR(RegStorage::k32BitSolo, rARM_LR);
const RegStorage rs_r15pc(RegStorage::k32BitSolo, r15pc);
const RegStorage rs_rARM_PC(RegStorage::k32BitSolo, rARM_PC);
const RegStorage rs_invalid(RegStorage::kInvalid);

// Target-independent aliases.
#define rARM_ARG0 r0
#define rs_rARM_ARG0 rs_r0
#define rARM_ARG1 r1
#define rs_rARM_ARG1 rs_r1
#define rARM_ARG2 r2
#define rs_rARM_ARG2 rs_r2
#define rARM_ARG3 r3
#define rs_rARM_ARG3 rs_r3
#define rARM_FARG0 r0
#define rs_ARM_FARG0 rs_r0
#define rARM_FARG1 r1
#define rs_rARM_FARG1 rs_r1
#define rARM_FARG2 r2
#define rs_rARM_FARG2 rs_r2
#define rARM_FARG3 r3
#define rs_rARM_FARG3 rs_r3
#define rARM_RET0 r0
#define rs_rARM_RET0 rs_r0
#define rARM_RET1 r1
#define rs_rARM_RET1 rs_r1
#define rARM_INVOKE_TGT rARM_LR
#define rs_rARM_INVOKE_TGT rs_rARM_LR
#define rARM_COUNT RegStorage::kInvalidRegVal

// RegisterLocation templates for return values (r0, or r0/r1).
const RegLocation arm_loc_c_return
    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
     RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
const RegLocation arm_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
     RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
const RegLocation arm_loc_c_return_float
    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
     RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
const RegLocation arm_loc_c_return_double
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
     RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};

enum ArmShiftEncodings {
  kArmLsl = 0x0,
  kArmLsr = 0x1,
  kArmAsr = 0x2,
  kArmRor = 0x3
};

/*
 * The following enum defines the list of Thumb instructions supported by the
 * assembler.  Their corresponding EncodingMap positions are defined in
 * Assemble.cc.
 */
enum ArmOpcode {
  kArmFirst = 0,
  kArm16BitData = kArmFirst,  // DATA [0] rd[15..0].
  kThumbAdcRR,         // adc    [0100000101] rm[5..3] rd[2..0].
  kThumbAddRRI3,       // add(1) [0001110] imm_3[8..6] rn[5..3] rd[2..0].
  kThumbAddRI8,        // add(2) [00110] rd[10..8] imm_8[7..0].
  kThumbAddRRR,        // add(3) [0001100] rm[8..6] rn[5..3] rd[2..0].
  kThumbAddRRLH,       // add(4) [01000100] H12[01] rm[5..3] rd[2..0].
  kThumbAddRRHL,       // add(4) [01001000] H12[10] rm[5..3] rd[2..0].
  kThumbAddRRHH,       // add(4) [01001100] H12[11] rm[5..3] rd[2..0].
  kThumbAddPcRel,      // add(5) [10100] rd[10..8] imm_8[7..0].
  kThumbAddSpRel,      // add(6) [10101] rd[10..8] imm_8[7..0].
  kThumbAddSpI7,       // add(7) [101100000] imm_7[6..0].
  kThumbAndRR,         // and    [0100000000] rm[5..3] rd[2..0].
  kThumbAsrRRI5,       // asr(1) [00010] imm_5[10..6] rm[5..3] rd[2..0].
  kThumbAsrRR,         // asr(2) [0100000100] rs[5..3] rd[2..0].
  kThumbBCond,         // b(1)   [1101] cond[11..8] offset_8[7..0].
  kThumbBUncond,       // b(2)   [11100] offset_11[10..0].
  kThumbBicRR,         // bic    [0100001110] rm[5..3] rd[2..0].
  kThumbBkpt,          // bkpt   [10111110] imm_8[7..0].
  kThumbBlx1,          // blx(1) [111] H[10] offset_11[10..0].
  kThumbBlx2,          // blx(1) [111] H[01] offset_11[10..0].
  kThumbBl1,           // blx(1) [111] H[10] offset_11[10..0].
  kThumbBl2,           // blx(1) [111] H[11] offset_11[10..0].
  kThumbBlxR,          // blx(2) [010001111] rm[6..3] [000].
  kThumbBx,            // bx     [010001110] H2[6..6] rm[5..3] SBZ[000].
  kThumbCmnRR,         // cmn    [0100001011] rm[5..3] rd[2..0].
  kThumbCmpRI8,        // cmp(1) [00101] rn[10..8] imm_8[7..0].
  kThumbCmpRR,         // cmp(2) [0100001010] rm[5..3] rd[2..0].
  kThumbCmpLH,         // cmp(3) [01000101] H12[01] rm[5..3] rd[2..0].
  kThumbCmpHL,         // cmp(3) [01000110] H12[10] rm[5..3] rd[2..0].
  kThumbCmpHH,         // cmp(3) [01000111] H12[11] rm[5..3] rd[2..0].
  kThumbEorRR,         // eor    [0100000001] rm[5..3] rd[2..0].
  kThumbLdmia,         // ldmia  [11001] rn[10..8] reglist[7..0].
  kThumbLdrRRI5,       // ldr(1) [01101] imm_5[10..6] rn[5..3] rd[2..0].
  kThumbLdrRRR,        // ldr(2) [0101100] rm[8..6] rn[5..3] rd[2..0].
  kThumbLdrPcRel,      // ldr(3) [01001] rd[10..8] imm_8[7..0].
  kThumbLdrSpRel,      // ldr(4) [10011] rd[10..8] imm_8[7..0].
  kThumbLdrbRRI5,      // ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0].
  kThumbLdrbRRR,       // ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0].
  kThumbLdrhRRI5,      // ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0].
  kThumbLdrhRRR,       // ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0].
  kThumbLdrsbRRR,      // ldrsb  [0101011] rm[8..6] rn[5..3] rd[2..0].
  kThumbLdrshRRR,      // ldrsh  [0101111] rm[8..6] rn[5..3] rd[2..0].
  kThumbLslRRI5,       // lsl(1) [00000] imm_5[10..6] rm[5..3] rd[2..0].
  kThumbLslRR,         // lsl(2) [0100000010] rs[5..3] rd[2..0].
  kThumbLsrRRI5,       // lsr(1) [00001] imm_5[10..6] rm[5..3] rd[2..0].
  kThumbLsrRR,         // lsr(2) [0100000011] rs[5..3] rd[2..0].
  kThumbMovImm,        // mov(1) [00100] rd[10..8] imm_8[7..0].
  kThumbMovRR,         // mov(2) [0001110000] rn[5..3] rd[2..0].
  kThumbMovRR_H2H,     // mov(3) [01000111] H12[11] rm[5..3] rd[2..0].
  kThumbMovRR_H2L,     // mov(3) [01000110] H12[01] rm[5..3] rd[2..0].
  kThumbMovRR_L2H,     // mov(3) [01000101] H12[10] rm[5..3] rd[2..0].
  kThumbMul,           // mul    [0100001101] rm[5..3] rd[2..0].
  kThumbMvn,           // mvn    [0100001111] rm[5..3] rd[2..0].
  kThumbNeg,           // neg    [0100001001] rm[5..3] rd[2..0].
  kThumbOrr,           // orr    [0100001100] rm[5..3] rd[2..0].
  kThumbPop,           // pop    [1011110] r[8..8] rl[7..0].
  kThumbPush,          // push   [1011010] r[8..8] rl[7..0].
  kThumbRev,           // rev    [1011101000] rm[5..3] rd[2..0].
  kThumbRevsh,         // revsh  [1011101011] rm[5..3] rd[2..0].
  kThumbRorRR,         // ror    [0100000111] rs[5..3] rd[2..0].
  kThumbSbc,           // sbc    [0100000110] rm[5..3] rd[2..0].
  kThumbStmia,         // stmia  [11000] rn[10..8] reglist[7..0].
  kThumbStrRRI5,       // str(1) [01100] imm_5[10..6] rn[5..3] rd[2..0].
  kThumbStrRRR,        // str(2) [0101000] rm[8..6] rn[5..3] rd[2..0].
  kThumbStrSpRel,      // str(3) [10010] rd[10..8] imm_8[7..0].
  kThumbStrbRRI5,      // strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0].
  kThumbStrbRRR,       // strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0].
  kThumbStrhRRI5,      // strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0].
  kThumbStrhRRR,       // strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0].
  kThumbSubRRI3,       // sub(1) [0001111] imm_3[8..6] rn[5..3] rd[2..0].
  kThumbSubRI8,        // sub(2) [00111] rd[10..8] imm_8[7..0].
  kThumbSubRRR,        // sub(3) [0001101] rm[8..6] rn[5..3] rd[2..0].
  kThumbSubSpI7,       // sub(4) [101100001] imm_7[6..0].
  kThumbSwi,           // swi    [11011111] imm_8[7..0].
  kThumbTst,           // tst    [0100001000] rm[5..3] rn[2..0].
  kThumb2Vldrs,        // vldr low sx [111011011001] rn[19..16] rd[15-12] [1010] imm_8[7..0].
  kThumb2Vldrd,        // vldr low dx [111011011001] rn[19..16] rd[15-12] [1011] imm_8[7..0].
  kThumb2Vmuls,        // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10100000] rm[3..0].
  kThumb2Vmuld,        // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10110000] rm[3..0].
  kThumb2Vstrs,        // vstr low sx [111011011000] rn[19..16] rd[15-12] [1010] imm_8[7..0].
  kThumb2Vstrd,        // vstr low dx [111011011000] rn[19..16] rd[15-12] [1011] imm_8[7..0].
  kThumb2Vsubs,        // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100100] rm[3..0].
  kThumb2Vsubd,        // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110100] rm[3..0].
  kThumb2Vadds,        // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100000] rm[3..0].
  kThumb2Vaddd,        // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110000] rm[3..0].
  kThumb2Vdivs,        // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10100000] rm[3..0].
  kThumb2Vdivd,        // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10110000] rm[3..0].
  kThumb2VmlaF64,      // vmla.F64 vd, vn, vm [111011100000] vn[19..16] vd[15..12] [10110000] vm[3..0].
  kThumb2VcvtIF,       // vcvt.F32.S32 vd, vm [1110111010111000] vd[15..12] [10101100] vm[3..0].
  kThumb2VcvtFI,       // vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12] [10101100] vm[3..0].
  kThumb2VcvtDI,       // vcvt.S32.F64 vd, vm [1110111010111101] vd[15..12] [10111100] vm[3..0].
  kThumb2VcvtFd,       // vcvt.F64.F32 vd, vm [1110111010110111] vd[15..12] [10101100] vm[3..0].
  kThumb2VcvtDF,       // vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12] [10111100] vm[3..0].
  kThumb2VcvtF64S32,   // vcvt.F64.S32 vd, vm [1110111010111000] vd[15..12] [10111100] vm[3..0].
  kThumb2VcvtF64U32,   // vcvt.F64.U32 vd, vm [1110111010111000] vd[15..12] [10110100] vm[3..0].
  kThumb2Vsqrts,       // vsqrt.f32 vd, vm [1110111010110001] vd[15..12] [10101100] vm[3..0].
  kThumb2Vsqrtd,       // vsqrt.f64 vd, vm [1110111010110001] vd[15..12] [10111100] vm[3..0].
  kThumb2MovI8M,       // mov(T2) rd, #<const> [11110] i [00001001111] imm3 rd[11..8] imm8.
  kThumb2MovImm16,     // mov(T3) rd, #<const> [11110] i [0010100] imm4 [0] imm3 rd[11..8] imm8.
  kThumb2StrRRI12,     // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0].
  kThumb2LdrRRI12,     // ldr(Imm,T3) rd,[rn,#imm12] [111110001101] rn[19..16] rt[15..12] imm12[11..0].
  kThumb2StrRRI8Predec,  // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0].
  kThumb2LdrRRI8Predec,  // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0].
  kThumb2Cbnz,         // cbnz rd,<label> [101110] i [1] imm5[7..3] rn[2..0].
  kThumb2Cbz,          // cbz rd,<label> [101100] i [1] imm5[7..3] rn[2..0].
  kThumb2AddRRI12,     // add rd, rn, #imm12 [11110] i [100000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2MovRR,        // mov rd, rm [11101010010011110000] rd[11..8] [0000] rm[3..0].
  kThumb2Vmovs,        // vmov.f32 vd, vm [111011101] D [110000] vd[15..12] [101001] M [0] vm[3..0].
  kThumb2Vmovd,        // vmov.f64 vd, vm [111011101] D [110000] vd[15..12] [101101] M [0] vm[3..0].
  kThumb2Ldmia,        // ldmia [111010001001] rn[19..16] mask[15..0].
  kThumb2Stmia,        // stmia [111010001000] rn[19..16] mask[15..0].
  kThumb2AddRRR,       // add [111010110000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2SubRRR,       // sub [111010111010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2SbcRRR,       // sbc [111010110110] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2CmpRR,        // cmp [111010111011] rn[19..16] [0000] [1111] [0000] rm[3..0].
  kThumb2SubRRI12,     // sub rd, rn, #imm12 [11110] i [101010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2MvnI8M,       // mvn rd, #<const> [11110] i [00011011110] imm3 rd[11..8] imm8.
  kThumb2Sel,          // sel rd, rn, rm [111110101010] rn[19-16] rd[11-8] rm[3-0].
  kThumb2Ubfx,         // ubfx rd,rn,#lsb,#width [111100111100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
  kThumb2Sbfx,         // sbfx rd,rn,#lsb,#width [111100110100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
  kThumb2LdrRRR,       // ldr rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
  kThumb2LdrhRRR,      // ldrh rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
  kThumb2LdrshRRR,     // ldrsh rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
  kThumb2LdrbRRR,      // ldrb rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
  kThumb2LdrsbRRR,     // ldrsb rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
  kThumb2StrRRR,       // str rt,[rn,rm,LSL #imm] [111110000100] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
  kThumb2StrhRRR,      // strh rt,[rn,rm,LSL #imm] [111110000010] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
  kThumb2StrbRRR,      // strb rt,[rn,rm,LSL #imm] [111110000000] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
  kThumb2LdrhRRI12,    // ldrh rt,[rn,#imm12] [111110001011] rt[15..12] rn[19..16] imm12[11..0].
  kThumb2LdrshRRI12,   // ldrsh rt,[rn,#imm12] [111110011011] rt[15..12] rn[19..16] imm12[11..0].
  kThumb2LdrbRRI12,    // ldrb rt,[rn,#imm12] [111110001001] rt[15..12] rn[19..16] imm12[11..0].
  kThumb2LdrsbRRI12,   // ldrsb rt,[rn,#imm12] [111110011001] rt[15..12] rn[19..16] imm12[11..0].
  kThumb2StrhRRI12,    // strh rt,[rn,#imm12] [111110001010] rt[15..12] rn[19..16] imm12[11..0].
  kThumb2StrbRRI12,    // strb rt,[rn,#imm12] [111110001000] rt[15..12] rn[19..16] imm12[11..0].
  kThumb2Pop,          // pop [1110100010111101] list[15-0].
  kThumb2Push,         // push [1110100100101101] list[15-0].
  kThumb2CmpRI8M,      // cmp rn, #<const> [11110] i [011011] rn[19-16] [0] imm3 [1111] imm8[7..0].
  kThumb2CmnRI8M,      // cmn rn, #<const> [11110] i [010001] rn[19-16] [0] imm3 [1111] imm8[7..0].
  kThumb2AdcRRR,       // adc [111010110101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2AndRRR,       // and [111010100000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2BicRRR,       // bic [111010100010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2CmnRR,        // cmn [111010110001] rn[19..16] [0000] [1111] [0000] rm[3..0].
  kThumb2EorRRR,       // eor [111010101000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2MulRRR,       // mul [111110110000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
  kThumb2SdivRRR,      // sdiv [111110111001] rn[19..16] [1111] rd[11..8] [1111] rm[3..0].
  kThumb2UdivRRR,      // udiv [111110111011] rn[19..16] [1111] rd[11..8] [1111] rm[3..0].
  kThumb2MnvRR,        // mvn [11101010011011110] rd[11-8] [0000] rm[3..0].
  kThumb2RsubRRI8M,    // rsb rd, rn, #<const> [11110] i [011101] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2NegRR,        // actually rsub rd, rn, #0.
  kThumb2OrrRRR,       // orr [111010100100] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2TstRR,        // tst [111010100001] rn[19..16] [0000] [1111] [0000] rm[3..0].
  kThumb2LslRRR,       // lsl [111110100000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
  kThumb2LsrRRR,       // lsr [111110100010] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
  kThumb2AsrRRR,       // asr [111110100100] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
  kThumb2RorRRR,       // ror [111110100110] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
  kThumb2LslRRI5,      // lsl [11101010010011110] imm[14.12] rd[11..8] [00] rm[3..0].
  kThumb2LsrRRI5,      // lsr [11101010010011110] imm[14.12] rd[11..8] [01] rm[3..0].
  kThumb2AsrRRI5,      // asr [11101010010011110] imm[14.12] rd[11..8] [10] rm[3..0].
  kThumb2RorRRI5,      // ror [11101010010011110] imm[14.12] rd[11..8] [11] rm[3..0].
  kThumb2BicRRI8M,     // bic rd, rn, #<const> [11110] i [000010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2AndRRI8M,     // and rd, rn, #<const> [11110] i [000000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2OrrRRI8M,     // orr rd, rn, #<const> [11110] i [000100] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2EorRRI8M,     // eor rd, rn, #<const> [11110] i [001000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2AddRRI8M,     // add rd, rn, #<const> [11110] i [010001] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2AdcRRI8M,     // adc rd, rn, #<const> [11110] i [010101] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2SubRRI8M,     // sub rd, rn, #<const> [11110] i [011011] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2SbcRRI8M,     // sbc rd, rn, #<const> [11110] i [010111] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
  kThumb2RevRR,        // rev [111110101001] rm[19..16] [1111] rd[11..8] [1000] rm[3..0].
  kThumb2RevshRR,      // revsh [111110101001] rm[19..16] [1111] rd[11..8] [1011] rm[3..0].
  kThumb2It,           // it [10111111] firstcond[7-4] mask[3-0].
  kThumb2Fmstat,       // fmstat [11101110111100011111101000010000].
  kThumb2Vcmpd,        // vcmp [111011101] D [11011] rd[15-12] [1011] E [1] M [0] rm[3-0].
  kThumb2Vcmps,        // vcmp [111011101] D [11010] rd[15-12] [1011] E [1] M [0] rm[3-0].
  kThumb2LdrPcRel12,   // ldr rd,[pc,#imm12] [1111100011011111] rt[15-12] imm12[11-0].
  kThumb2BCond,        // b<c> [1110] S cond[25-22] imm6[21-16] [10] J1 [0] J2 imm11[10..0].
  kThumb2Fmrs,         // vmov [111011100000] vn[19-16] rt[15-12] [1010] N [0010000].
  kThumb2Fmsr,         // vmov [111011100001] vn[19-16] rt[15-12] [1010] N [0010000].
  kThumb2Fmrrd,        // vmov [111011000100] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
  kThumb2Fmdrr,        // vmov [111011000101] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
  kThumb2Vabsd,        // vabs.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
  kThumb2Vabss,        // vabs.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
  kThumb2Vnegd,        // vneg.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
  kThumb2Vnegs,        // vneg.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
  kThumb2Vmovs_IMM8,   // vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12] [10100000] imm4l[3-0].
  kThumb2Vmovd_IMM8,   // vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12] [10110000] imm4l[3-0].
  kThumb2Mla,          // mla [111110110000] rn[19-16] ra[15-12] rd[7-4] [0000] rm[3-0].
  kThumb2Umull,        // umull [111110111010] rn[19-16] rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
  kThumb2Ldrex,        // ldrex [111010000101] rn[19-16] rt[15-12] [1111] imm8[7-0].
  kThumb2Ldrexd,       // ldrexd [111010001101] rn[19-16] rt[15-12] rt2[11-8] [11111111].
  kThumb2Strex,        // strex [111010000100] rn[19-16] rt[15-12] rd[11-8] imm8[7-0].
  kThumb2Strexd,       // strexd [111010001100] rn[19-16] rt[15-12] rt2[11-8] [0111] Rd[3-0].
  kThumb2Clrex,        // clrex [11110011101111111000111100101111].
  kThumb2Bfi,          // bfi [111100110110] rn[19-16] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
  kThumb2Bfc,          // bfc [11110011011011110] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
  kThumb2Dmb,          // dmb [1111001110111111100011110101] option[3-0].
  kThumb2LdrPcReln12,  // ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12] imm12[11-0].
  kThumb2Stm,          // stm <list> [111010010000] rn[19-16] 000 rl[12-0].
  kThumbUndefined,     // undefined [11011110xxxxxxxx].
  kThumb2VPopCS,       // vpop <list of callee-save fp singles (s16+)>.
  kThumb2VPushCS,      // vpush <list of callee-save fp singles (s16+)>.
  kThumb2Vldms,        // vldms rd, <list>.
  kThumb2Vstms,        // vstms rd, <list>.
  kThumb2BUncond,      // b <label>.
  kThumb2MovImm16H,    // similar to kThumb2MovImm16, but target high hw.
  kThumb2AddPCR,       // Thumb2 2-operand add with hard-coded PC target.
  kThumb2Adr,          // Special purpose encoding of ADR for switch tables.
  kThumb2MovImm16LST,  // Special purpose version for switch table use.
  kThumb2MovImm16HST,  // Special purpose version for switch table use.
  kThumb2LdmiaWB,      // ldmia [111010011001] rn[19..16] mask[15..0].
  kThumb2OrrRRRs,      // orrs [111010100101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2Push1,        // t3 encoding of push.
  kThumb2Pop1,         // t3 encoding of pop.
  kThumb2RsubRRR,      // rsb [111010111101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
  kThumb2Smull,        // smull [111110111000] rn[19-16] rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
  kThumb2LdrdPcRel8,   // ldrd rt, rt2, pc +/- 1024.
  kThumb2LdrdI8,       // ldrd rt, rt2, [rn +/- 1024].
  kThumb2StrdI8,       // strd rt, rt2, [rn +/- 1024].
  kArmLast,
};

enum ArmOpDmbOptions {
  kSY = 0xf,
  kST = 0xe,
  kISH = 0xb,
  kISHST = 0xa,
  kNSH = 0x7,
  kNSHST = 0x6
};

// Instruction assembly field_loc kind.
enum ArmEncodingKind {
  kFmtUnused,    // Unused field and marks end of formats.
  kFmtBitBlt,    // Bit string using end/start.
  kFmtDfp,       // Double FP reg.
  kFmtSfp,       // Single FP reg.
  kFmtModImm,    // Shifted 8-bit immed using [26,14..12,7..0].
  kFmtImm16,     // Zero-extended immed using [26,19..16,14..12,7..0].
  kFmtImm6,      // Encoded branch target using [9,7..3]0.
  kFmtImm12,     // Zero-extended immediate using [26,14..12,7..0].
  kFmtShift,     // Shift descriptor, [14..12,7..4].
  kFmtLsb,       // Least significant bit using [14..12][7..6].
  kFmtBWidth,    // Bit-field width, encoded as width-1.
  kFmtShift5,    // Shift count, [14..12,7..6].
  kFmtBrOffset,  // Sign-extended [26,11,13,21-16,10-0]:0.
  kFmtFPImm,     // Encoded floating point immediate.
  kFmtOff24,     // 24-bit Thumb2 unconditional branch encoding.
  kFmtSkip,      // Unused field, but continue to next.
};

// Struct used to define the snippet positions for each Thumb opcode.
struct ArmEncodingMap {
  uint32_t skeleton;
  struct {
    ArmEncodingKind kind;
    int end;    // end for kFmtBitBlt, 1-bit slice end for FP regs.
    int start;  // start for kFmtBitBlt, 4-bit slice end for FP regs.
  } field_loc[4];
  ArmOpcode opcode;
  uint64_t flags;
  const char* name;
  const char* fmt;
  int size;   // Note: size is in bytes.
  FixupKind fixup;
};
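
// Illustrative sketch only: the hypothetical helper below shows how a kFmtBitBlt
// field_loc descriptor (inclusive end/start bit positions) could be combined with an
// opcode skeleton when packing an operand.  It is not the actual assembly code, which
// lives with the EncodingMap definitions in Assemble.cc.
static inline uint32_t ExampleInsertBitBltField(uint32_t skeleton, uint32_t operand,
                                                int end, int start) {
  int width = end - start + 1;  // Inclusive bit range described by field_loc.
  uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1u);
  return skeleton | ((operand & mask) << start);
}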

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_ARM_ARM_LIR_H_