assemble_x86.cc revision b1eba213afaf7fa6445de863ddc9680ab99762ea
1/* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "codegen_x86.h" 18#include "dex/quick/mir_to_lir-inl.h" 19#include "x86_lir.h" 20 21namespace art { 22 23#define MAX_ASSEMBLER_RETRIES 50 24 25const X86EncodingMap X86Mir2Lir::EncodingMap[kX86Last] = { 26 { kX8632BitData, kData, IS_UNARY_OP, { 0, 0, 0x00, 0, 0, 0, 0, 4 }, "data", "0x!0d" }, 27 { kX86Bkpt, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0 }, "int 3", "" }, 28 { kX86Nop, kNop, IS_UNARY_OP, { 0, 0, 0x90, 0, 0, 0, 0, 0 }, "nop", "" }, 29 30#define ENCODING_MAP(opname, mem_use, reg_def, uses_ccodes, \ 31 rm8_r8, rm32_r32, \ 32 r8_rm8, r32_rm32, \ 33 ax8_i8, ax32_i32, \ 34 rm8_i8, rm8_i8_modrm, \ 35 rm32_i32, rm32_i32_modrm, \ 36 rm32_i8, rm32_i8_modrm) \ 37{ kX86 ## opname ## 8MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8MR", "[!0r+!1d],!2r" }, \ 38{ kX86 ## opname ## 8AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \ 39{ kX86 ## opname ## 8TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8TR", "fs:[!0d],!1r" }, \ 40{ kX86 ## opname ## 8RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 
}, #opname "8RR", "!0r,!1r" }, \ 41{ kX86 ## opname ## 8RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RM", "!0r,[!1r+!2d]" }, \ 42{ kX86 ## opname ## 8RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \ 43{ kX86 ## opname ## 8RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RT", "!0r,fs:[!1d]" }, \ 44{ kX86 ## opname ## 8RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, ax8_i8, 1 }, #opname "8RI", "!0r,!1d" }, \ 45{ kX86 ## opname ## 8MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8MI", "[!0r+!1d],!2d" }, \ 46{ kX86 ## opname ## 8AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ 47{ kX86 ## opname ## 8TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8TI", "fs:[!0d],!1d" }, \ 48 \ 49{ kX86 ## opname ## 16MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16MR", "[!0r+!1d],!2r" }, \ 50{ kX86 ## opname ## 16AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16AR", "[!0r+!1r<<!2d+!3d],!4r" }, \ 51{ kX86 ## opname ## 16TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16TR", "fs:[!0d],!1r" }, \ 52{ kX86 ## opname ## 16RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, 
r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RR", "!0r,!1r" }, \ 53{ kX86 ## opname ## 16RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RM", "!0r,[!1r+!2d]" }, \ 54{ kX86 ## opname ## 16RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0x66, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \ 55{ kX86 ## opname ## 16RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RT", "!0r,fs:[!1d]" }, \ 56{ kX86 ## opname ## 16RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 2 }, #opname "16RI", "!0r,!1d" }, \ 57{ kX86 ## opname ## 16MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16MI", "[!0r+!1d],!2d" }, \ 58{ kX86 ## opname ## 16AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ 59{ kX86 ## opname ## 16TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16TI", "fs:[!0d],!1d" }, \ 60{ kX86 ## opname ## 16RI8, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16RI8", "!0r,!1d" }, \ 61{ kX86 ## opname ## 16MI8, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16MI8", "[!0r+!1d],!2d" }, \ 62{ kX86 ## opname ## 16AI8, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \ 63{ kX86 ## opname 
## 16TI8, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16TI8", "fs:[!0d],!1d" }, \ 64 \ 65{ kX86 ## opname ## 32MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32MR", "[!0r+!1d],!2r" }, \ 66{ kX86 ## opname ## 32AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32AR", "[!0r+!1r<<!2d+!3d],!4r" }, \ 67{ kX86 ## opname ## 32TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32TR", "fs:[!0d],!1r" }, \ 68{ kX86 ## opname ## 32RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RR", "!0r,!1r" }, \ 69{ kX86 ## opname ## 32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RM", "!0r,[!1r+!2d]" }, \ 70{ kX86 ## opname ## 32RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \ 71{ kX86 ## opname ## 32RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RT", "!0r,fs:[!1d]" }, \ 72{ kX86 ## opname ## 32RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4 }, #opname "32RI", "!0r,!1d" }, \ 73{ kX86 ## opname ## 32MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32MI", "[!0r+!1d],!2d" }, \ 74{ kX86 ## opname ## 32AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32AI", 
"[!0r+!1r<<!2d+!3d],!4d" }, \ 75{ kX86 ## opname ## 32TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32TI", "fs:[!0d],!1d" }, \ 76{ kX86 ## opname ## 32RI8, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32RI8", "!0r,!1d" }, \ 77{ kX86 ## opname ## 32MI8, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32MI8", "[!0r+!1d],!2d" }, \ 78{ kX86 ## opname ## 32AI8, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \ 79{ kX86 ## opname ## 32TI8, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32TI8", "fs:[!0d],!1d" } 80 81ENCODING_MAP(Add, IS_LOAD | IS_STORE, REG_DEF0, 0, 82 0x00 /* RegMem8/Reg8 */, 0x01 /* RegMem32/Reg32 */, 83 0x02 /* Reg8/RegMem8 */, 0x03 /* Reg32/RegMem32 */, 84 0x04 /* Rax8/imm8 opcode */, 0x05 /* Rax32/imm32 */, 85 0x80, 0x0 /* RegMem8/imm8 */, 86 0x81, 0x0 /* RegMem32/imm32 */, 0x83, 0x0 /* RegMem32/imm8 */), 87ENCODING_MAP(Or, IS_LOAD | IS_STORE, REG_DEF0, 0, 88 0x08 /* RegMem8/Reg8 */, 0x09 /* RegMem32/Reg32 */, 89 0x0A /* Reg8/RegMem8 */, 0x0B /* Reg32/RegMem32 */, 90 0x0C /* Rax8/imm8 opcode */, 0x0D /* Rax32/imm32 */, 91 0x80, 0x1 /* RegMem8/imm8 */, 92 0x81, 0x1 /* RegMem32/imm32 */, 0x83, 0x1 /* RegMem32/imm8 */), 93ENCODING_MAP(Adc, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES, 94 0x10 /* RegMem8/Reg8 */, 0x11 /* RegMem32/Reg32 */, 95 0x12 /* Reg8/RegMem8 */, 0x13 /* Reg32/RegMem32 */, 96 0x14 /* Rax8/imm8 opcode */, 0x15 /* Rax32/imm32 */, 97 0x80, 0x2 /* RegMem8/imm8 */, 98 0x81, 0x2 /* RegMem32/imm32 */, 0x83, 0x2 /* RegMem32/imm8 */), 99ENCODING_MAP(Sbb, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES, 100 
0x18 /* RegMem8/Reg8 */, 0x19 /* RegMem32/Reg32 */, 101 0x1A /* Reg8/RegMem8 */, 0x1B /* Reg32/RegMem32 */, 102 0x1C /* Rax8/imm8 opcode */, 0x1D /* Rax32/imm32 */, 103 0x80, 0x3 /* RegMem8/imm8 */, 104 0x81, 0x3 /* RegMem32/imm32 */, 0x83, 0x3 /* RegMem32/imm8 */), 105ENCODING_MAP(And, IS_LOAD | IS_STORE, REG_DEF0, 0, 106 0x20 /* RegMem8/Reg8 */, 0x21 /* RegMem32/Reg32 */, 107 0x22 /* Reg8/RegMem8 */, 0x23 /* Reg32/RegMem32 */, 108 0x24 /* Rax8/imm8 opcode */, 0x25 /* Rax32/imm32 */, 109 0x80, 0x4 /* RegMem8/imm8 */, 110 0x81, 0x4 /* RegMem32/imm32 */, 0x83, 0x4 /* RegMem32/imm8 */), 111ENCODING_MAP(Sub, IS_LOAD | IS_STORE, REG_DEF0, 0, 112 0x28 /* RegMem8/Reg8 */, 0x29 /* RegMem32/Reg32 */, 113 0x2A /* Reg8/RegMem8 */, 0x2B /* Reg32/RegMem32 */, 114 0x2C /* Rax8/imm8 opcode */, 0x2D /* Rax32/imm32 */, 115 0x80, 0x5 /* RegMem8/imm8 */, 116 0x81, 0x5 /* RegMem32/imm32 */, 0x83, 0x5 /* RegMem32/imm8 */), 117ENCODING_MAP(Xor, IS_LOAD | IS_STORE, REG_DEF0, 0, 118 0x30 /* RegMem8/Reg8 */, 0x31 /* RegMem32/Reg32 */, 119 0x32 /* Reg8/RegMem8 */, 0x33 /* Reg32/RegMem32 */, 120 0x34 /* Rax8/imm8 opcode */, 0x35 /* Rax32/imm32 */, 121 0x80, 0x6 /* RegMem8/imm8 */, 122 0x81, 0x6 /* RegMem32/imm32 */, 0x83, 0x6 /* RegMem32/imm8 */), 123ENCODING_MAP(Cmp, IS_LOAD, 0, 0, 124 0x38 /* RegMem8/Reg8 */, 0x39 /* RegMem32/Reg32 */, 125 0x3A /* Reg8/RegMem8 */, 0x3B /* Reg32/RegMem32 */, 126 0x3C /* Rax8/imm8 opcode */, 0x3D /* Rax32/imm32 */, 127 0x80, 0x7 /* RegMem8/imm8 */, 128 0x81, 0x7 /* RegMem32/imm32 */, 0x83, 0x7 /* RegMem32/imm8 */), 129#undef ENCODING_MAP 130 131 { kX86Imul16RRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RRI", "!0r,!1r,!2d" }, 132 { kX86Imul16RMI, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RMI", "!0r,[!1r+!2d],!3d" }, 133 { kX86Imul16RAI, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, 
"Imul16RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" }, 134 135 { kX86Imul32RRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RRI", "!0r,!1r,!2d" }, 136 { kX86Imul32RMI, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RMI", "!0r,[!1r+!2d],!3d" }, 137 { kX86Imul32RAI, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" }, 138 { kX86Imul32RRI8, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RRI8", "!0r,!1r,!2d" }, 139 { kX86Imul32RMI8, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RMI8", "!0r,[!1r+!2d],!3d" }, 140 { kX86Imul32RAI8, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" }, 141 142 { kX86Mov8MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8MR", "[!0r+!1d],!2r" }, 143 { kX86Mov8AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8AR", "[!0r+!1r<<!2d+!3d],!4r" }, 144 { kX86Mov8TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8TR", "fs:[!0d],!1r" }, 145 { kX86Mov8RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RR", "!0r,!1r" }, 146 { kX86Mov8RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RM", "!0r,[!1r+!2d]" }, 147 { kX86Mov8RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, 148 { kX86Mov8RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RT", "!0r,fs:[!1d]" }, 149 { kX86Mov8RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB0, 0, 0, 0, 0, 1 }, "Mov8RI", "!0r,!1d" }, 
150 { kX86Mov8MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8MI", "[!0r+!1d],!2d" }, 151 { kX86Mov8AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8AI", "[!0r+!1r<<!2d+!3d],!4d" }, 152 { kX86Mov8TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8TI", "fs:[!0d],!1d" }, 153 154 { kX86Mov16MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x66, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov16MR", "[!0r+!1d],!2r" }, 155 { kX86Mov16AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x66, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov16AR", "[!0r+!1r<<!2d+!3d],!4r" }, 156 { kX86Mov16TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0x66, 0x89, 0, 0, 0, 0, 0 }, "Mov16TR", "fs:[!0d],!1r" }, 157 { kX86Mov16RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RR", "!0r,!1r" }, 158 { kX86Mov16RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RM", "!0r,[!1r+!2d]" }, 159 { kX86Mov16RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RA", "!0r,[!1r+!2r<<!3d+!4d]" }, 160 { kX86Mov16RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0x66, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RT", "!0r,fs:[!1d]" }, 161 { kX86Mov16RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0x66, 0, 0xB8, 0, 0, 0, 0, 2 }, "Mov16RI", "!0r,!1d" }, 162 { kX86Mov16MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0x66, 0, 0xC7, 0, 0, 0, 0, 2 }, "Mov16MI", "[!0r+!1d],!2d" }, 163 { kX86Mov16AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0x66, 0, 0xC7, 0, 0, 0, 0, 2 }, "Mov16AI", "[!0r+!1r<<!2d+!3d],!4d" }, 164 { kX86Mov16TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0x66, 0xC7, 0, 0, 0, 0, 2 }, "Mov16TI", "fs:[!0d],!1d" }, 165 166 { kX86Mov32MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32MR", "[!0r+!1d],!2r" }, 167 { kX86Mov32AR, 
kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32AR", "[!0r+!1r<<!2d+!3d],!4r" }, 168 { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32TR", "fs:[!0d],!1r" }, 169 { kX86Mov32RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RR", "!0r,!1r" }, 170 { kX86Mov32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RM", "!0r,[!1r+!2d]" }, 171 { kX86Mov32RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, 172 { kX86Mov32RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RT", "!0r,fs:[!1d]" }, 173 { kX86Mov32RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "Mov32RI", "!0r,!1d" }, 174 { kX86Mov32MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32MI", "[!0r+!1d],!2d" }, 175 { kX86Mov32AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" }, 176 { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32TI", "fs:[!0d],!1d" }, 177 178 { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, 179 180#define SHIFT_ENCODING_MAP(opname, modrm_opcode) \ 181{ kX86 ## opname ## 8RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8RI", "!0r,!1d" }, \ 182{ kX86 ## opname ## 8MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8MI", "[!0r+!1d],!2d" }, \ 183{ kX86 ## opname ## 8AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8AI", 
"[!0r+!1r<<!2d+!3d],!4d" }, \ 184{ kX86 ## opname ## 8RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8RC", "!0r,cl" }, \ 185{ kX86 ## opname ## 8MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8MC", "[!0r+!1d],cl" }, \ 186{ kX86 ## opname ## 8AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8AC", "[!0r+!1r<<!2d+!3d],cl" }, \ 187 \ 188{ kX86 ## opname ## 16RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16RI", "!0r,!1d" }, \ 189{ kX86 ## opname ## 16MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16MI", "[!0r+!1d],!2d" }, \ 190{ kX86 ## opname ## 16AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ 191{ kX86 ## opname ## 16RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16RC", "!0r,cl" }, \ 192{ kX86 ## opname ## 16MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16MC", "[!0r+!1d],cl" }, \ 193{ kX86 ## opname ## 16AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16AC", "[!0r+!1r<<!2d+!3d],cl" }, \ 194 \ 195{ kX86 ## opname ## 32RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32RI", "!0r,!1d" }, \ 196{ kX86 ## opname ## 32MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 
0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32MI", "[!0r+!1d],!2d" }, \ 197{ kX86 ## opname ## 32AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ 198{ kX86 ## opname ## 32RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32RC", "!0r,cl" }, \ 199{ kX86 ## opname ## 32MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32MC", "[!0r+!1d],cl" }, \ 200{ kX86 ## opname ## 32AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32AC", "[!0r+!1r<<!2d+!3d],cl" } 201 202 SHIFT_ENCODING_MAP(Rol, 0x0), 203 SHIFT_ENCODING_MAP(Ror, 0x1), 204 SHIFT_ENCODING_MAP(Rcl, 0x2), 205 SHIFT_ENCODING_MAP(Rcr, 0x3), 206 SHIFT_ENCODING_MAP(Sal, 0x4), 207 SHIFT_ENCODING_MAP(Shr, 0x5), 208 SHIFT_ENCODING_MAP(Sar, 0x7), 209#undef SHIFT_ENCODING_MAP 210 211 { kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0}, "Cmc", "" }, 212 213 { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8RI", "!0r,!1d" }, 214 { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8MI", "[!0r+!1d],!2d" }, 215 { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" }, 216 { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16RI", "!0r,!1d" }, 217 { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16MI", "[!0r+!1d],!2d" }, 218 { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16AI", 
"[!0r+!1r<<!2d+!3d],!4d" }, 219 { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32RI", "!0r,!1d" }, 220 { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32MI", "[!0r+!1d],!2d" }, 221 { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" }, 222 { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0}, "Test32RR", "!0r,!1r" }, 223 224#define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \ 225 reg, reg_kind, reg_flags, \ 226 mem, mem_kind, mem_flags, \ 227 arr, arr_kind, arr_flags, imm, \ 228 b_flags, hw_flags, w_flags, \ 229 b_format, hw_format, w_format) \ 230{ kX86 ## opname ## 8 ## reg, reg_kind, reg_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #reg, #b_format "!0r" }, \ 231{ kX86 ## opname ## 8 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #mem, #b_format "[!0r+!1d]" }, \ 232{ kX86 ## opname ## 8 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #arr, #b_format "[!0r+!1r<<!2d+!3d]" }, \ 233{ kX86 ## opname ## 16 ## reg, reg_kind, reg_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #reg, #hw_format "!0r" }, \ 234{ kX86 ## opname ## 16 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #mem, #hw_format "[!0r+!1d]" }, \ 235{ kX86 ## opname ## 16 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #arr, #hw_format "[!0r+!1r<<!2d+!3d]" }, \ 236{ kX86 ## opname ## 32 ## reg, reg_kind, reg_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 
0, 0, modrm, 0, imm << 2}, #opname "32" #reg, #w_format "!0r" }, \ 237{ kX86 ## opname ## 32 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #mem, #w_format "[!0r+!1d]" }, \ 238{ kX86 ## opname ## 32 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #arr, #w_format "[!0r+!1r<<!2d+!3d]" } 239 240 UNARY_ENCODING_MAP(Not, 0x2, IS_STORE, 0, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""), 241 UNARY_ENCODING_MAP(Neg, 0x3, IS_STORE, SETS_CCODES, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""), 242 243 UNARY_ENCODING_MAP(Mul, 0x4, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA, REG_DEFAD_USEA, "ax,al,", "dx:ax,ax,", "edx:eax,eax,"), 244 UNARY_ENCODING_MAP(Imul, 0x5, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA, REG_DEFAD_USEA, "ax,al,", "dx:ax,ax,", "edx:eax,eax,"), 245 UNARY_ENCODING_MAP(Divmod, 0x6, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"), 246 UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"), 247#undef UNARY_ENCODING_MAP 248 249#define EXT_0F_ENCODING_MAP(opname, prefix, 
opcode, reg_def) \ 250{ kX86 ## opname ## RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \ 251{ kX86 ## opname ## RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \ 252{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" } 253 254 EXT_0F_ENCODING_MAP(Movsd, 0xF2, 0x10, REG_DEF0), 255 { kX86MovsdMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdMR", "[!0r+!1d],!2r" }, 256 { kX86MovsdAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdAR", "[!0r+!1r<<!2d+!3d],!4r" }, 257 258 EXT_0F_ENCODING_MAP(Movss, 0xF3, 0x10, REG_DEF0), 259 { kX86MovssMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssMR", "[!0r+!1d],!2r" }, 260 { kX86MovssAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssAR", "[!0r+!1r<<!2d+!3d],!4r" }, 261 262 EXT_0F_ENCODING_MAP(Cvtsi2sd, 0xF2, 0x2A, REG_DEF0), 263 EXT_0F_ENCODING_MAP(Cvtsi2ss, 0xF3, 0x2A, REG_DEF0), 264 EXT_0F_ENCODING_MAP(Cvttsd2si, 0xF2, 0x2C, REG_DEF0), 265 EXT_0F_ENCODING_MAP(Cvttss2si, 0xF3, 0x2C, REG_DEF0), 266 EXT_0F_ENCODING_MAP(Cvtsd2si, 0xF2, 0x2D, REG_DEF0), 267 EXT_0F_ENCODING_MAP(Cvtss2si, 0xF3, 0x2D, REG_DEF0), 268 EXT_0F_ENCODING_MAP(Ucomisd, 0x66, 0x2E, SETS_CCODES), 269 EXT_0F_ENCODING_MAP(Ucomiss, 0x00, 0x2E, SETS_CCODES), 270 EXT_0F_ENCODING_MAP(Comisd, 0x66, 0x2F, SETS_CCODES), 271 EXT_0F_ENCODING_MAP(Comiss, 0x00, 0x2F, SETS_CCODES), 272 EXT_0F_ENCODING_MAP(Orps, 0x00, 0x56, REG_DEF0), 273 EXT_0F_ENCODING_MAP(Xorps, 0x00, 0x57, REG_DEF0), 274 EXT_0F_ENCODING_MAP(Addsd, 0xF2, 0x58, REG_DEF0), 275 EXT_0F_ENCODING_MAP(Addss, 0xF3, 0x58, REG_DEF0), 276 EXT_0F_ENCODING_MAP(Mulsd, 0xF2, 0x59, 
REG_DEF0), 277 EXT_0F_ENCODING_MAP(Mulss, 0xF3, 0x59, REG_DEF0), 278 EXT_0F_ENCODING_MAP(Cvtsd2ss, 0xF2, 0x5A, REG_DEF0), 279 EXT_0F_ENCODING_MAP(Cvtss2sd, 0xF3, 0x5A, REG_DEF0), 280 EXT_0F_ENCODING_MAP(Subsd, 0xF2, 0x5C, REG_DEF0), 281 EXT_0F_ENCODING_MAP(Subss, 0xF3, 0x5C, REG_DEF0), 282 EXT_0F_ENCODING_MAP(Divsd, 0xF2, 0x5E, REG_DEF0), 283 EXT_0F_ENCODING_MAP(Divss, 0xF3, 0x5E, REG_DEF0), 284 285 { kX86PsrlqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 2, 0, 1 }, "PsrlqRI", "!0r,!1d" }, 286 { kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1 }, "PsllqRI", "!0r,!1d" }, 287 288 EXT_0F_ENCODING_MAP(Movdxr, 0x66, 0x6E, REG_DEF0), 289 { kX86MovdrxRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxRR", "!0r,!1r" }, 290 { kX86MovdrxMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxMR", "[!0r+!1d],!2r" }, 291 { kX86MovdrxAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxAR", "[!0r+!1r<<!2d+!3d],!4r" }, 292 293 { kX86Set8R, kRegCond, IS_BINARY_OP | REG_DEF0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8R", "!1c !0r" }, 294 { kX86Set8M, kMemCond, IS_STORE | IS_TERTIARY_OP | REG_USE0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8M", "!2c [!0r+!1d]" }, 295 { kX86Set8A, kArrayCond, IS_STORE | IS_QUIN_OP | REG_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8A", "!4c [!0r+!1r<<!2d+!3d]" }, 296 297 // TODO: load/store? 298 // Encode the modrm opcode as an extra opcode byte to avoid computation during assembly. 
299 { kX86Mfence, kReg, NO_OPERAND, { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0 }, "Mfence", "" }, 300 301 EXT_0F_ENCODING_MAP(Imul16, 0x66, 0xAF, REG_DEF0 | SETS_CCODES), 302 EXT_0F_ENCODING_MAP(Imul32, 0x00, 0xAF, REG_DEF0 | SETS_CCODES), 303 304 { kX86CmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "!0r,!1r" }, 305 { kX86CmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1d],!2r" }, 306 { kX86CmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" }, 307 { kX86LockCmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "!0r,!1r" }, 308 { kX86LockCmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1d],!2r" }, 309 { kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" }, 310 311 EXT_0F_ENCODING_MAP(Movzx8, 0x00, 0xB6, REG_DEF0), 312 EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0), 313 EXT_0F_ENCODING_MAP(Movsx8, 0x00, 0xBE, REG_DEF0), 314 EXT_0F_ENCODING_MAP(Movsx16, 0x00, 0xBF, REG_DEF0), 315#undef EXT_0F_ENCODING_MAP 316 317 { kX86Jcc8, kJcc, IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0, 0, 0x70, 0, 0, 0, 0, 0 }, "Jcc8", "!1c !0t" }, 318 { kX86Jcc32, kJcc, IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0, 0, 0x0F, 0x80, 0, 0, 0, 0 }, "Jcc32", "!1c !0t" }, 319 { kX86Jmp8, kJmp, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP, { 0, 0, 0xEB, 0, 0, 0, 0, 0 }, "Jmp8", "!0t" }, 320 { kX86Jmp32, kJmp, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP, { 0, 0, 0xE9, 0, 0, 0, 0, 0 }, "Jmp32", "!0t" }, 
321 { kX86JmpR, kJmp, IS_UNARY_OP | IS_BRANCH | REG_USE0, { 0, 0, 0xFF, 0, 0, 4, 0, 0 }, "JmpR", "!0r" }, 322 { kX86CallR, kCall, IS_UNARY_OP | IS_BRANCH | REG_USE0, { 0, 0, 0xE8, 0, 0, 0, 0, 0 }, "CallR", "!0r" }, 323 { kX86CallM, kCall, IS_BINARY_OP | IS_BRANCH | IS_LOAD | REG_USE0, { 0, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallM", "[!0r+!1d]" }, 324 { kX86CallA, kCall, IS_QUAD_OP | IS_BRANCH | IS_LOAD | REG_USE01, { 0, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallA", "[!0r+!1r<<!2d+!3d]" }, 325 { kX86CallT, kCall, IS_UNARY_OP | IS_BRANCH | IS_LOAD, { THREAD_PREFIX, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallT", "fs:[!0d]" }, 326 { kX86Ret, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xC3, 0, 0, 0, 0, 0 }, "Ret", "" }, 327 328 { kX86StartOfMethod, kMacro, IS_UNARY_OP | SETS_CCODES, { 0, 0, 0, 0, 0, 0, 0, 0 }, "StartOfMethod", "!0r" }, 329 { kX86PcRelLoadRA, kPcRel, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "PcRelLoadRA", "!0r,[!1r+!2r<<!3d+!4p]" }, 330 { kX86PcRelAdr, kPcRel, IS_LOAD | IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "PcRelAdr", "!0r,!1d" }, 331}; 332 333static size_t ComputeSize(const X86EncodingMap* entry, int base, int displacement, bool has_sib) { 334 size_t size = 0; 335 if (entry->skeleton.prefix1 > 0) { 336 ++size; 337 if (entry->skeleton.prefix2 > 0) { 338 ++size; 339 } 340 } 341 ++size; // opcode 342 if (entry->skeleton.opcode == 0x0F) { 343 ++size; 344 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) { 345 ++size; 346 } 347 } 348 ++size; // modrm 349 if (has_sib || base == rX86_SP) { 350 // SP requires a SIB byte. 351 ++size; 352 } 353 if (displacement != 0 || base == rBP) { 354 // BP requires an explicit displacement, even when it's 0. 355 if (entry->opcode != kX86Lea32RA) { 356 DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), 0ULL) << entry->name; 357 } 358 size += IS_SIMM8(displacement) ? 
1 : 4; 359 } 360 size += entry->skeleton.immediate_bytes; 361 return size; 362} 363 364int X86Mir2Lir::GetInsnSize(LIR* lir) { 365 const X86EncodingMap* entry = &X86Mir2Lir::EncodingMap[lir->opcode]; 366 switch (entry->kind) { 367 case kData: 368 return 4; // 4 bytes of data 369 case kNop: 370 return lir->operands[0]; // length of nop is sole operand 371 case kNullary: 372 return 1; // 1 byte of opcode 373 case kReg: // lir operands - 0: reg 374 return ComputeSize(entry, 0, 0, false); 375 case kMem: // lir operands - 0: base, 1: disp 376 return ComputeSize(entry, lir->operands[0], lir->operands[1], false); 377 case kArray: // lir operands - 0: base, 1: index, 2: scale, 3: disp 378 return ComputeSize(entry, lir->operands[0], lir->operands[3], true); 379 case kMemReg: // lir operands - 0: base, 1: disp, 2: reg 380 return ComputeSize(entry, lir->operands[0], lir->operands[1], false); 381 case kArrayReg: // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg 382 return ComputeSize(entry, lir->operands[0], lir->operands[3], true); 383 case kThreadReg: // lir operands - 0: disp, 1: reg 384 return ComputeSize(entry, 0, lir->operands[0], false); 385 case kRegReg: 386 return ComputeSize(entry, 0, 0, false); 387 case kRegRegStore: 388 return ComputeSize(entry, 0, 0, false); 389 case kRegMem: // lir operands - 0: reg, 1: base, 2: disp 390 return ComputeSize(entry, lir->operands[1], lir->operands[2], false); 391 case kRegArray: // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp 392 return ComputeSize(entry, lir->operands[1], lir->operands[4], true); 393 case kRegThread: // lir operands - 0: reg, 1: disp 394 return ComputeSize(entry, 0, 0x12345678, false); // displacement size is always 32bit 395 case kRegImm: { // lir operands - 0: reg, 1: immediate 396 size_t size = ComputeSize(entry, 0, 0, false); 397 if (entry->skeleton.ax_opcode == 0) { 398 return size; 399 } else { 400 // AX opcodes don't require the modrm byte. 
401 int reg = lir->operands[0]; 402 return size - (reg == rAX ? 1 : 0); 403 } 404 } 405 case kMemImm: // lir operands - 0: base, 1: disp, 2: immediate 406 return ComputeSize(entry, lir->operands[0], lir->operands[1], false); 407 case kArrayImm: // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate 408 return ComputeSize(entry, lir->operands[0], lir->operands[3], true); 409 case kThreadImm: // lir operands - 0: disp, 1: imm 410 return ComputeSize(entry, 0, 0x12345678, false); // displacement size is always 32bit 411 case kRegRegImm: // lir operands - 0: reg, 1: reg, 2: imm 412 return ComputeSize(entry, 0, 0, false); 413 case kRegMemImm: // lir operands - 0: reg, 1: base, 2: disp, 3: imm 414 return ComputeSize(entry, lir->operands[1], lir->operands[2], false); 415 case kRegArrayImm: // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp, 5: imm 416 return ComputeSize(entry, lir->operands[1], lir->operands[4], true); 417 case kMovRegImm: // lir operands - 0: reg, 1: immediate 418 return 1 + entry->skeleton.immediate_bytes; 419 case kShiftRegImm: // lir operands - 0: reg, 1: immediate 420 // Shift by immediate one has a shorter opcode. 421 return ComputeSize(entry, 0, 0, false) - (lir->operands[1] == 1 ? 1 : 0); 422 case kShiftMemImm: // lir operands - 0: base, 1: disp, 2: immediate 423 // Shift by immediate one has a shorter opcode. 424 return ComputeSize(entry, lir->operands[0], lir->operands[1], false) - 425 (lir->operands[2] == 1 ? 1 : 0); 426 case kShiftArrayImm: // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate 427 // Shift by immediate one has a shorter opcode. 428 return ComputeSize(entry, lir->operands[0], lir->operands[3], true) - 429 (lir->operands[4] == 1 ? 
1 : 0); 430 case kShiftRegCl: 431 return ComputeSize(entry, 0, 0, false); 432 case kShiftMemCl: // lir operands - 0: base, 1: disp, 2: cl 433 return ComputeSize(entry, lir->operands[0], lir->operands[1], false); 434 case kShiftArrayCl: // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg 435 return ComputeSize(entry, lir->operands[0], lir->operands[3], true); 436 case kRegCond: // lir operands - 0: reg, 1: cond 437 return ComputeSize(entry, 0, 0, false); 438 case kMemCond: // lir operands - 0: base, 1: disp, 2: cond 439 return ComputeSize(entry, lir->operands[0], lir->operands[1], false); 440 case kArrayCond: // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: cond 441 return ComputeSize(entry, lir->operands[0], lir->operands[3], true); 442 case kJcc: 443 if (lir->opcode == kX86Jcc8) { 444 return 2; // opcode + rel8 445 } else { 446 DCHECK(lir->opcode == kX86Jcc32); 447 return 6; // 2 byte opcode + rel32 448 } 449 case kJmp: 450 if (lir->opcode == kX86Jmp8) { 451 return 2; // opcode + rel8 452 } else if (lir->opcode == kX86Jmp32) { 453 return 5; // opcode + rel32 454 } else { 455 DCHECK(lir->opcode == kX86JmpR); 456 return 2; // opcode + modrm 457 } 458 case kCall: 459 switch (lir->opcode) { 460 case kX86CallR: return 2; // opcode modrm 461 case kX86CallM: // lir operands - 0: base, 1: disp 462 return ComputeSize(entry, lir->operands[0], lir->operands[1], false); 463 case kX86CallA: // lir operands - 0: base, 1: index, 2: scale, 3: disp 464 return ComputeSize(entry, lir->operands[0], lir->operands[3], true); 465 case kX86CallT: // lir operands - 0: disp 466 return ComputeSize(entry, 0, 0x12345678, false); // displacement size is always 32bit 467 default: 468 break; 469 } 470 break; 471 case kPcRel: 472 if (entry->opcode == kX86PcRelLoadRA) { 473 // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table 474 return ComputeSize(entry, lir->operands[1], 0x12345678, true); 475 } else { 476 DCHECK(entry->opcode == kX86PcRelAdr); 477 return 5; // 
opcode with reg + 4 byte immediate 478 } 479 case kMacro: 480 DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod)); 481 return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ + 482 ComputeSize(&X86Mir2Lir::EncodingMap[kX86Sub32RI], 0, 0, false) - 483 (lir->operands[0] == rAX ? 1 : 0); // shorter ax encoding 484 default: 485 break; 486 } 487 UNIMPLEMENTED(FATAL) << "Unimplemented size encoding for: " << entry->name; 488 return 0; 489} 490 491static uint8_t ModrmForDisp(int base, int disp) { 492 // BP requires an explicit disp, so do not omit it in the 0 case 493 if (disp == 0 && base != rBP) { 494 return 0; 495 } else if (IS_SIMM8(disp)) { 496 return 1; 497 } else { 498 return 2; 499 } 500} 501 502void X86Mir2Lir::EmitDisp(int base, int disp) { 503 // BP requires an explicit disp, so do not omit it in the 0 case 504 if (disp == 0 && base != rBP) { 505 return; 506 } else if (IS_SIMM8(disp)) { 507 code_buffer_.push_back(disp & 0xFF); 508 } else { 509 code_buffer_.push_back(disp & 0xFF); 510 code_buffer_.push_back((disp >> 8) & 0xFF); 511 code_buffer_.push_back((disp >> 16) & 0xFF); 512 code_buffer_.push_back((disp >> 24) & 0xFF); 513 } 514} 515 516void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) { 517 if (entry->skeleton.prefix1 != 0) { 518 code_buffer_.push_back(entry->skeleton.prefix1); 519 if (entry->skeleton.prefix2 != 0) { 520 code_buffer_.push_back(entry->skeleton.prefix2); 521 } 522 } else { 523 DCHECK_EQ(0, entry->skeleton.prefix2); 524 } 525 code_buffer_.push_back(entry->skeleton.opcode); 526 if (entry->skeleton.opcode == 0x0F) { 527 code_buffer_.push_back(entry->skeleton.extra_opcode1); 528 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 529 code_buffer_.push_back(entry->skeleton.extra_opcode2); 530 } else { 531 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 532 } 533 } else { 534 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 535 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 536 } 537 
if (X86_FPREG(reg)) { 538 reg = reg & X86_FP_REG_MASK; 539 } 540 if (reg >= 4) { 541 DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg) 542 << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file); 543 } 544 DCHECK_LT(reg, 8); 545 uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; 546 code_buffer_.push_back(modrm); 547 DCHECK_EQ(0, entry->skeleton.ax_opcode); 548 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 549} 550 551void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp) { 552 if (entry->skeleton.prefix1 != 0) { 553 code_buffer_.push_back(entry->skeleton.prefix1); 554 if (entry->skeleton.prefix2 != 0) { 555 code_buffer_.push_back(entry->skeleton.prefix2); 556 } 557 } else { 558 DCHECK_EQ(0, entry->skeleton.prefix2); 559 } 560 code_buffer_.push_back(entry->skeleton.opcode); 561 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 562 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 563 DCHECK_LT(entry->skeleton.modrm_opcode, 8); 564 DCHECK_LT(base, 8); 565 uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base; 566 code_buffer_.push_back(modrm); 567 EmitDisp(base, disp); 568 DCHECK_EQ(0, entry->skeleton.ax_opcode); 569 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 570} 571 572void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry, 573 uint8_t base, int disp, uint8_t reg) { 574 if (entry->skeleton.prefix1 != 0) { 575 code_buffer_.push_back(entry->skeleton.prefix1); 576 if (entry->skeleton.prefix2 != 0) { 577 code_buffer_.push_back(entry->skeleton.prefix2); 578 } 579 } else { 580 DCHECK_EQ(0, entry->skeleton.prefix2); 581 } 582 code_buffer_.push_back(entry->skeleton.opcode); 583 if (entry->skeleton.opcode == 0x0F) { 584 code_buffer_.push_back(entry->skeleton.extra_opcode1); 585 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 586 code_buffer_.push_back(entry->skeleton.extra_opcode2); 587 } else { 588 DCHECK_EQ(0, 
entry->skeleton.extra_opcode2); 589 } 590 } else { 591 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 592 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 593 } 594 if (X86_FPREG(reg)) { 595 reg = reg & X86_FP_REG_MASK; 596 } 597 if (reg >= 4) { 598 DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg) 599 << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file); 600 } 601 DCHECK_LT(reg, 8); 602 DCHECK_LT(base, 8); 603 uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | base; 604 code_buffer_.push_back(modrm); 605 if (base == rX86_SP) { 606 // Special SIB for SP base 607 code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP); 608 } 609 EmitDisp(base, disp); 610 DCHECK_EQ(0, entry->skeleton.modrm_opcode); 611 DCHECK_EQ(0, entry->skeleton.ax_opcode); 612 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 613} 614 615void X86Mir2Lir::EmitRegMem(const X86EncodingMap* entry, 616 uint8_t reg, uint8_t base, int disp) { 617 // Opcode will flip operands. 618 EmitMemReg(entry, base, disp, reg); 619} 620 621void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index, 622 int scale, int disp) { 623 if (entry->skeleton.prefix1 != 0) { 624 code_buffer_.push_back(entry->skeleton.prefix1); 625 if (entry->skeleton.prefix2 != 0) { 626 code_buffer_.push_back(entry->skeleton.prefix2); 627 } 628 } else { 629 DCHECK_EQ(0, entry->skeleton.prefix2); 630 } 631 code_buffer_.push_back(entry->skeleton.opcode); 632 if (entry->skeleton.opcode == 0x0F) { 633 code_buffer_.push_back(entry->skeleton.extra_opcode1); 634 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 635 code_buffer_.push_back(entry->skeleton.extra_opcode2); 636 } else { 637 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 638 } 639 } else { 640 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 641 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 642 } 643 if (X86_FPREG(reg)) { 644 reg = reg & X86_FP_REG_MASK; 645 } 646 
DCHECK_LT(reg, 8); 647 uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | rX86_SP; 648 code_buffer_.push_back(modrm); 649 DCHECK_LT(scale, 4); 650 DCHECK_LT(index, 8); 651 DCHECK_LT(base, 8); 652 uint8_t sib = (scale << 6) | (index << 3) | base; 653 code_buffer_.push_back(sib); 654 EmitDisp(base, disp); 655 DCHECK_EQ(0, entry->skeleton.modrm_opcode); 656 DCHECK_EQ(0, entry->skeleton.ax_opcode); 657 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 658} 659 660void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp, 661 uint8_t reg) { 662 // Opcode will flip operands. 663 EmitRegArray(entry, reg, base, index, scale, disp); 664} 665 666void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) { 667 DCHECK_NE(entry->skeleton.prefix1, 0); 668 code_buffer_.push_back(entry->skeleton.prefix1); 669 if (entry->skeleton.prefix2 != 0) { 670 code_buffer_.push_back(entry->skeleton.prefix2); 671 } 672 code_buffer_.push_back(entry->skeleton.opcode); 673 if (entry->skeleton.opcode == 0x0F) { 674 code_buffer_.push_back(entry->skeleton.extra_opcode1); 675 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 676 code_buffer_.push_back(entry->skeleton.extra_opcode2); 677 } else { 678 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 679 } 680 } else { 681 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 682 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 683 } 684 if (X86_FPREG(reg)) { 685 reg = reg & X86_FP_REG_MASK; 686 } 687 if (reg >= 4) { 688 DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg) 689 << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file); 690 } 691 DCHECK_LT(reg, 8); 692 uint8_t modrm = (0 << 6) | (reg << 3) | rBP; 693 code_buffer_.push_back(modrm); 694 code_buffer_.push_back(disp & 0xFF); 695 code_buffer_.push_back((disp >> 8) & 0xFF); 696 code_buffer_.push_back((disp >> 16) & 0xFF); 697 code_buffer_.push_back((disp >> 
24) & 0xFF); 698 DCHECK_EQ(0, entry->skeleton.modrm_opcode); 699 DCHECK_EQ(0, entry->skeleton.ax_opcode); 700 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 701} 702 703void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) { 704 if (entry->skeleton.prefix1 != 0) { 705 code_buffer_.push_back(entry->skeleton.prefix1); 706 if (entry->skeleton.prefix2 != 0) { 707 code_buffer_.push_back(entry->skeleton.prefix2); 708 } 709 } else { 710 DCHECK_EQ(0, entry->skeleton.prefix2); 711 } 712 code_buffer_.push_back(entry->skeleton.opcode); 713 if (entry->skeleton.opcode == 0x0F) { 714 code_buffer_.push_back(entry->skeleton.extra_opcode1); 715 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 716 code_buffer_.push_back(entry->skeleton.extra_opcode2); 717 } else { 718 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 719 } 720 } else { 721 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 722 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 723 } 724 if (X86_FPREG(reg1)) { 725 reg1 = reg1 & X86_FP_REG_MASK; 726 } 727 if (X86_FPREG(reg2)) { 728 reg2 = reg2 & X86_FP_REG_MASK; 729 } 730 DCHECK_LT(reg1, 8); 731 DCHECK_LT(reg2, 8); 732 uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2; 733 code_buffer_.push_back(modrm); 734 DCHECK_EQ(0, entry->skeleton.modrm_opcode); 735 DCHECK_EQ(0, entry->skeleton.ax_opcode); 736 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 737} 738 739void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry, 740 uint8_t reg1, uint8_t reg2, int32_t imm) { 741 if (entry->skeleton.prefix1 != 0) { 742 code_buffer_.push_back(entry->skeleton.prefix1); 743 if (entry->skeleton.prefix2 != 0) { 744 code_buffer_.push_back(entry->skeleton.prefix2); 745 } 746 } else { 747 DCHECK_EQ(0, entry->skeleton.prefix2); 748 } 749 code_buffer_.push_back(entry->skeleton.opcode); 750 if (entry->skeleton.opcode == 0x0F) { 751 code_buffer_.push_back(entry->skeleton.extra_opcode1); 752 if (entry->skeleton.extra_opcode1 == 0x38 || 
entry->skeleton.extra_opcode2 == 0x3A) { 753 code_buffer_.push_back(entry->skeleton.extra_opcode2); 754 } else { 755 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 756 } 757 } else { 758 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 759 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 760 } 761 if (X86_FPREG(reg1)) { 762 reg1 = reg1 & X86_FP_REG_MASK; 763 } 764 if (X86_FPREG(reg2)) { 765 reg2 = reg2 & X86_FP_REG_MASK; 766 } 767 DCHECK_LT(reg1, 8); 768 DCHECK_LT(reg2, 8); 769 uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2; 770 code_buffer_.push_back(modrm); 771 DCHECK_EQ(0, entry->skeleton.modrm_opcode); 772 DCHECK_EQ(0, entry->skeleton.ax_opcode); 773 switch (entry->skeleton.immediate_bytes) { 774 case 1: 775 DCHECK(IS_SIMM8(imm)); 776 code_buffer_.push_back(imm & 0xFF); 777 break; 778 case 2: 779 DCHECK(IS_SIMM16(imm)); 780 code_buffer_.push_back(imm & 0xFF); 781 code_buffer_.push_back((imm >> 8) & 0xFF); 782 break; 783 case 4: 784 code_buffer_.push_back(imm & 0xFF); 785 code_buffer_.push_back((imm >> 8) & 0xFF); 786 code_buffer_.push_back((imm >> 16) & 0xFF); 787 code_buffer_.push_back((imm >> 24) & 0xFF); 788 break; 789 default: 790 LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes 791 << ") for instruction: " << entry->name; 792 break; 793 } 794} 795 796void X86Mir2Lir::EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) { 797 if (entry->skeleton.prefix1 != 0) { 798 code_buffer_.push_back(entry->skeleton.prefix1); 799 if (entry->skeleton.prefix2 != 0) { 800 code_buffer_.push_back(entry->skeleton.prefix2); 801 } 802 } else { 803 DCHECK_EQ(0, entry->skeleton.prefix2); 804 } 805 if (reg == rAX && entry->skeleton.ax_opcode != 0) { 806 code_buffer_.push_back(entry->skeleton.ax_opcode); 807 } else { 808 code_buffer_.push_back(entry->skeleton.opcode); 809 if (entry->skeleton.opcode == 0x0F) { 810 code_buffer_.push_back(entry->skeleton.extra_opcode1); 811 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) 
{ 812 code_buffer_.push_back(entry->skeleton.extra_opcode2); 813 } else { 814 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 815 } 816 } else { 817 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 818 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 819 } 820 if (X86_FPREG(reg)) { 821 reg = reg & X86_FP_REG_MASK; 822 } 823 uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; 824 code_buffer_.push_back(modrm); 825 } 826 switch (entry->skeleton.immediate_bytes) { 827 case 1: 828 DCHECK(IS_SIMM8(imm)); 829 code_buffer_.push_back(imm & 0xFF); 830 break; 831 case 2: 832 DCHECK(IS_SIMM16(imm)); 833 code_buffer_.push_back(imm & 0xFF); 834 code_buffer_.push_back((imm >> 8) & 0xFF); 835 break; 836 case 4: 837 code_buffer_.push_back(imm & 0xFF); 838 code_buffer_.push_back((imm >> 8) & 0xFF); 839 code_buffer_.push_back((imm >> 16) & 0xFF); 840 code_buffer_.push_back((imm >> 24) & 0xFF); 841 break; 842 default: 843 LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes 844 << ") for instruction: " << entry->name; 845 break; 846 } 847} 848 849void X86Mir2Lir::EmitThreadImm(const X86EncodingMap* entry, int disp, int imm) { 850 if (entry->skeleton.prefix1 != 0) { 851 code_buffer_.push_back(entry->skeleton.prefix1); 852 if (entry->skeleton.prefix2 != 0) { 853 code_buffer_.push_back(entry->skeleton.prefix2); 854 } 855 } else { 856 DCHECK_EQ(0, entry->skeleton.prefix2); 857 } 858 code_buffer_.push_back(entry->skeleton.opcode); 859 if (entry->skeleton.opcode == 0x0F) { 860 code_buffer_.push_back(entry->skeleton.extra_opcode1); 861 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 862 code_buffer_.push_back(entry->skeleton.extra_opcode2); 863 } else { 864 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 865 } 866 } else { 867 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 868 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 869 } 870 uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP; 871 
code_buffer_.push_back(modrm); 872 code_buffer_.push_back(disp & 0xFF); 873 code_buffer_.push_back((disp >> 8) & 0xFF); 874 code_buffer_.push_back((disp >> 16) & 0xFF); 875 code_buffer_.push_back((disp >> 24) & 0xFF); 876 switch (entry->skeleton.immediate_bytes) { 877 case 1: 878 DCHECK(IS_SIMM8(imm)); 879 code_buffer_.push_back(imm & 0xFF); 880 break; 881 case 2: 882 DCHECK(IS_SIMM16(imm)); 883 code_buffer_.push_back(imm & 0xFF); 884 code_buffer_.push_back((imm >> 8) & 0xFF); 885 break; 886 case 4: 887 code_buffer_.push_back(imm & 0xFF); 888 code_buffer_.push_back((imm >> 8) & 0xFF); 889 code_buffer_.push_back((imm >> 16) & 0xFF); 890 code_buffer_.push_back((imm >> 24) & 0xFF); 891 break; 892 default: 893 LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes 894 << ") for instruction: " << entry->name; 895 break; 896 } 897 DCHECK_EQ(entry->skeleton.ax_opcode, 0); 898} 899 900void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) { 901 DCHECK_LT(reg, 8); 902 code_buffer_.push_back(0xB8 + reg); 903 code_buffer_.push_back(imm & 0xFF); 904 code_buffer_.push_back((imm >> 8) & 0xFF); 905 code_buffer_.push_back((imm >> 16) & 0xFF); 906 code_buffer_.push_back((imm >> 24) & 0xFF); 907} 908 909void X86Mir2Lir::EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) { 910 if (entry->skeleton.prefix1 != 0) { 911 code_buffer_.push_back(entry->skeleton.prefix1); 912 if (entry->skeleton.prefix2 != 0) { 913 code_buffer_.push_back(entry->skeleton.prefix2); 914 } 915 } else { 916 DCHECK_EQ(0, entry->skeleton.prefix2); 917 } 918 if (imm != 1) { 919 code_buffer_.push_back(entry->skeleton.opcode); 920 } else { 921 // Shorter encoding for 1 bit shift 922 code_buffer_.push_back(entry->skeleton.ax_opcode); 923 } 924 if (entry->skeleton.opcode == 0x0F) { 925 code_buffer_.push_back(entry->skeleton.extra_opcode1); 926 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 927 
code_buffer_.push_back(entry->skeleton.extra_opcode2); 928 } else { 929 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 930 } 931 } else { 932 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 933 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 934 } 935 if (reg >= 4) { 936 DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg) 937 << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file); 938 } 939 DCHECK_LT(reg, 8); 940 uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; 941 code_buffer_.push_back(modrm); 942 if (imm != 1) { 943 DCHECK_EQ(entry->skeleton.immediate_bytes, 1); 944 DCHECK(IS_SIMM8(imm)); 945 code_buffer_.push_back(imm & 0xFF); 946 } 947} 948 949void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl) { 950 DCHECK_EQ(cl, static_cast<uint8_t>(rCX)); 951 if (entry->skeleton.prefix1 != 0) { 952 code_buffer_.push_back(entry->skeleton.prefix1); 953 if (entry->skeleton.prefix2 != 0) { 954 code_buffer_.push_back(entry->skeleton.prefix2); 955 } 956 } else { 957 DCHECK_EQ(0, entry->skeleton.prefix2); 958 } 959 code_buffer_.push_back(entry->skeleton.opcode); 960 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 961 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 962 DCHECK_LT(reg, 8); 963 uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; 964 code_buffer_.push_back(modrm); 965 DCHECK_EQ(0, entry->skeleton.ax_opcode); 966 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 967} 968 969void X86Mir2Lir::EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition) { 970 if (entry->skeleton.prefix1 != 0) { 971 code_buffer_.push_back(entry->skeleton.prefix1); 972 if (entry->skeleton.prefix2 != 0) { 973 code_buffer_.push_back(entry->skeleton.prefix2); 974 } 975 } else { 976 DCHECK_EQ(0, entry->skeleton.prefix2); 977 } 978 DCHECK_EQ(0, entry->skeleton.ax_opcode); 979 DCHECK_EQ(0x0F, entry->skeleton.opcode); 980 code_buffer_.push_back(0x0F); 981 DCHECK_EQ(0x90, 
entry->skeleton.extra_opcode1); 982 code_buffer_.push_back(0x90 | condition); 983 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 984 DCHECK_LT(reg, 8); 985 uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; 986 code_buffer_.push_back(modrm); 987 DCHECK_EQ(entry->skeleton.immediate_bytes, 0); 988} 989 990void X86Mir2Lir::EmitJmp(const X86EncodingMap* entry, int rel) { 991 if (entry->opcode == kX86Jmp8) { 992 DCHECK(IS_SIMM8(rel)); 993 code_buffer_.push_back(0xEB); 994 code_buffer_.push_back(rel & 0xFF); 995 } else if (entry->opcode == kX86Jmp32) { 996 code_buffer_.push_back(0xE9); 997 code_buffer_.push_back(rel & 0xFF); 998 code_buffer_.push_back((rel >> 8) & 0xFF); 999 code_buffer_.push_back((rel >> 16) & 0xFF); 1000 code_buffer_.push_back((rel >> 24) & 0xFF); 1001 } else { 1002 DCHECK(entry->opcode == kX86JmpR); 1003 code_buffer_.push_back(entry->skeleton.opcode); 1004 uint8_t reg = static_cast<uint8_t>(rel); 1005 DCHECK_LT(reg, 8); 1006 uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; 1007 code_buffer_.push_back(modrm); 1008 } 1009} 1010 1011void X86Mir2Lir::EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc) { 1012 DCHECK_LT(cc, 16); 1013 if (entry->opcode == kX86Jcc8) { 1014 DCHECK(IS_SIMM8(rel)); 1015 code_buffer_.push_back(0x70 | cc); 1016 code_buffer_.push_back(rel & 0xFF); 1017 } else { 1018 DCHECK(entry->opcode == kX86Jcc32); 1019 code_buffer_.push_back(0x0F); 1020 code_buffer_.push_back(0x80 | cc); 1021 code_buffer_.push_back(rel & 0xFF); 1022 code_buffer_.push_back((rel >> 8) & 0xFF); 1023 code_buffer_.push_back((rel >> 16) & 0xFF); 1024 code_buffer_.push_back((rel >> 24) & 0xFF); 1025 } 1026} 1027 1028void X86Mir2Lir::EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp) { 1029 if (entry->skeleton.prefix1 != 0) { 1030 code_buffer_.push_back(entry->skeleton.prefix1); 1031 if (entry->skeleton.prefix2 != 0) { 1032 code_buffer_.push_back(entry->skeleton.prefix2); 1033 } 1034 } else { 1035 DCHECK_EQ(0, 
entry->skeleton.prefix2); 1036 } 1037 code_buffer_.push_back(entry->skeleton.opcode); 1038 if (entry->skeleton.opcode == 0x0F) { 1039 code_buffer_.push_back(entry->skeleton.extra_opcode1); 1040 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 1041 code_buffer_.push_back(entry->skeleton.extra_opcode2); 1042 } else { 1043 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 1044 } 1045 } else { 1046 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 1047 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 1048 } 1049 uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base; 1050 code_buffer_.push_back(modrm); 1051 if (base == rX86_SP) { 1052 // Special SIB for SP base 1053 code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP); 1054 } 1055 EmitDisp(base, disp); 1056 DCHECK_EQ(0, entry->skeleton.ax_opcode); 1057 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 1058} 1059 1060void X86Mir2Lir::EmitCallThread(const X86EncodingMap* entry, int disp) { 1061 DCHECK_NE(entry->skeleton.prefix1, 0); 1062 code_buffer_.push_back(entry->skeleton.prefix1); 1063 if (entry->skeleton.prefix2 != 0) { 1064 code_buffer_.push_back(entry->skeleton.prefix2); 1065 } 1066 code_buffer_.push_back(entry->skeleton.opcode); 1067 if (entry->skeleton.opcode == 0x0F) { 1068 code_buffer_.push_back(entry->skeleton.extra_opcode1); 1069 if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) { 1070 code_buffer_.push_back(entry->skeleton.extra_opcode2); 1071 } else { 1072 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 1073 } 1074 } else { 1075 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 1076 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 1077 } 1078 uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP; 1079 code_buffer_.push_back(modrm); 1080 code_buffer_.push_back(disp & 0xFF); 1081 code_buffer_.push_back((disp >> 8) & 0xFF); 1082 code_buffer_.push_back((disp >> 16) & 0xFF); 1083 code_buffer_.push_back((disp >> 
24) & 0xFF); 1084 DCHECK_EQ(0, entry->skeleton.ax_opcode); 1085 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 1086} 1087 1088void X86Mir2Lir::EmitPcRel(const X86EncodingMap* entry, uint8_t reg, 1089 int base_or_table, uint8_t index, int scale, int table_or_disp) { 1090 int disp; 1091 if (entry->opcode == kX86PcRelLoadRA) { 1092 Mir2Lir::SwitchTable *tab_rec = reinterpret_cast<Mir2Lir::SwitchTable*>(table_or_disp); 1093 disp = tab_rec->offset; 1094 } else { 1095 DCHECK(entry->opcode == kX86PcRelAdr); 1096 Mir2Lir::FillArrayData *tab_rec = reinterpret_cast<Mir2Lir::FillArrayData*>(base_or_table); 1097 disp = tab_rec->offset; 1098 } 1099 if (entry->skeleton.prefix1 != 0) { 1100 code_buffer_.push_back(entry->skeleton.prefix1); 1101 if (entry->skeleton.prefix2 != 0) { 1102 code_buffer_.push_back(entry->skeleton.prefix2); 1103 } 1104 } else { 1105 DCHECK_EQ(0, entry->skeleton.prefix2); 1106 } 1107 if (X86_FPREG(reg)) { 1108 reg = reg & X86_FP_REG_MASK; 1109 } 1110 DCHECK_LT(reg, 8); 1111 if (entry->opcode == kX86PcRelLoadRA) { 1112 code_buffer_.push_back(entry->skeleton.opcode); 1113 DCHECK_EQ(0, entry->skeleton.extra_opcode1); 1114 DCHECK_EQ(0, entry->skeleton.extra_opcode2); 1115 uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP; 1116 code_buffer_.push_back(modrm); 1117 DCHECK_LT(scale, 4); 1118 DCHECK_LT(index, 8); 1119 DCHECK_LT(base_or_table, 8); 1120 uint8_t base = static_cast<uint8_t>(base_or_table); 1121 uint8_t sib = (scale << 6) | (index << 3) | base; 1122 code_buffer_.push_back(sib); 1123 DCHECK_EQ(0, entry->skeleton.immediate_bytes); 1124 } else { 1125 code_buffer_.push_back(entry->skeleton.opcode + reg); 1126 } 1127 code_buffer_.push_back(disp & 0xFF); 1128 code_buffer_.push_back((disp >> 8) & 0xFF); 1129 code_buffer_.push_back((disp >> 16) & 0xFF); 1130 code_buffer_.push_back((disp >> 24) & 0xFF); 1131 DCHECK_EQ(0, entry->skeleton.modrm_opcode); 1132 DCHECK_EQ(0, entry->skeleton.ax_opcode); 1133} 1134 1135void X86Mir2Lir::EmitMacro(const X86EncodingMap* 
entry, uint8_t reg, int offset) { 1136 DCHECK(entry->opcode == kX86StartOfMethod) << entry->name; 1137 code_buffer_.push_back(0xE8); // call +0 1138 code_buffer_.push_back(0); 1139 code_buffer_.push_back(0); 1140 code_buffer_.push_back(0); 1141 code_buffer_.push_back(0); 1142 1143 DCHECK_LT(reg, 8); 1144 code_buffer_.push_back(0x58 + reg); // pop reg 1145 1146 EmitRegImm(&X86Mir2Lir::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */); 1147} 1148 1149void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) { 1150 UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " " 1151 << BuildInsnString(entry->fmt, lir, 0); 1152 for (int i = 0; i < GetInsnSize(lir); ++i) { 1153 code_buffer_.push_back(0xCC); // push breakpoint instruction - int 3 1154 } 1155} 1156 1157/* 1158 * Assemble the LIR into binary instruction format. Note that we may 1159 * discover that pc-relative displacements may not fit the selected 1160 * instruction. In those cases we will try to substitute a new code 1161 * sequence or request that the trace be shortened and retried. 
 */
// NOTE(review): start_addr is not referenced in this method's body.
AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) {
  LIR *lir;
  AssemblerStatus res = kSuccess;  // Assume success

  const bool kVerbosePcFixup = false;
  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
    // Negative opcodes are pseudo-instructions: nothing to emit.
    if (lir->opcode < 0) {
      continue;
    }

    if (lir->flags.is_nop) {
      continue;
    }

    // Phase 1: now that offsets are assigned, patch pc-relative operands.
    // If a short-form branch no longer reaches its target, widen it and
    // request a full retry (kRetryAll) so all offsets are recomputed.
    if (lir->flags.pcRelFixup) {
      switch (lir->opcode) {
        case kX86Jcc8: {
          LIR *target_lir = lir->target;
          DCHECK(target_lir != NULL);
          int delta = 0;
          uintptr_t pc;
          // The displacement is relative to the end of this instruction,
          // whose size depends on the currently selected form (rel8/rel32).
          if (IS_SIMM8(lir->operands[0])) {
            pc = lir->offset + 2 /* opcode + rel8 */;
          } else {
            pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
          }
          uintptr_t target = target_lir->offset;
          delta = target - pc;
          // Form changed between passes (8-bit <-> 32-bit range): switch to
          // the 32-bit conditional jump and retry the whole pass.
          if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
            if (kVerbosePcFixup) {
              LOG(INFO) << "Retry for JCC growth at " << lir->offset
                  << " delta: " << delta << " old delta: " << lir->operands[0];
            }
            lir->opcode = kX86Jcc32;
            SetupResourceMasks(lir);
            res = kRetryAll;
          }
          if (kVerbosePcFixup) {
            LOG(INFO) << "Source:";
            DumpLIRInsn(lir, 0);
            LOG(INFO) << "Target:";
            DumpLIRInsn(target_lir, 0);
            LOG(INFO) << "Delta " << delta;
          }
          lir->operands[0] = delta;
          break;
        }
        case kX86Jcc32: {
          // 32-bit form never needs to grow; just recompute the delta.
          LIR *target_lir = lir->target;
          DCHECK(target_lir != NULL);
          uintptr_t pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
          uintptr_t target = target_lir->offset;
          int delta = target - pc;
          if (kVerbosePcFixup) {
            LOG(INFO) << "Source:";
            DumpLIRInsn(lir, 0);
            LOG(INFO) << "Target:";
            DumpLIRInsn(target_lir, 0);
            LOG(INFO) << "Delta " << delta;
          }
          lir->operands[0] = delta;
          break;
        }
        case kX86Jmp8: {
          LIR *target_lir = lir->target;
          DCHECK(target_lir != NULL);
          int delta = 0;
          uintptr_t pc;
          if (IS_SIMM8(lir->operands[0])) {
            pc = lir->offset + 2 /* opcode + rel8 */;
          } else {
            pc = lir->offset + 5 /* opcode + rel32 */;
          }
          uintptr_t target = target_lir->offset;
          delta = target - pc;
          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
            // Useless branch: a jump to the very next instruction is
            // nop'd out, which shifts offsets, so a retry is required.
            lir->flags.is_nop = true;
            if (kVerbosePcFixup) {
              LOG(INFO) << "Retry for useless branch at " << lir->offset;
            }
            res = kRetryAll;
          } else if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
            if (kVerbosePcFixup) {
              LOG(INFO) << "Retry for JMP growth at " << lir->offset;
            }
            lir->opcode = kX86Jmp32;
            SetupResourceMasks(lir);
            res = kRetryAll;
          }
          lir->operands[0] = delta;
          break;
        }
        case kX86Jmp32: {
          LIR *target_lir = lir->target;
          DCHECK(target_lir != NULL);
          uintptr_t pc = lir->offset + 5 /* opcode + rel32 */;
          uintptr_t target = target_lir->offset;
          int delta = target - pc;
          lir->operands[0] = delta;
          break;
        }
        default:
          break;
      }
    }

    /*
     * If one of the pc-relative instructions expanded we'll have
     * to make another pass.  Don't bother to fully assemble the
     * instruction.
     */
    if (res != kSuccess) {
      continue;
    }
    // The emitted bytes so far must line up exactly with this LIR's
    // assigned offset, or every later pc-relative fixup would be wrong.
    CHECK_EQ(static_cast<size_t>(lir->offset), code_buffer_.size());
    const X86EncodingMap *entry = &X86Mir2Lir::EncodingMap[lir->opcode];
    size_t starting_cbuf_size = code_buffer_.size();
    // Phase 2: emit the instruction's bytes, dispatching on encoding kind.
    switch (entry->kind) {
      case kData:  // 4 bytes of data
        code_buffer_.push_back(lir->operands[0]);
        break;
      case kNullary:  // 1 byte of opcode
        DCHECK_EQ(0, entry->skeleton.prefix1);
        DCHECK_EQ(0, entry->skeleton.prefix2);
        code_buffer_.push_back(entry->skeleton.opcode);
        if (entry->skeleton.extra_opcode1 != 0) {
          code_buffer_.push_back(entry->skeleton.extra_opcode1);
          if (entry->skeleton.extra_opcode2 != 0) {
            code_buffer_.push_back(entry->skeleton.extra_opcode2);
          }
        } else {
          DCHECK_EQ(0, entry->skeleton.extra_opcode2);
        }
        DCHECK_EQ(0, entry->skeleton.modrm_opcode);
        DCHECK_EQ(0, entry->skeleton.ax_opcode);
        DCHECK_EQ(0, entry->skeleton.immediate_bytes);
        break;
      case kReg:  // lir operands - 0: reg
        EmitOpReg(entry, lir->operands[0]);
        break;
      case kMem:  // lir operands - 0: base, 1: disp
        EmitOpMem(entry, lir->operands[0], lir->operands[1]);
        break;
      case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
        EmitMemReg(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
        break;
      case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
        EmitArrayReg(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                     lir->operands[3], lir->operands[4]);
        break;
      case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
        EmitRegMem(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
        break;
      case kRegArray:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
        EmitRegArray(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                     lir->operands[3], lir->operands[4]);
        break;
      case kRegThread:  // lir operands - 0: reg, 1: disp
        EmitRegThread(entry, lir->operands[0], lir->operands[1]);
        break;
      case kRegReg:  // lir operands - 0: reg1, 1: reg2
        EmitRegReg(entry, lir->operands[0], lir->operands[1]);
        break;
      case kRegRegStore:  // lir operands - 0: reg2, 1: reg1
        // Same emitter as kRegReg, with the operand order swapped.
        EmitRegReg(entry, lir->operands[1], lir->operands[0]);
        break;
      case kRegRegImm:
        EmitRegRegImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
        break;
      case kRegImm:  // lir operands - 0: reg, 1: immediate
        EmitRegImm(entry, lir->operands[0], lir->operands[1]);
        break;
      case kThreadImm:  // lir operands - 0: disp, 1: immediate
        EmitThreadImm(entry, lir->operands[0], lir->operands[1]);
        break;
      case kMovRegImm:  // lir operands - 0: reg, 1: immediate
        EmitMovRegImm(entry, lir->operands[0], lir->operands[1]);
        break;
      case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
        EmitShiftRegImm(entry, lir->operands[0], lir->operands[1]);
        break;
      case kShiftRegCl:  // lir operands - 0: reg, 1: cl
        EmitShiftRegCl(entry, lir->operands[0], lir->operands[1]);
        break;
      case kRegCond:  // lir operands - 0: reg, 1: condition
        EmitRegCond(entry, lir->operands[0], lir->operands[1]);
        break;
      case kJmp:  // lir operands - 0: rel
        EmitJmp(entry, lir->operands[0]);
        break;
      case kJcc:  // lir operands - 0: rel, 1: CC, target assigned
        EmitJcc(entry, lir->operands[0], lir->operands[1]);
        break;
      case kCall:
        switch (entry->opcode) {
          case kX86CallM:  // lir operands - 0: base, 1: disp
            EmitCallMem(entry, lir->operands[0], lir->operands[1]);
            break;
          case kX86CallT:  // lir operands - 0: disp
            EmitCallThread(entry, lir->operands[0]);
            break;
          default:
            EmitUnimplemented(entry, lir);
            break;
        }
        break;
      case kPcRel:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
        EmitPcRel(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                  lir->operands[3], lir->operands[4]);
        break;
      case kMacro:
        EmitMacro(entry, lir->operands[0], lir->offset);
        break;
      default:
        EmitUnimplemented(entry, lir);
        break;
    }
    // Every emitter must produce exactly GetInsnSize(lir) bytes; otherwise
    // the offsets computed during the fixup phase would be invalid.
    CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
             code_buffer_.size() - starting_cbuf_size)
        << "Instruction size mismatch for entry: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
  }
  return res;
}

}  // namespace art