/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trampoline_compiler.h"

#include "base/arena_allocator.h"
#include "jni_env_ext.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "utils/arm/assembler_thumb2.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "utils/arm64/assembler_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips
#include "utils/mips/assembler_mips.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
#include "utils/mips64/assembler_mips64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "utils/x86/assembler_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "utils/x86_64/assembler_x86_64.h"
#endif

#define __ assembler.
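// `__` is ART's usual assembler shorthand: `__ bkpt(0)` below expands to
// `assembler.bkpt(0)`, so the emission code reads like an assembly listing.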

namespace art {

#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {
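// Builds a Thumb2 stub that tail-calls the entry point stored at `offset`
// inside the Thread*. Loading into PC performs the jump; the trailing bkpt(0)
// is a trap that only fires if control somehow falls through.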
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
  Thumb2Assembler assembler(arena);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, PC, R0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (R0).
      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset(4).Int32Value());
      __ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
      break;
    case kQuickAbi:  // R9 holds Thread*.
      __ LoadFromOffset(kLoadWord, PC, R9, offset.Int32Value());
  }
  __ bkpt(0);

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace arm
#endif  // ART_ENABLE_CODEGEN_arm

#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
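// AArch64 variant. PC is not a general-purpose register on A64, so JumpTo()
// loads the target through a scratch register (IP0/IP1, i.e. x16/x17) and
// branches to it instead of loading into PC as the 32-bit ARM stub does.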
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
  Arm64Assembler assembler(arena);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (X0) in interpreter ABI.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(X0), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP1));
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
      __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
                    Arm64ManagedRegister::FromXRegister(X0),
                    Offset(JNIEnvExt::SelfOffset(8).Int32Value()));
      __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));
      break;
    case kQuickAbi:  // X18 holds Thread*.
      __ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromXRegister(IP0));
      break;
  }

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace arm64
#endif  // ART_ENABLE_CODEGEN_arm64

#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
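// MIPS32 variant. The target is jumped through T9 because the o32 PIC calling
// convention expects the callee's own address in T9; the Nop() fills the
// branch delay slot of Jr(), and Break() traps any fall-through.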
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<4> offset) {
  MipsAssembler assembler(arena);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset(4).Int32Value());
      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
      break;
    case kQuickAbi:  // S1 holds Thread*.
      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
  }
  __ Jr(T9);
  __ Nop();
  __ Break();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace mips
#endif  // ART_ENABLE_CODEGEN_mips

#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
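// MIPS64 variant: mirrors the MIPS32 stub above, but with doubleword loads
// and the 8-byte Thread offsets of the 64-bit ABI.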
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
    ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset<8> offset) {
  Mips64Assembler assembler(arena);

  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadDoubleword, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset(8).Int32Value());
      __ LoadFromOffset(kLoadDoubleword, T9, T9, offset.Int32Value());
      break;
    case kQuickAbi:  // S1 holds Thread*.
      __ LoadFromOffset(kLoadDoubleword, T9, S1, offset.Int32Value());
  }
  __ Jr(T9);
  __ Nop();
  __ Break();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace mips64
#endif  // ART_ENABLE_CODEGEN_mips64

#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
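// x86 keeps Thread* behind the fs segment register, so every calling
// convention reduces to one segment-relative indirect jmp and no abi
// parameter is needed; int3 traps any fall-through.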
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
                                                                    ThreadOffset<4> offset) {
  X86Assembler assembler(arena);

  // All x86 trampolines call via the Thread* held in fs.
  __ fs()->jmp(Address::Absolute(offset));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86
#endif  // ART_ENABLE_CODEGEN_x86

#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
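// x86-64 variant: same shape as x86, but Thread* lives behind gs, and the
// address is built with no_rip == true so the displacement is treated as
// absolute rather than RIP-relative.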
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
                                                                    ThreadOffset<8> offset) {
  x86_64::X86_64Assembler assembler(arena);

  // All x86-64 trampolines call via the Thread* held in gs.
  __ gs()->jmp(x86_64::Address::Absolute(offset, true));
  __ int3();

  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(entry_stub->data(), entry_stub->size());
  __ FinalizeInstructions(code);

  return std::move(entry_stub);
}
}  // namespace x86_64
#endif  // ART_ENABLE_CODEGEN_x86_64

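// Public dispatchers. Each one allocates a short-lived arena, hands off to
// the per-ISA generator (only those compiled in via ART_ENABLE_CODEGEN_*),
// and returns the finished stub bytes. A sketch of a call site, with an
// illustrative entry-point name:
//
//   std::unique_ptr<const std::vector<uint8_t>> stub = CreateTrampoline64(
//       kArm64, kQuickAbi, QUICK_ENTRYPOINT_OFFSET(8, pQuickGenericJniTrampoline));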
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset<8> offset) {
  ArenaPool pool;
  ArenaAllocator arena(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64:
      return arm64::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
    case kMips64:
      return mips64::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case kX86_64:
      return x86_64::CreateTrampoline(&arena, offset);
#endif
    default:
      UNUSED(abi);
      UNUSED(offset);
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

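// 32-bit counterpart of CreateTrampoline64. kArm and kThumb2 share the
// Thumb2 generator; the x86 generator ignores the ABI, hence UNUSED(abi).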
std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
                                                               EntryPointCallingConvention abi,
                                                               ThreadOffset<4> offset) {
  ArenaPool pool;
  ArenaAllocator arena(&pool);
  switch (isa) {
#ifdef ART_ENABLE_CODEGEN_arm
    case kArm:
    case kThumb2:
      return arm::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    case kMips:
      return mips::CreateTrampoline(&arena, abi, offset);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86:
      UNUSED(abi);
      return x86::CreateTrampoline(&arena, offset);
#endif
    default:
      LOG(FATAL) << "Unexpected InstructionSet: " << isa;
      UNREACHABLE();
  }
}

}  // namespace art