trampoline_compiler.cc revision c6ee54e9a9fd67d24c63bd802ef2fe540a4f86a5
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trampoline_compiler.h"

#include "jni_internal.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/mips/assembler_mips.h"
#include "utils/x86/assembler_x86.h"

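// Route the pseudo-assembly below through the current assembler.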
#define __ assembler->

namespace art {

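// A trampoline is a tiny stub that fetches an entrypoint out of the current
// Thread at the given offset and tail-jumps to it. The per-ISA generators
// below differ only in where Thread* lives on entry:
//   kInterpreterAbi - Thread* arrives in the first argument register.
//   kJniAbi         - JNIEnv* arrives in the first argument register and
//                     Thread* is loaded from it via JNIEnvExt::SelfOffset().
//   kPortableAbi /
//   kQuickAbi       - Thread* lives in a dedicated register (R9/X18/S1).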
namespace arm {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset offset) {
  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));

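  // Rough shape of the emitted ARM code (large offsets may need an extra
  // scratch move):
  //   interpreter:    ldr pc, [r0, #offset]
  //   jni:            ldr ip, [r0, #Self] ; ldr pc, [ip, #offset]
  //   portable/quick: ldr pc, [r9, #offset]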
  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, PC, R0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (R0).
      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset().Int32Value());
      __ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
      break;
    case kPortableAbi:  // R9 holds Thread*.
    case kQuickAbi:  // Fall-through.
      __ LoadFromOffset(kLoadWord, PC, R9, offset.Int32Value());
  }
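  // Should be unreachable: every case above jumps by writing PC directly.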
  __ bkpt(0);

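  // Copy the finished instructions into a caller-owned buffer;
  // FinalizeInstructions also resolves any pending fixups.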
  size_t cs = assembler->CodeSize();
  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace arm

namespace arm64 {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset offset) {
  UniquePtr<Arm64Assembler> assembler(static_cast<Arm64Assembler*>(Assembler::Create(kArm64)));

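  // Rough shape of the emitted A64 code (JumpTo loads the target into the
  // given scratch register and branches through it):
  //   interpreter:    ldr ip1, [x0, #offset] ; br ip1
  //   jni:            ldr ip1, [x0, #Self] ; ldr ip0, [ip1, #offset] ; br ip0
  //   portable/quick: ldr ip0, [tr, #offset] ; br ip0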
  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (X0) in interpreter ABI.
      // FIXME: VIXL reserves IP0/IP1 as scratch registers, so using them
      // explicitly here is unsafe.
      __ JumpTo(Arm64ManagedRegister::FromCoreRegister(X0), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromCoreRegister(IP1));
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
      __ LoadRawPtr(Arm64ManagedRegister::FromCoreRegister(IP1),
                    Arm64ManagedRegister::FromCoreRegister(X0),
                    Offset(JNIEnvExt::SelfOffset().Int32Value()));
      // FIXME: VIXL reserves IP0/IP1 as scratch registers, so using them
      // explicitly here is unsafe.
      __ JumpTo(Arm64ManagedRegister::FromCoreRegister(IP1), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromCoreRegister(IP0));
      break;
    case kPortableAbi:  // X18 holds Thread*.
    case kQuickAbi:  // Fall-through.
      __ JumpTo(Arm64ManagedRegister::FromCoreRegister(TR), Offset(offset.Int32Value()),
                Arm64ManagedRegister::FromCoreRegister(IP0));
      break;
  }

  size_t cs = assembler->CodeSize();
  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace arm64

namespace mips {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
                                                    ThreadOffset offset) {
  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));

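  // Rough shape of the emitted MIPS code; every case leaves the target in T9
  // (the conventional PIC call register) and the shared jr below jumps there:
  //   interpreter:    lw $t9, offset($a0)
  //   jni:            lw $t9, Self($a0) ; lw $t9, offset($t9)
  //   portable/quick: lw $t9, offset($s1)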
  switch (abi) {
    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
      break;
    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
      break;
    case kPortableAbi:  // S1 holds Thread*.
    case kQuickAbi:  // Fall-through.
      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
  }
  __ Jr(T9);
  __ Nop();  // Branch delay slot.
  __ Break();  // Trap if the jump somehow falls through.

  size_t cs = assembler->CodeSize();
  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace mips

namespace x86 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset offset) {
  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));

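  // Thread* is always reachable through the fs segment register on x86, so a
  // single stub serves every calling convention and no abi parameter is needed.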
  // All x86 trampolines call via the Thread* held in fs.
  __ fs()->jmp(Address::Absolute(offset));
  __ int3();  // Trap: should be unreachable after the jmp.

  size_t cs = assembler->CodeSize();
  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace x86

namespace x86_64 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset offset) {
  UniquePtr<x86::X86Assembler> assembler(
      static_cast<x86::X86Assembler*>(Assembler::Create(kX86_64)));

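  // As on x86, one stub serves every calling convention; x86-64 keeps Thread*
  // behind the gs segment register rather than fs.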
  // All x86-64 trampolines call via the Thread* held in gs.
  __ gs()->jmp(x86::Address::Absolute(offset, true));
  __ int3();  // Trap: should be unreachable after the jmp.

  size_t cs = assembler->CodeSize();
  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
  assembler->FinalizeInstructions(code);

  return entry_stub.release();
}
}  // namespace x86_64
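// Dispatches on the target ISA; the caller takes ownership of the returned
// code blob.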
const std::vector<uint8_t>* CreateTrampoline(InstructionSet isa, EntryPointCallingConvention abi,
                                             ThreadOffset offset) {
  switch (isa) {
    case kArm:
    case kThumb2:  // Fall-through: Thumb2 uses the ARM trampoline.
      return arm::CreateTrampoline(abi, offset);
    case kArm64:
      return arm64::CreateTrampoline(abi, offset);
    case kMips:
      return mips::CreateTrampoline(abi, offset);
    case kX86:
      return x86::CreateTrampoline(offset);
    case kX86_64:
      return x86_64::CreateTrampoline(offset);
    default:
      LOG(FATAL) << "Unknown InstructionSet: " << isa;
      return NULL;  // Unreachable: LOG(FATAL) aborts.
  }
}

}  // namespace art