assembler-x64-inl.h revision d0582a6c46733687d045e4188a1bcd0123c758a1
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_

#include "cpu.h"
#include "memory.h"

namespace v8 {
namespace internal {

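// On x64 the sixteen condition codes come in complementary pairs that
// differ only in their least significant bit (e.g. 0x4 is equal/zero and
// 0x5 is not_equal/not_zero), so negation is a single bit flip.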
Condition NegateCondition(Condition cc) {
  return static_cast<Condition>(cc ^ 1);
}


// -----------------------------------------------------------------------------
// Implementation of Assembler

void Assembler::emitl(uint32_t x) {
  Memory::uint32_at(pc_) = x;
  pc_ += sizeof(uint32_t);
}


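// Note: the relocation entry below is recorded before pc_ is advanced, so
// its position is the address of the embedded 64-bit value itself.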
void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
  Memory::uint64_at(pc_) = x;
  if (rmode != RelocInfo::NONE) {
    RecordRelocInfo(rmode, x);
  }
  pc_ += sizeof(uint64_t);
}


void Assembler::emitw(uint16_t x) {
  Memory::uint16_at(pc_) = x;
  pc_ += sizeof(uint16_t);
}


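// Code targets are not embedded as 64-bit pointers. Instead, a 32-bit index
// into the growing code_targets_ table is emitted, and
// code_target_object_handle_at() performs the reverse lookup. Back-to-back
// emits of the same target share a single table entry.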
void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  RecordRelocInfo(rmode);
  int current = code_targets_.length();
  if (current > 0 && code_targets_.last().is_identical_to(target)) {
    // Optimization if we keep jumping to the same code target.
    emitl(current - 1);
  } else {
    code_targets_.Add(target);
    emitl(current);
  }
}


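// A REX prefix is one byte of the form 0b0100WRXB: W selects a 64-bit
// operand, R extends the ModRM reg field, X the SIB index field, and B the
// ModRM rm (or SIB base) field. 0x48 below is REX.W; the registers' high
// bits are OR-ed into the R and B positions.
// Illustrative example: emit_rex_64(r8, rax) emits
//   0x48 | (1 << 2) | 0 == 0x4C, i.e. REX.W + REX.R.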
void Assembler::emit_rex_64(Register reg, Register rm_reg) {
  emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
}


void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}


void Assembler::emit_rex_64(Register reg, const Operand& op) {
  emit(0x48 | reg.high_bit() << 2 | op.rex_);
}


void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
  emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
}


void Assembler::emit_rex_64(Register rm_reg) {
  ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
  emit(0x48 | rm_reg.high_bit());
}


void Assembler::emit_rex_64(const Operand& op) {
  emit(0x48 | op.rex_);
}


void Assembler::emit_rex_32(Register reg, Register rm_reg) {
  emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
}


void Assembler::emit_rex_32(Register reg, const Operand& op) {
  emit(0x40 | reg.high_bit() << 2 | op.rex_);
}


void Assembler::emit_rex_32(Register rm_reg) {
  emit(0x40 | rm_reg.high_bit());
}


void Assembler::emit_rex_32(const Operand& op) {
  emit(0x40 | op.rex_);
}


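// The emit_optional_rex_32 variants emit a prefix only when one of the R,
// X or B bits is actually needed; for 32-bit operands a bare 0x40 prefix
// has no effect, so omitting it saves a byte.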
void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
  byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
  byte rex_bits = reg.high_bit() << 2 | op.rex_;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
  if (rex_bits != 0) emit(0x40 | rex_bits);
}


void Assembler::emit_optional_rex_32(Register rm_reg) {
  if (rm_reg.high_bit()) emit(0x41);
}


void Assembler::emit_optional_rex_32(const Operand& op) {
  if (op.rex_ != 0) emit(0x40 | op.rex_);
}


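// Calls and jumps use a rip-relative 32-bit displacement. Here pc is the
// address of the displacement field, and rip points just past that 4-byte
// field when the displacement is applied, hence the "+ 4" / "- 4" below.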
Address Assembler::target_address_at(Address pc) {
  return Memory::int32_at(pc) + pc + 4;
}


void Assembler::set_target_address_at(Address pc, Address target) {
  Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
  CPU::FlushICache(pc, sizeof(int32_t));
}


Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
  return code_targets_[Memory::int32_at(pc)];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// The modes possibly affected by apply must be in kApplyMask.
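// When a code object moves by delta, absolute pointers into it must move
// with it, while pc-relative values must shrink by delta because the pc
// they are relative to has already moved.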
void RelocInfo::apply(intptr_t delta) {
  if (IsInternalReference(rmode_)) {
    // An absolute code pointer inside a code object moves with the code
    // object.
    Memory::Address_at(pc_) += static_cast<int32_t>(delta);
  } else if (IsCodeTarget(rmode_)) {
    Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
    // Special handling of js_return when a break point is set (call
    // instruction has been inserted).
    Memory::int32_at(pc_ + 1) -= static_cast<int32_t>(delta);  // Relocate entry.
  }
}


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  if (IsCodeTarget(rmode_)) {
    return Assembler::target_address_at(pc_);
  } else {
    return Memory::Address_at(pc_);
  }
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  return reinterpret_cast<Address>(pc_);
}


void RelocInfo::set_target_address(Address target) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  if (IsCodeTarget(rmode_)) {
    Assembler::set_target_address_at(pc_, target);
  } else {
    Memory::Address_at(pc_) = target;
  }
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_at(pc_);
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  if (rmode_ == EMBEDDED_OBJECT) {
    return Memory::Object_Handle_at(pc_);
  } else {
    return origin->code_target_object_handle_at(pc_);
  }
}


Object** RelocInfo::target_object_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object**>(pc_);
}


Address* RelocInfo::target_reference_address() {
  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  return reinterpret_cast<Address*>(pc_);
}


void RelocInfo::set_target_object(Object* target) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  *reinterpret_cast<Object**>(pc_) = target;
}


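// Byte-level sketch (illustrative; kScratchRegister is r10 on x64):
//   patched:  49 BA <8-byte target>  <REX> FF D2   (movq r10, target; call r10)
//   return:   at most 7 bytes of code, then int3 (0xCC) padding
// The movq occupies bytes 0 through 9, so byte 10 is the call's REX prefix
// in a patched sequence but int3 padding in an unpatched one.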
bool RelocInfo::IsPatchedReturnSequence() {
  // The recognized call sequence is:
  //  movq(kScratchRegister, immediate64); call(kScratchRegister);
  // It only needs to be distinguished from a return sequence
  //  movq(rsp, rbp); pop(rbp); ret(n); int3 *6
  // The 11th byte is int3 (0xCC) in the return sequence and
  // REX.WB (0x48+register bit) for the call sequence.
#ifdef ENABLE_DEBUGGER_SUPPORT
  return pc_[10] != 0xCC;
#else
  return false;
#endif
}


Address RelocInfo::call_address() {
  ASSERT(IsPatchedReturnSequence());
  return Memory::Address_at(
      pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT(IsPatchedReturnSequence());
  Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
      target;
}


Object* RelocInfo::call_object() {
  ASSERT(IsPatchedReturnSequence());
  return *call_object_address();
}


void RelocInfo::set_call_object(Object* target) {
  ASSERT(IsPatchedReturnSequence());
  *call_object_address() = target;
}


Object** RelocInfo::call_object_address() {
  ASSERT(IsPatchedReturnSequence());
  return reinterpret_cast<Object**>(
      pc_ + Assembler::kPatchReturnSequenceAddressOffset);
}


// -----------------------------------------------------------------------------
// Implementation of Operand

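// ModRM is a single byte laid out as mod(2) | reg(3) | rm(3). Only mod and
// rm are filled in here; the reg field is supplied later by the instruction
// emitter, and the fourth encoding bit of rm goes into REX.B.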
void Operand::set_modrm(int mod, Register rm_reg) {
  ASSERT(is_uint2(mod));
  buf_[0] = mod << 6 | rm_reg.low_bits();
  // Set REX.B to the high bit of rm.code().
  rex_ |= rm_reg.high_bit();
}


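// SIB is likewise a single byte, scale(2) | index(3) | base(3), addressing
// base + index * 2^scale + displacement. The fourth bits of index and base
// land in REX.X and REX.B respectively.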
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
  ASSERT(len_ == 1);
  ASSERT(is_uint2(scale));
  // Use SIB with no index register only for base rsp or r12. Otherwise we
  // would skip the SIB byte entirely.
  ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
  buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
  rex_ |= index.high_bit() << 1 | base.high_bit();
  len_ = 2;
}


void Operand::set_disp8(int disp) {
  ASSERT(is_int8(disp));
  ASSERT(len_ == 1 || len_ == 2);
  int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int8_t);
}


void Operand::set_disp32(int disp) {
  ASSERT(len_ == 1 || len_ == 2);
  int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
  *p = disp;
  len_ += sizeof(int32_t);
}


} }  // namespace v8::internal

#endif  // V8_X64_ASSEMBLER_X64_INL_H_