macro-assembler-x64.cc revision e0cee9b3ed82e2391fd85d118aeaa4ea361c687d
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_X64)
31
32#include "bootstrapper.h"
33#include "codegen-inl.h"
34#include "assembler-x64.h"
35#include "macro-assembler-x64.h"
36#include "serialize.h"
37#include "debug.h"
38#include "heap.h"
39
40namespace v8 {
41namespace internal {
42
43MacroAssembler::MacroAssembler(void* buffer, int size)
44    : Assembler(buffer, size),
45      generating_stub_(false),
46      allow_stub_calls_(true),
47      code_object_(Heap::undefined_value()) {
48}
49
50
51void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
52  movq(destination, Operand(kRootRegister,
53                            (index << kPointerSizeLog2) - kRootRegisterBias));
54}
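// The root accessors above and below address the root list relative to
// kRootRegister (r13), which is pre-biased by kRootRegisterBias: root index i
// is read from r13 + (i << kPointerSizeLog2) - kRootRegisterBias. The bias
// presumably lets more of the list be reached with short displacements.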
55
56
57void MacroAssembler::LoadRootIndexed(Register destination,
58                                     Register variable_offset,
59                                     int fixed_offset) {
60  movq(destination,
61       Operand(kRootRegister,
62               variable_offset, times_pointer_size,
63               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
64}
65
66
67void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
68  movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
69       source);
70}
71
72
73void MacroAssembler::PushRoot(Heap::RootListIndex index) {
74  push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
75}
76
77
78void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
79  cmpq(with, Operand(kRootRegister,
80                     (index << kPointerSizeLog2) - kRootRegisterBias));
81}
82
83
84void MacroAssembler::CompareRoot(const Operand& with,
85                                 Heap::RootListIndex index) {
86  ASSERT(!with.AddressUsesRegister(kScratchRegister));
87  LoadRoot(kScratchRegister, index);
88  cmpq(with, kScratchRegister);
89}
90
91
92void MacroAssembler::RecordWriteHelper(Register object,
93                                       Register addr,
94                                       Register scratch) {
95  if (FLAG_debug_code) {
96    // Check that the object is not in new space.
97    NearLabel not_in_new_space;
98    InNewSpace(object, scratch, not_equal, &not_in_new_space);
99    Abort("new-space object passed to RecordWriteHelper");
100    bind(&not_in_new_space);
101  }
102
103  // Compute the page start address from the heap object pointer, and reuse
104  // the 'object' register for it.
105  and_(object, Immediate(~Page::kPageAlignmentMask));
106
107  // Compute the number of the region covering addr. See the
108  // Page::GetRegionNumberForAddress method for more details.
109  shrl(addr, Immediate(Page::kRegionSizeLog2));
110  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
111
112  // Set dirty mark for region.
113  bts(Operand(object, Page::kDirtyFlagOffset), addr);
114}
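// A note on the scheme above: and-ing object with ~kPageAlignmentMask rounds
// it down to the start of its page, shifting and masking addr reduces it to
// the number of the 2^kRegionSizeLog2-byte region it falls into within that
// page, and bts then sets the matching bit in the page's dirty-marks word at
// kDirtyFlagOffset, which the garbage collector consults when looking for
// regions that may hold pointers into new space.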
115
116
117void MacroAssembler::RecordWrite(Register object,
118                                 int offset,
119                                 Register value,
120                                 Register index) {
121  // The compiled code assumes that record write doesn't change the
122  // context register, so we check that none of the clobbered
123  // registers are rsi.
124  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
125
126  // First, check if a write barrier is even needed. The tests below
127  // catch stores of Smis and stores into young gen.
128  Label done;
129  JumpIfSmi(value, &done);
130
131  RecordWriteNonSmi(object, offset, value, index);
132  bind(&done);
133
134  // Clobber all input registers when running with the debug-code flag
135  // turned on to provoke errors. This clobbering repeats the
136  // clobbering done inside RecordWriteNonSmi but it's necessary to
137  // avoid having the fast case for smis leave the registers
138  // unchanged.
139  if (FLAG_debug_code) {
140    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
141    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
142    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
143  }
144}
145
146
147void MacroAssembler::RecordWrite(Register object,
148                                 Register address,
149                                 Register value) {
150  // The compiled code assumes that record write doesn't change the
151  // context register, so we check that none of the clobbered
152  // registers are rsi.
153  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
154
155  // First, check if a write barrier is even needed. The tests below
156  // catch stores of Smis and stores into young gen.
157  Label done;
158  JumpIfSmi(value, &done);
159
160  InNewSpace(object, value, equal, &done);
161
162  RecordWriteHelper(object, address, value);
163
164  bind(&done);
165
166  // Clobber all input registers when running with the debug-code flag
167  // turned on to provoke errors.
168  if (FLAG_debug_code) {
169    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
170    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
171    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
172  }
173}
174
175
176void MacroAssembler::RecordWriteNonSmi(Register object,
177                                       int offset,
178                                       Register scratch,
179                                       Register index) {
180  Label done;
181
182  if (FLAG_debug_code) {
183    NearLabel okay;
184    JumpIfNotSmi(object, &okay);
185    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
186    bind(&okay);
187
188    if (offset == 0) {
189      // index must be int32.
190      Register tmp = index.is(rax) ? rbx : rax;
191      push(tmp);
192      movl(tmp, index);
193      cmpq(tmp, index);
194      Check(equal, "Index register for RecordWrite must be untagged int32.");
195      pop(tmp);
196    }
197  }
198
199  // Test that the object address is not in the new space. We cannot
200  // update page dirty marks for new space pages.
201  InNewSpace(object, scratch, equal, &done);
202
203  // The offset is relative to a tagged or untagged HeapObject pointer,
204  // so either offset or offset + kHeapObjectTag must be a
205  // multiple of kPointerSize.
206  ASSERT(IsAligned(offset, kPointerSize) ||
207         IsAligned(offset + kHeapObjectTag, kPointerSize));
208
209  Register dst = index;
210  if (offset != 0) {
211    lea(dst, Operand(object, offset));
212  } else {
213    // array access: calculate the destination address in the same manner as
214    // KeyedStoreIC::GenerateGeneric.
215    lea(dst, FieldOperand(object,
216                          index,
217                          times_pointer_size,
218                          FixedArray::kHeaderSize));
219  }
220  RecordWriteHelper(object, dst, scratch);
221
222  bind(&done);
223
224  // Clobber all input registers when running with the debug-code flag
225  // turned on to provoke errors.
226  if (FLAG_debug_code) {
227    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
228    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
229    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
230  }
231}
232
233void MacroAssembler::Assert(Condition cc, const char* msg) {
234  if (FLAG_debug_code) Check(cc, msg);
235}
236
237
238void MacroAssembler::AssertFastElements(Register elements) {
239  if (FLAG_debug_code) {
240    NearLabel ok;
241    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
242                Heap::kFixedArrayMapRootIndex);
243    j(equal, &ok);
244    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
245                Heap::kFixedCOWArrayMapRootIndex);
246    j(equal, &ok);
247    Abort("JSObject with fast elements map has slow elements");
248    bind(&ok);
249  }
250}
251
252
253void MacroAssembler::Check(Condition cc, const char* msg) {
254  NearLabel L;
255  j(cc, &L);
256  Abort(msg);
257  // will not return here
258  bind(&L);
259}
260
261
262void MacroAssembler::CheckStackAlignment() {
263  int frame_alignment = OS::ActivationFrameAlignment();
264  int frame_alignment_mask = frame_alignment - 1;
265  if (frame_alignment > kPointerSize) {
266    ASSERT(IsPowerOf2(frame_alignment));
267    NearLabel alignment_as_expected;
268    testq(rsp, Immediate(frame_alignment_mask));
269    j(zero, &alignment_as_expected);
270    // Abort if stack is not aligned.
271    int3();
272    bind(&alignment_as_expected);
273  }
274}
275
276
277void MacroAssembler::NegativeZeroTest(Register result,
278                                      Register op,
279                                      Label* then_label) {
280  NearLabel ok;
281  testl(result, result);
282  j(not_zero, &ok);
283  testl(op, op);
284  j(sign, then_label);
285  bind(&ok);
286}
287
288
289void MacroAssembler::Abort(const char* msg) {
290  // We want to pass the msg string like a smi to avoid GC
291  // problems; however, msg is not guaranteed to be aligned
292  // properly. Instead, we pass an aligned pointer that is
293  // a proper v8 smi, but also pass the alignment difference
294  // from the real pointer as a smi.
295  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
296  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
297  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
298  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
299#ifdef DEBUG
300  if (msg != NULL) {
301    RecordComment("Abort message: ");
302    RecordComment(msg);
303  }
304#endif
305  // Disable stub call restrictions to always allow calls to abort.
306  AllowStubCallsScope allow_scope(this, true);
307
308  push(rax);
309  movq(kScratchRegister, p0, RelocInfo::NONE);
310  push(kScratchRegister);
311  movq(kScratchRegister,
312       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
313       RelocInfo::NONE);
314  push(kScratchRegister);
315  CallRuntime(Runtime::kAbort, 2);
316  // will not return here
317  int3();
318}
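// Worked example of the pointer-smuggling above (kSmiTag == 0, kSmiTagMask
// == 1): if msg happens to live at an address ending in ...1235, then p0 ends
// in ...1234, has a clear tag bit and passes the IsSmi() check, while the
// difference p1 - p0 == 1 is passed separately as Smi::FromInt(1) so the
// runtime can reconstruct the original char pointer.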
319
320
321void MacroAssembler::CallStub(CodeStub* stub) {
322  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
323  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
324}
325
326
327MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
328  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
329  MaybeObject* result = stub->TryGetCode();
330  if (!result->IsFailure()) {
331    call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
332         RelocInfo::CODE_TARGET);
333  }
334  return result;
335}
336
337
338void MacroAssembler::TailCallStub(CodeStub* stub) {
339  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
340  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
341}
342
343
344MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
345  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
346  MaybeObject* result = stub->TryGetCode();
347  if (!result->IsFailure()) {
348    jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
349        RelocInfo::CODE_TARGET);
350  }
351  return result;
352}
353
354
355void MacroAssembler::StubReturn(int argc) {
356  ASSERT(argc >= 1 && generating_stub());
357  ret((argc - 1) * kPointerSize);
358}
359
360
361void MacroAssembler::IllegalOperation(int num_arguments) {
362  if (num_arguments > 0) {
363    addq(rsp, Immediate(num_arguments * kPointerSize));
364  }
365  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
366}
367
368
369void MacroAssembler::IndexFromHash(Register hash, Register index) {
370  // The assert checks that the constants for the maximum number of digits
371  // for an array index cached in the hash field and the number of bits
372  // reserved for it do not conflict.
373  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
374         (1 << String::kArrayIndexValueBits));
375  // We want the smi-tagged index in key. Even if we subsequently go to
376  // the slow case, converting the key to a smi is always valid.
377  // key: string key
378  // hash: key's hash field, including its array index value.
379  and_(hash, Immediate(String::kArrayIndexValueMask));
380  shr(hash, Immediate(String::kHashShift));
381  // Here we actually clobber the key which will be used if calling into
382  // runtime later. However as the new key is the numeric value of a string key
383  // there is no difference in using either key.
384  Integer32ToSmi(index, hash);
385}
386
387
388void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
389  CallRuntime(Runtime::FunctionForId(id), num_arguments);
390}
391
392
393void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
394  Runtime::Function* function = Runtime::FunctionForId(id);
395  Set(rax, function->nargs);
396  movq(rbx, ExternalReference(function));
397  CEntryStub ces(1);
398  ces.SaveDoubles();
399  CallStub(&ces);
400}
401
402
403MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
404                                            int num_arguments) {
405  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
406}
407
408
409void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
410  // If the expected number of arguments of the runtime function is
411  // constant, we check that the actual number of arguments matches the
412  // expectation.
413  if (f->nargs >= 0 && f->nargs != num_arguments) {
414    IllegalOperation(num_arguments);
415    return;
416  }
417
418  // TODO(1236192): Most runtime routines don't need the number of
419  // arguments passed in because it is constant. At some point we
420  // should remove this need and make the runtime routine entry code
421  // smarter.
422  Set(rax, num_arguments);
423  movq(rbx, ExternalReference(f));
424  CEntryStub ces(f->result_size);
425  CallStub(&ces);
426}
427
428
429MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
430                                            int num_arguments) {
431  if (f->nargs >= 0 && f->nargs != num_arguments) {
432    IllegalOperation(num_arguments);
433    // Since we did not call the stub, there was no allocation failure.
434    // Return some non-failure object.
435    return Heap::undefined_value();
436  }
437
438  // TODO(1236192): Most runtime routines don't need the number of
439  // arguments passed in because it is constant. At some point we
440  // should remove this need and make the runtime routine entry code
441  // smarter.
442  Set(rax, num_arguments);
443  movq(rbx, ExternalReference(f));
444  CEntryStub ces(f->result_size);
445  return TryCallStub(&ces);
446}
447
448
449void MacroAssembler::CallExternalReference(const ExternalReference& ext,
450                                           int num_arguments) {
451  Set(rax, num_arguments);
452  movq(rbx, ext);
453
454  CEntryStub stub(1);
455  CallStub(&stub);
456}
457
458
459void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
460                                               int num_arguments,
461                                               int result_size) {
462  // ----------- S t a t e -------------
463  //  -- rsp[0] : return address
464  //  -- rsp[8] : argument num_arguments - 1
465  //  ...
466  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
467  // -----------------------------------
468
469  // TODO(1236192): Most runtime routines don't need the number of
470  // arguments passed in because it is constant. At some point we
471  // should remove this need and make the runtime routine entry code
472  // smarter.
473  Set(rax, num_arguments);
474  JumpToExternalReference(ext, result_size);
475}
476
477
478MaybeObject* MacroAssembler::TryTailCallExternalReference(
479    const ExternalReference& ext, int num_arguments, int result_size) {
480  // ----------- S t a t e -------------
481  //  -- rsp[0] : return address
482  //  -- rsp[8] : argument num_arguments - 1
483  //  ...
484  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
485  // -----------------------------------
486
487  // TODO(1236192): Most runtime routines don't need the number of
488  // arguments passed in because it is constant. At some point we
489  // should remove this need and make the runtime routine entry code
490  // smarter.
491  Set(rax, num_arguments);
492  return TryJumpToExternalReference(ext, result_size);
493}
494
495
496void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
497                                     int num_arguments,
498                                     int result_size) {
499  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
500}
501
502
503MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
504                                                int num_arguments,
505                                                int result_size) {
506  return TryTailCallExternalReference(ExternalReference(fid),
507                                      num_arguments,
508                                      result_size);
509}
510
511
512static int Offset(ExternalReference ref0, ExternalReference ref1) {
513  int64_t offset = (ref0.address() - ref1.address());
514  // Check that the offset fits into an int.
515  ASSERT(static_cast<int>(offset) == offset);
516  return static_cast<int>(offset);
517}
518
519
520void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
521#ifdef _WIN64
522  // We need to prepare a slot for the result handle on the stack and put
523  // a pointer to it into the first argument register.
524  EnterApiExitFrame(arg_stack_space + 1);
525
526  // rcx must be used to pass the pointer to the return value slot.
527  lea(rcx, StackSpaceOperand(arg_stack_space));
528#else
529  EnterApiExitFrame(arg_stack_space);
530#endif
531}
532
533
534MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
535    ApiFunction* function, int stack_space) {
536  Label empty_result;
537  Label prologue;
538  Label promote_scheduled_exception;
539  Label delete_allocated_handles;
540  Label leave_exit_frame;
541  Label write_back;
542
543  ExternalReference next_address =
544      ExternalReference::handle_scope_next_address();
545  const int kNextOffset = 0;
546  const int kLimitOffset = Offset(
547      ExternalReference::handle_scope_limit_address(),
548      next_address);
549  const int kLevelOffset = Offset(
550      ExternalReference::handle_scope_level_address(),
551      next_address);
552  ExternalReference scheduled_exception_address =
553      ExternalReference::scheduled_exception_address();
554
555  // Allocate HandleScope in callee-save registers.
556  Register prev_next_address_reg = r14;
557  Register prev_limit_reg = rbx;
558  Register base_reg = r12;
559  movq(base_reg, next_address);
560  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
561  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
562  addl(Operand(base_reg, kLevelOffset), Immediate(1));
563  // Call the api function!
564  movq(rax,
565       reinterpret_cast<int64_t>(function->address()),
566       RelocInfo::RUNTIME_ENTRY);
567  call(rax);
568
569#ifdef _WIN64
570  // rax holds a pointer to the v8::Handle; unpack it.
571  movq(rax, Operand(rax, 0));
572#endif
573  // Check if the result handle holds 0.
574  testq(rax, rax);
575  j(zero, &empty_result);
576  // It was non-zero.  Dereference to get the result value.
577  movq(rax, Operand(rax, 0));
578  bind(&prologue);
579
580  // No more valid handles (the result handle was the last one). Restore
581  // previous handle scope.
582  subl(Operand(base_reg, kLevelOffset), Immediate(1));
583  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
584  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
585  j(not_equal, &delete_allocated_handles);
586  bind(&leave_exit_frame);
587
588  // Check if the function scheduled an exception.
589  movq(rsi, scheduled_exception_address);
590  Cmp(Operand(rsi, 0), Factory::the_hole_value());
591  j(not_equal, &promote_scheduled_exception);
592
593  LeaveApiExitFrame();
594  ret(stack_space * kPointerSize);
595
596  bind(&promote_scheduled_exception);
597  MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
598                                           0, 1);
599  if (result->IsFailure()) {
600    return result;
601  }
602
603  bind(&empty_result);
604  // It was zero; the result is undefined.
605  Move(rax, Factory::undefined_value());
606  jmp(&prologue);
607
608  // HandleScope limit has changed. Delete allocated extensions.
609  bind(&delete_allocated_handles);
610  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
611  movq(prev_limit_reg, rax);
612  movq(rax, ExternalReference::delete_handle_scope_extensions());
613  call(rax);
614  movq(rax, prev_limit_reg);
615  jmp(&leave_exit_frame);
616
617  return result;
618}
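// Summary of the sequence above: the HandleScope state (next, limit, level)
// is saved in callee-saved registers and the level is bumped, the API
// function is called directly, the returned handle is dereferenced (or
// replaced by undefined if it was empty), the scope data is restored,
// extensions allocated by the callee are deleted through
// delete_handle_scope_extensions, and any scheduled exception is promoted
// before the exit frame is left.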
619
620
621void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
622                                             int result_size) {
623  // Set the entry point and jump to the C entry runtime stub.
624  movq(rbx, ext);
625  CEntryStub ces(result_size);
626  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
627}
628
629
630MaybeObject* MacroAssembler::TryJumpToExternalReference(
631    const ExternalReference& ext, int result_size) {
632  // Set the entry point and jump to the C entry runtime stub.
633  movq(rbx, ext);
634  CEntryStub ces(result_size);
635  return TryTailCallStub(&ces);
636}
637
638
639void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
640                                   InvokeFlag flag,
641                                   PostCallGenerator* post_call_generator) {
642  // Calls are not allowed in some stubs.
643  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
644
645  // Rely on the assertion to check that the number of provided
646  // arguments matches the expected number of arguments. Fake a
647  // parameter count to avoid emitting code to do the check.
648  ParameterCount expected(0);
649  GetBuiltinEntry(rdx, id);
650  InvokeCode(rdx, expected, expected, flag, post_call_generator);
651}
652
653
654void MacroAssembler::GetBuiltinFunction(Register target,
655                                        Builtins::JavaScript id) {
656  // Load the builtins object into target register.
657  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
658  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
659  movq(target, FieldOperand(target,
660                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
661}
662
663
664void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
665  ASSERT(!target.is(rdi));
666  // Load the JavaScript builtin function from the builtins object.
667  GetBuiltinFunction(rdi, id);
668  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
669}
670
671
672void MacroAssembler::Set(Register dst, int64_t x) {
673  if (x == 0) {
674    xorl(dst, dst);
675  } else if (is_int32(x)) {
676    movq(dst, Immediate(static_cast<int32_t>(x)));
677  } else if (is_uint32(x)) {
678    movl(dst, Immediate(static_cast<uint32_t>(x)));
679  } else {
680    movq(dst, x, RelocInfo::NONE);
681  }
682}
683
684void MacroAssembler::Set(const Operand& dst, int64_t x) {
685  if (is_int32(x)) {
686    movq(dst, Immediate(static_cast<int32_t>(x)));
687  } else {
688    movq(kScratchRegister, x, RelocInfo::NONE);
689    movq(dst, kScratchRegister);
690  }
691}
692
693// ----------------------------------------------------------------------------
694// Smi tagging, untagging and tag detection.
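// On x64, kSmiTag is 0 and kSmiShift is 32: a smi keeps its 32-bit payload in
// the upper half of the word and all-zero tag/padding bits in the lower half,
// e.g. Smi::FromInt(3) is the bit pattern 0x0000000300000000. This is why the
// helpers below can read or write a smi field as a plain 32-bit integer at
// byte offset kSmiShift / kBitsPerByte == 4.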
695
696Register MacroAssembler::GetSmiConstant(Smi* source) {
697  int value = source->value();
698  if (value == 0) {
699    xorl(kScratchRegister, kScratchRegister);
700    return kScratchRegister;
701  }
702  if (value == 1) {
703    return kSmiConstantRegister;
704  }
705  LoadSmiConstant(kScratchRegister, source);
706  return kScratchRegister;
707}
708
709void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
710  if (FLAG_debug_code) {
711    movq(dst,
712         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
713         RelocInfo::NONE);
714    cmpq(dst, kSmiConstantRegister);
715    if (allow_stub_calls()) {
716      Assert(equal, "Uninitialized kSmiConstantRegister");
717    } else {
718      NearLabel ok;
719      j(equal, &ok);
720      int3();
721      bind(&ok);
722    }
723  }
724  if (source->value() == 0) {
725    xorl(dst, dst);
726    return;
727  }
728  int value = source->value();
729  bool negative = value < 0;
730  unsigned int uvalue = negative ? -value : value;
731
732  switch (uvalue) {
733    case 9:
734      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
735      break;
736    case 8:
737      xorl(dst, dst);
738      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
739      break;
740    case 4:
741      xorl(dst, dst);
742      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
743      break;
744    case 5:
745      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
746      break;
747    case 3:
748      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
749      break;
750    case 2:
751      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
752      break;
753    case 1:
754      movq(dst, kSmiConstantRegister);
755      break;
756    case 0:
757      UNREACHABLE();
758      return;
759    default:
760      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
761      return;
762  }
763  if (negative) {
764    neg(dst);
765  }
766}
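// The constant loads above exploit the fact that kSmiConstantRegister (r15)
// permanently holds Smi::FromInt(1), so a single lea can materialize small
// smi constants without a 10-byte movq of a 64-bit immediate: for value 5,
// lea(dst, Operand(r15, r15, times_4, 0)) yields r15 + 4 * r15, which is
// 5 << kSmiShift, i.e. Smi::FromInt(5); negative values are negated after.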
767
768
769void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
770  ASSERT_EQ(0, kSmiTag);
771  if (!dst.is(src)) {
772    movl(dst, src);
773  }
774  shl(dst, Immediate(kSmiShift));
775}
776
777
778void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
779  if (FLAG_debug_code) {
780    testb(dst, Immediate(0x01));
781    NearLabel ok;
782    j(zero, &ok);
783    if (allow_stub_calls()) {
784      Abort("Integer32ToSmiField writing to non-smi location");
785    } else {
786      int3();
787    }
788    bind(&ok);
789  }
790  ASSERT(kSmiShift % kBitsPerByte == 0);
791  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
792}
793
794
795void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
796                                                Register src,
797                                                int constant) {
798  if (dst.is(src)) {
799    addq(dst, Immediate(constant));
800  } else {
801    lea(dst, Operand(src, constant));
802  }
803  shl(dst, Immediate(kSmiShift));
804}
805
806
807void MacroAssembler::SmiToInteger32(Register dst, Register src) {
808  ASSERT_EQ(0, kSmiTag);
809  if (!dst.is(src)) {
810    movq(dst, src);
811  }
812  shr(dst, Immediate(kSmiShift));
813}
814
815
816void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
817  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
818}
819
820
821void MacroAssembler::SmiToInteger64(Register dst, Register src) {
822  ASSERT_EQ(0, kSmiTag);
823  if (!dst.is(src)) {
824    movq(dst, src);
825  }
826  sar(dst, Immediate(kSmiShift));
827}
828
829
830void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
831  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
832}
833
834
835void MacroAssembler::SmiTest(Register src) {
836  testq(src, src);
837}
838
839
840void MacroAssembler::SmiCompare(Register dst, Register src) {
841  cmpq(dst, src);
842}
843
844
845void MacroAssembler::SmiCompare(Register dst, Smi* src) {
846  ASSERT(!dst.is(kScratchRegister));
847  if (src->value() == 0) {
848    testq(dst, dst);
849  } else {
850    Register constant_reg = GetSmiConstant(src);
851    cmpq(dst, constant_reg);
852  }
853}
854
855
856void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
857  cmpq(dst, src);
858}
859
860
861void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
862  cmpq(dst, src);
863}
864
865
866void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
867  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
868}
869
870
871void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
872  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
873}
874
875
876void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
877                                                           Register src,
878                                                           int power) {
879  ASSERT(power >= 0);
880  ASSERT(power < 64);
881  if (power == 0) {
882    SmiToInteger64(dst, src);
883    return;
884  }
885  if (!dst.is(src)) {
886    movq(dst, src);
887  }
888  if (power < kSmiShift) {
889    sar(dst, Immediate(kSmiShift - power));
890  } else if (power > kSmiShift) {
891    shl(dst, Immediate(power - kSmiShift));
892  }
893}
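// The shift above folds untagging and scaling into one instruction: a
// positive smi holds value << kSmiShift, so sar by (kSmiShift - power) leaves
// value << power. With power == kPointerSizeLog2 this turns a smi array index
// directly into a byte offset for pointer-sized elements.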
894
895
896void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
897                                                         Register src,
898                                                         int power) {
899  ASSERT((0 <= power) && (power < 32));
900  if (dst.is(src)) {
901    shr(dst, Immediate(power + kSmiShift));
902  } else {
903    UNIMPLEMENTED();  // Not used.
904  }
905}
906
907
908Condition MacroAssembler::CheckSmi(Register src) {
909  ASSERT_EQ(0, kSmiTag);
910  testb(src, Immediate(kSmiTagMask));
911  return zero;
912}
913
914
915Condition MacroAssembler::CheckSmi(const Operand& src) {
916  ASSERT_EQ(0, kSmiTag);
917  testb(src, Immediate(kSmiTagMask));
918  return zero;
919}
920
921
922Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
923  ASSERT_EQ(0, kSmiTag);
924  // Test that both bits of the mask 0x8000000000000001 are zero.
925  movq(kScratchRegister, src);
926  rol(kScratchRegister, Immediate(1));
927  testb(kScratchRegister, Immediate(3));
928  return zero;
929}
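// The rol-by-1 above rotates the sign bit (bit 63) into bit 0 and moves the
// smi tag bit into bit 1, so one testb against 3 checks "is a smi" and
// "is non-negative" at the same time.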
930
931
932Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
933  if (first.is(second)) {
934    return CheckSmi(first);
935  }
936  ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
937  leal(kScratchRegister, Operand(first, second, times_1, 0));
938  testb(kScratchRegister, Immediate(0x03));
939  return zero;
940}
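// The leal trick above relies on kSmiTag == 0 and kHeapObjectTag == 1 with a
// two-bit tag: a smi has 00 in its low two bits and a heap object has 01, so
// the sum first + second has 00 there only when both inputs are smis (00 + 01
// gives 01, 01 + 01 gives 10), which the testb against 0x03 detects.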
941
942
943Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
944                                                  Register second) {
945  if (first.is(second)) {
946    return CheckNonNegativeSmi(first);
947  }
948  movq(kScratchRegister, first);
949  or_(kScratchRegister, second);
950  rol(kScratchRegister, Immediate(1));
951  testl(kScratchRegister, Immediate(3));
952  return zero;
953}
954
955
956Condition MacroAssembler::CheckEitherSmi(Register first,
957                                         Register second,
958                                         Register scratch) {
959  if (first.is(second)) {
960    return CheckSmi(first);
961  }
962  if (scratch.is(second)) {
963    andl(scratch, first);
964  } else {
965    if (!scratch.is(first)) {
966      movl(scratch, first);
967    }
968    andl(scratch, second);
969  }
970  testb(scratch, Immediate(kSmiTagMask));
971  return zero;
972}
973
974
975Condition MacroAssembler::CheckIsMinSmi(Register src) {
976  ASSERT(!src.is(kScratchRegister));
977  // If we overflow by subtracting one, it's the minimal smi value.
978  cmpq(src, kSmiConstantRegister);
979  return overflow;
980}
981
982
983Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
984  // A 32-bit integer value can always be converted to a smi.
985  return always;
986}
987
988
989Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
990  // An unsigned 32-bit integer value is valid as long as the high bit
991  // is not set.
992  testl(src, src);
993  return positive;
994}
995
996
997void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
998  if (dst.is(src)) {
999    andl(dst, Immediate(kSmiTagMask));
1000  } else {
1001    movl(dst, Immediate(kSmiTagMask));
1002    andl(dst, src);
1003  }
1004}
1005
1006
1007void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1008  if (!(src.AddressUsesRegister(dst))) {
1009    movl(dst, Immediate(kSmiTagMask));
1010    andl(dst, src);
1011  } else {
1012    movl(dst, src);
1013    andl(dst, Immediate(kSmiTagMask));
1014  }
1015}
1016
1017
1018void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1019  if (constant->value() == 0) {
1020    if (!dst.is(src)) {
1021      movq(dst, src);
1022    }
1023    return;
1024  } else if (dst.is(src)) {
1025    ASSERT(!dst.is(kScratchRegister));
1026    switch (constant->value()) {
1027      case 1:
1028        addq(dst, kSmiConstantRegister);
1029        return;
1030      case 2:
1031        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1032        return;
1033      case 4:
1034        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1035        return;
1036      case 8:
1037        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1038        return;
1039      default:
1040        Register constant_reg = GetSmiConstant(constant);
1041        addq(dst, constant_reg);
1042        return;
1043    }
1044  } else {
1045    switch (constant->value()) {
1046      case 1:
1047        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1048        return;
1049      case 2:
1050        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1051        return;
1052      case 4:
1053        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1054        return;
1055      case 8:
1056        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1057        return;
1058      default:
1059        LoadSmiConstant(dst, constant);
1060        addq(dst, src);
1061        return;
1062    }
1063  }
1064}
1065
1066
1067void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1068  if (constant->value() != 0) {
1069    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
1070  }
1071}
1072
1073
1074void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1075  if (constant->value() == 0) {
1076    if (!dst.is(src)) {
1077      movq(dst, src);
1078    }
1079  } else if (dst.is(src)) {
1080    ASSERT(!dst.is(kScratchRegister));
1081    Register constant_reg = GetSmiConstant(constant);
1082    subq(dst, constant_reg);
1083  } else {
1084    if (constant->value() == Smi::kMinValue) {
1085      LoadSmiConstant(dst, constant);
1086      // Adding and subtracting the min-value gives the same result; it only
1087      // differs on the overflow bit, which we don't check here.
1088      addq(dst, src);
1089    } else {
1090      // Subtract by adding the negation.
1091      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1092      addq(dst, src);
1093    }
1094  }
1095}
1096
1097
1098void MacroAssembler::SmiAdd(Register dst,
1099                            Register src1,
1100                            Register src2) {
1101  // No overflow checking. Use only when it's known that
1102  // overflowing is impossible.
1103  ASSERT(!dst.is(src2));
1104  if (dst.is(src1)) {
1105    addq(dst, src2);
1106  } else {
1107    movq(dst, src1);
1108    addq(dst, src2);
1109  }
1110  Assert(no_overflow, "Smi addition overflow");
1111}
1112
1113
1114void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1115  // No overflow checking. Use only when it's known that
1116  // overflowing is impossible (e.g., subtracting two positive smis).
1117  ASSERT(!dst.is(src2));
1118  if (dst.is(src1)) {
1119    subq(dst, src2);
1120  } else {
1121    movq(dst, src1);
1122    subq(dst, src2);
1123  }
1124  Assert(no_overflow, "Smi subtraction overflow");
1125}
1126
1127
1128void MacroAssembler::SmiSub(Register dst,
1129                            Register src1,
1130                            const Operand& src2) {
1131  // No overflow checking. Use only when it's known that
1132  // overflowing is impossible (e.g., subtracting two positive smis).
1133  if (dst.is(src1)) {
1134    subq(dst, src2);
1135  } else {
1136    movq(dst, src1);
1137    subq(dst, src2);
1138  }
1139  Assert(no_overflow, "Smi subtraction overflow");
1140}
1141
1142
1143void MacroAssembler::SmiNot(Register dst, Register src) {
1144  ASSERT(!dst.is(kScratchRegister));
1145  ASSERT(!src.is(kScratchRegister));
1146  // Set tag and padding bits before negating, so that they are zero afterwards.
1147  movl(kScratchRegister, Immediate(~0));
1148  if (dst.is(src)) {
1149    xor_(dst, kScratchRegister);
1150  } else {
1151    lea(dst, Operand(src, kScratchRegister, times_1, 0));
1152  }
1153  not_(dst);
1154}
1155
1156
1157void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1158  ASSERT(!dst.is(src2));
1159  if (!dst.is(src1)) {
1160    movq(dst, src1);
1161  }
1162  and_(dst, src2);
1163}
1164
1165
1166void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1167  if (constant->value() == 0) {
1168    Set(dst, 0);
1169  } else if (dst.is(src)) {
1170    ASSERT(!dst.is(kScratchRegister));
1171    Register constant_reg = GetSmiConstant(constant);
1172    and_(dst, constant_reg);
1173  } else {
1174    LoadSmiConstant(dst, constant);
1175    and_(dst, src);
1176  }
1177}
1178
1179
1180void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1181  if (!dst.is(src1)) {
1182    movq(dst, src1);
1183  }
1184  or_(dst, src2);
1185}
1186
1187
1188void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1189  if (dst.is(src)) {
1190    ASSERT(!dst.is(kScratchRegister));
1191    Register constant_reg = GetSmiConstant(constant);
1192    or_(dst, constant_reg);
1193  } else {
1194    LoadSmiConstant(dst, constant);
1195    or_(dst, src);
1196  }
1197}
1198
1199
1200void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1201  if (!dst.is(src1)) {
1202    movq(dst, src1);
1203  }
1204  xor_(dst, src2);
1205}
1206
1207
1208void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1209  if (dst.is(src)) {
1210    ASSERT(!dst.is(kScratchRegister));
1211    Register constant_reg = GetSmiConstant(constant);
1212    xor_(dst, constant_reg);
1213  } else {
1214    LoadSmiConstant(dst, constant);
1215    xor_(dst, src);
1216  }
1217}
1218
1219
1220void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1221                                                     Register src,
1222                                                     int shift_value) {
1223  ASSERT(is_uint5(shift_value));
1224  if (shift_value > 0) {
1225    if (dst.is(src)) {
1226      sar(dst, Immediate(shift_value + kSmiShift));
1227      shl(dst, Immediate(kSmiShift));
1228    } else {
1229      UNIMPLEMENTED();  // Not used.
1230    }
1231  }
1232}
1233
1234
1235void MacroAssembler::SmiShiftLeftConstant(Register dst,
1236                                          Register src,
1237                                          int shift_value) {
1238  if (!dst.is(src)) {
1239    movq(dst, src);
1240  }
1241  if (shift_value > 0) {
1242    shl(dst, Immediate(shift_value));
1243  }
1244}
1245
1246
1247void MacroAssembler::SmiShiftLeft(Register dst,
1248                                  Register src1,
1249                                  Register src2) {
1250  ASSERT(!dst.is(rcx));
1251  if (!dst.is(src1)) {
1252    movq(dst, src1);
1253  }
1254  // Untag the shift amount into rcx.
1255  SmiToInteger32(rcx, src2);
1256  // The shift amount is taken from the lower 5 bits only, not six as the
1257  // 64-bit shl opcode would otherwise allow.
1258  and_(rcx, Immediate(0x1f));
1259  shl_cl(dst);
1260}
1261
1262
1263void MacroAssembler::SmiShiftArithmeticRight(Register dst,
1264                                             Register src1,
1265                                             Register src2) {
1266  ASSERT(!dst.is(kScratchRegister));
1267  ASSERT(!src1.is(kScratchRegister));
1268  ASSERT(!src2.is(kScratchRegister));
1269  ASSERT(!dst.is(rcx));
1270  if (src1.is(rcx)) {
1271    movq(kScratchRegister, src1);
1272  } else if (src2.is(rcx)) {
1273    movq(kScratchRegister, src2);
1274  }
1275  if (!dst.is(src1)) {
1276    movq(dst, src1);
1277  }
1278  SmiToInteger32(rcx, src2);
1279  orl(rcx, Immediate(kSmiShift));
1280  sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
1281  shl(dst, Immediate(kSmiShift));
1282  if (src1.is(rcx)) {
1283    movq(src1, kScratchRegister);
1284  } else if (src2.is(rcx)) {
1285    movq(src2, kScratchRegister);
1286  }
1287}
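// In the arithmetic shift above, or-ing kSmiShift into the count makes sar
// shift by 32 plus the requested amount, untagging and shifting in a single
// step; the following shl re-tags the result. Whichever input aliases rcx is
// preserved in kScratchRegister across the operation and restored afterwards.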
1288
1289
1290SmiIndex MacroAssembler::SmiToIndex(Register dst,
1291                                    Register src,
1292                                    int shift) {
1293  ASSERT(is_uint6(shift));
1294  // There is a possible optimization if shift is in the range 60-63, but that
1295  // will (and must) never happen.
1296  if (!dst.is(src)) {
1297    movq(dst, src);
1298  }
1299  if (shift < kSmiShift) {
1300    sar(dst, Immediate(kSmiShift - shift));
1301  } else {
1302    shl(dst, Immediate(shift - kSmiShift));
1303  }
1304  return SmiIndex(dst, times_1);
1305}
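// SmiToIndex converts a smi holding value << kSmiShift into value << shift
// and reports a times_1 scale, so the result can be used directly as a
// pre-scaled index in an Operand; shift == kPointerSizeLog2, for example,
// gives the byte offset of an element in an array of pointers.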
1306
1307SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
1308                                            Register src,
1309                                            int shift) {
1310  // Register src holds a positive smi.
1311  ASSERT(is_uint6(shift));
1312  if (!dst.is(src)) {
1313    movq(dst, src);
1314  }
1315  neg(dst);
1316  if (shift < kSmiShift) {
1317    sar(dst, Immediate(kSmiShift - shift));
1318  } else {
1319    shl(dst, Immediate(shift - kSmiShift));
1320  }
1321  return SmiIndex(dst, times_1);
1322}
1323
1324
1325void MacroAssembler::Move(Register dst, Register src) {
1326  if (!dst.is(src)) {
1327    movq(dst, src);
1328  }
1329}
1330
1331
1332void MacroAssembler::Move(Register dst, Handle<Object> source) {
1333  ASSERT(!source->IsFailure());
1334  if (source->IsSmi()) {
1335    Move(dst, Smi::cast(*source));
1336  } else {
1337    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
1338  }
1339}
1340
1341
1342void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
1343  ASSERT(!source->IsFailure());
1344  if (source->IsSmi()) {
1345    Move(dst, Smi::cast(*source));
1346  } else {
1347    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1348    movq(dst, kScratchRegister);
1349  }
1350}
1351
1352
1353void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1354  if (source->IsSmi()) {
1355    SmiCompare(dst, Smi::cast(*source));
1356  } else {
1357    Move(kScratchRegister, source);
1358    cmpq(dst, kScratchRegister);
1359  }
1360}
1361
1362
1363void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
1364  if (source->IsSmi()) {
1365    SmiCompare(dst, Smi::cast(*source));
1366  } else {
1367    ASSERT(source->IsHeapObject());
1368    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1369    cmpq(dst, kScratchRegister);
1370  }
1371}
1372
1373
1374void MacroAssembler::Push(Handle<Object> source) {
1375  if (source->IsSmi()) {
1376    Push(Smi::cast(*source));
1377  } else {
1378    ASSERT(source->IsHeapObject());
1379    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1380    push(kScratchRegister);
1381  }
1382}
1383
1384
1385void MacroAssembler::Push(Smi* source) {
1386  intptr_t smi = reinterpret_cast<intptr_t>(source);
1387  if (is_int32(smi)) {
1388    push(Immediate(static_cast<int32_t>(smi)));
1389  } else {
1390    Register constant = GetSmiConstant(source);
1391    push(constant);
1392  }
1393}
1394
1395
1396void MacroAssembler::Drop(int stack_elements) {
1397  if (stack_elements > 0) {
1398    addq(rsp, Immediate(stack_elements * kPointerSize));
1399  }
1400}
1401
1402
1403void MacroAssembler::Test(const Operand& src, Smi* source) {
1404  testl(Operand(src, kIntSize), Immediate(source->value()));
1405}
1406
1407
1408void MacroAssembler::Jump(ExternalReference ext) {
1409  movq(kScratchRegister, ext);
1410  jmp(kScratchRegister);
1411}
1412
1413
1414void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1415  movq(kScratchRegister, destination, rmode);
1416  jmp(kScratchRegister);
1417}
1418
1419
1420void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
1421  // TODO(X64): Inline this
1422  jmp(code_object, rmode);
1423}
1424
1425
1426void MacroAssembler::Call(ExternalReference ext) {
1427  movq(kScratchRegister, ext);
1428  call(kScratchRegister);
1429}
1430
1431
1432void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1433  movq(kScratchRegister, destination, rmode);
1434  call(kScratchRegister);
1435}
1436
1437
1438void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1439  ASSERT(RelocInfo::IsCodeTarget(rmode));
1440  call(code_object, rmode);
1441}
1442
1443
1444void MacroAssembler::Pushad() {
1445  push(rax);
1446  push(rcx);
1447  push(rdx);
1448  push(rbx);
1449  // Not pushing rsp or rbp.
1450  push(rsi);
1451  push(rdi);
1452  push(r8);
1453  push(r9);
1454  // r10 is kScratchRegister.
1455  push(r11);
1456  push(r12);
1457  // r13 is kRootRegister.
1458  push(r14);
1459  // r15 is kSmiConstantRegister.
1460  STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
1461  // Use lea for symmetry with Popad.
1462  int sp_delta =
1463      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
1464  lea(rsp, Operand(rsp, -sp_delta));
1465}
1466
1467
1468void MacroAssembler::Popad() {
1469  // Popad must not change the flags, so use lea instead of addq.
1470  int sp_delta =
1471      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
1472  lea(rsp, Operand(rsp, sp_delta));
1473  pop(r14);
1474  pop(r12);
1475  pop(r11);
1476  pop(r9);
1477  pop(r8);
1478  pop(rdi);
1479  pop(rsi);
1480  pop(rbx);
1481  pop(rdx);
1482  pop(rcx);
1483  pop(rax);
1484}
1485
1486
1487void MacroAssembler::Dropad() {
1488  addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
1489}
1490
1491
1492// Order in which general registers are pushed by Pushad:
1493// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
1494int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
1495    0,
1496    1,
1497    2,
1498    3,
1499    -1,
1500    -1,
1501    4,
1502    5,
1503    6,
1504    7,
1505    -1,
1506    8,
1507    9,
1508    -1,
1509    10,
1510    -1
1511};
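// The table above maps a register code (rax == 0 through r15 == 15) to the
// index of its slot in a Pushad frame, counted in push order; -1 marks the
// registers Pushad deliberately skips (rsp, rbp, the scratch, root and
// smi-constant registers), which therefore have no safepoint slot.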
1512
1513
1514void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
1515  movq(SafepointRegisterSlot(dst), src);
1516}
1517
1518
1519void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
1520  movq(dst, SafepointRegisterSlot(src));
1521}
1522
1523
1524Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
1525  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
1526}
1527
1528
1529void MacroAssembler::PushTryHandler(CodeLocation try_location,
1530                                    HandlerType type) {
1531  // Adjust this code if not the case.
1532  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1533
1534  // The pc (return address) is already on TOS.  This code pushes state,
1535  // frame pointer and current handler.  Check that they are expected
1536  // next on the stack, in that order.
1537  ASSERT_EQ(StackHandlerConstants::kStateOffset,
1538            StackHandlerConstants::kPCOffset - kPointerSize);
1539  ASSERT_EQ(StackHandlerConstants::kFPOffset,
1540            StackHandlerConstants::kStateOffset - kPointerSize);
1541  ASSERT_EQ(StackHandlerConstants::kNextOffset,
1542            StackHandlerConstants::kFPOffset - kPointerSize);
1543
1544  if (try_location == IN_JAVASCRIPT) {
1545    if (type == TRY_CATCH_HANDLER) {
1546      push(Immediate(StackHandler::TRY_CATCH));
1547    } else {
1548      push(Immediate(StackHandler::TRY_FINALLY));
1549    }
1550    push(rbp);
1551  } else {
1552    ASSERT(try_location == IN_JS_ENTRY);
1553    // The frame pointer does not point to a JS frame so we save NULL
1554    // for rbp. We expect the code throwing an exception to check rbp
1555    // before dereferencing it to restore the context.
1556    push(Immediate(StackHandler::ENTRY));
1557    push(Immediate(0));  // NULL frame pointer.
1558  }
1559  // Save the current handler.
1560  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1561  push(Operand(kScratchRegister, 0));
1562  // Link this handler.
1563  movq(Operand(kScratchRegister, 0), rsp);
1564}
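// After PushTryHandler the top of the stack forms a StackHandler record of
// four pointer-sized fields, from lowest address upwards: next handler, frame
// pointer (NULL for a JS entry handler), state, and the return address that
// was already there. Top::k_handler_address now points at this record.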
1565
1566
1567void MacroAssembler::PopTryHandler() {
1568  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
1569  // Unlink this handler.
1570  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1571  pop(Operand(kScratchRegister, 0));
1572  // Remove the remaining fields.
1573  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1574}
1575
1576
1577void MacroAssembler::Throw(Register value) {
1578  // The stack is expected to contain the next handler, frame pointer, state
1579  // and return address, in that order; the asserts below check the layout.
1580  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
1581            StackHandlerConstants::kStateOffset);
1582  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
1583            StackHandlerConstants::kPCOffset);
1584  // Keep thrown value in rax.
1585  if (!value.is(rax)) {
1586    movq(rax, value);
1587  }
1588
1589  ExternalReference handler_address(Top::k_handler_address);
1590  movq(kScratchRegister, handler_address);
1591  movq(rsp, Operand(kScratchRegister, 0));
1592  // Get the next handler in the chain.
1593  pop(rcx);
1594  movq(Operand(kScratchRegister, 0), rcx);
1595  pop(rbp);  // pop frame pointer
1596  pop(rdx);  // remove state
1597
1598  // Before returning we restore the context from the frame pointer if not NULL.
1599  // The frame pointer is NULL in the exception handler of a JS entry frame.
1600  Set(rsi, 0);  // Tentatively set context pointer to NULL
1601  NearLabel skip;
1602  cmpq(rbp, Immediate(0));
1603  j(equal, &skip);
1604  movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
1605  bind(&skip);
1606  ret(0);
1607}
1608
1609
1610void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
1611                                      Register value) {
1612  // Keep thrown value in rax.
1613  if (!value.is(rax)) {
1614    movq(rax, value);
1615  }
1616  // Fetch top stack handler.
1617  ExternalReference handler_address(Top::k_handler_address);
1618  movq(kScratchRegister, handler_address);
1619  movq(rsp, Operand(kScratchRegister, 0));
1620
1621  // Unwind the handlers until the ENTRY handler is found.
1622  NearLabel loop, done;
1623  bind(&loop);
1624  // Load the type of the current stack handler.
1625  const int kStateOffset = StackHandlerConstants::kStateOffset;
1626  cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
1627  j(equal, &done);
1628  // Fetch the next handler in the list.
1629  const int kNextOffset = StackHandlerConstants::kNextOffset;
1630  movq(rsp, Operand(rsp, kNextOffset));
1631  jmp(&loop);
1632  bind(&done);
1633
1634  // Set the top handler address to the next handler past the current ENTRY handler.
1635  movq(kScratchRegister, handler_address);
1636  pop(Operand(kScratchRegister, 0));
1637
1638  if (type == OUT_OF_MEMORY) {
1639    // Set external caught exception to false.
1640    ExternalReference external_caught(Top::k_external_caught_exception_address);
1641    movq(rax, Immediate(false));
1642    store_rax(external_caught);
1643
1644    // Set pending exception and rax to out of memory exception.
1645    ExternalReference pending_exception(Top::k_pending_exception_address);
1646    movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
1647    store_rax(pending_exception);
1648  }
1649
1650  // Clear the context pointer.
1651  Set(rsi, 0);
1652
1653  // Restore registers from handler.
1654  STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
1655            StackHandlerConstants::kFPOffset);
1656  pop(rbp);  // FP
1657  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
1658            StackHandlerConstants::kStateOffset);
1659  pop(rdx);  // State
1660
1661  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
1662            StackHandlerConstants::kPCOffset);
1663  ret(0);
1664}
1665
1666
1667void MacroAssembler::Ret() {
1668  ret(0);
1669}
1670
1671
1672void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
1673  if (is_uint16(bytes_dropped)) {
1674    ret(bytes_dropped);
1675  } else {
1676    pop(scratch);
1677    addq(rsp, Immediate(bytes_dropped));
1678    push(scratch);
1679    ret(0);
1680  }
1681}
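// The ret instruction only takes a 16-bit immediate for the number of bytes
// to drop, so larger adjustments pop the return address, bump rsp explicitly
// and push the address back before returning.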
1682
1683
1684void MacroAssembler::FCmp() {
1685  fucomip();
1686  fstp(0);
1687}
1688
1689
1690void MacroAssembler::CmpObjectType(Register heap_object,
1691                                   InstanceType type,
1692                                   Register map) {
1693  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1694  CmpInstanceType(map, type);
1695}
1696
1697
1698void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
1699  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
1700       Immediate(static_cast<int8_t>(type)));
1701}
1702
1703
1704void MacroAssembler::CheckMap(Register obj,
1705                              Handle<Map> map,
1706                              Label* fail,
1707                              bool is_heap_object) {
1708  if (!is_heap_object) {
1709    JumpIfSmi(obj, fail);
1710  }
1711  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
1712  j(not_equal, fail);
1713}
1714
1715
1716void MacroAssembler::AbortIfNotNumber(Register object) {
1717  NearLabel ok;
1718  Condition is_smi = CheckSmi(object);
1719  j(is_smi, &ok);
1720  Cmp(FieldOperand(object, HeapObject::kMapOffset),
1721      Factory::heap_number_map());
1722  Assert(equal, "Operand not a number");
1723  bind(&ok);
1724}
1725
1726
1727void MacroAssembler::AbortIfSmi(Register object) {
1728  // Abort in debug code if the object is a smi.
1729  Condition is_smi = CheckSmi(object);
1730  Assert(NegateCondition(is_smi), "Operand is a smi");
1731}
1732
1733
1734void MacroAssembler::AbortIfNotSmi(Register object) {
1735  // Abort in debug code if the object is not a smi.
1736  Condition is_smi = CheckSmi(object);
1737  Assert(is_smi, "Operand is not a smi");
1738}
1739
1740
1741void MacroAssembler::AbortIfNotString(Register object) {
1742  testb(object, Immediate(kSmiTagMask));
1743  Assert(not_equal, "Operand is not a string");
1744  push(object);
1745  movq(object, FieldOperand(object, HeapObject::kMapOffset));
1746  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
1747  pop(object);
1748  Assert(below, "Operand is not a string");
1749}
1750
1751
1752void MacroAssembler::AbortIfNotRootValue(Register src,
1753                                         Heap::RootListIndex root_value_index,
1754                                         const char* message) {
1755  ASSERT(!src.is(kScratchRegister));
1756  LoadRoot(kScratchRegister, root_value_index);
1757  cmpq(src, kScratchRegister);
1758  Check(equal, message);
1759}
1760
1761
1763Condition MacroAssembler::IsObjectStringType(Register heap_object,
1764                                             Register map,
1765                                             Register instance_type) {
1766  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1767  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
1768  ASSERT(kNotStringTag != 0);
1769  testb(instance_type, Immediate(kIsNotStringMask));
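  // kStringTag is zero, so the zero condition is set exactly when the
  // object is a string.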
1770  return zero;
1771}
1772
1773
1774void MacroAssembler::TryGetFunctionPrototype(Register function,
1775                                             Register result,
1776                                             Label* miss) {
1777  // Check that the receiver isn't a smi.
1778  testl(function, Immediate(kSmiTagMask));
1779  j(zero, miss);
1780
1781  // Check that the function really is a function.
1782  CmpObjectType(function, JS_FUNCTION_TYPE, result);
1783  j(not_equal, miss);
1784
1785  // Make sure that the function has an instance prototype.
1786  NearLabel non_instance;
1787  testb(FieldOperand(result, Map::kBitFieldOffset),
1788        Immediate(1 << Map::kHasNonInstancePrototype));
1789  j(not_zero, &non_instance);
1790
1791  // Get the prototype or initial map from the function.
1792  movq(result,
1793       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1794
1795  // If the prototype or initial map is the hole, don't return it and
1796  // simply miss the cache instead. This will allow us to allocate a
1797  // prototype object on-demand in the runtime system.
1798  CompareRoot(result, Heap::kTheHoleValueRootIndex);
1799  j(equal, miss);
1800
1801  // If the function does not have an initial map, we're done.
1802  NearLabel done;
1803  CmpObjectType(result, MAP_TYPE, kScratchRegister);
1804  j(not_equal, &done);
1805
1806  // Get the prototype from the initial map.
1807  movq(result, FieldOperand(result, Map::kPrototypeOffset));
1808  jmp(&done);
1809
1810  // Non-instance prototype: Fetch prototype from constructor field
1811  // in initial map.
1812  bind(&non_instance);
1813  movq(result, FieldOperand(result, Map::kConstructorOffset));
1814
1815  // All done.
1816  bind(&done);
1817}
1818
1819
1820void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
1821  if (FLAG_native_code_counters && counter->Enabled()) {
1822    movq(kScratchRegister, ExternalReference(counter));
1823    movl(Operand(kScratchRegister, 0), Immediate(value));
1824  }
1825}
1826
1827
1828void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
1829  ASSERT(value > 0);
1830  if (FLAG_native_code_counters && counter->Enabled()) {
1831    movq(kScratchRegister, ExternalReference(counter));
1832    Operand operand(kScratchRegister, 0);
1833    if (value == 1) {
1834      incl(operand);
1835    } else {
1836      addl(operand, Immediate(value));
1837    }
1838  }
1839}
1840
1841
1842void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
1843  ASSERT(value > 0);
1844  if (FLAG_native_code_counters && counter->Enabled()) {
1845    movq(kScratchRegister, ExternalReference(counter));
1846    Operand operand(kScratchRegister, 0);
1847    if (value == 1) {
1848      decl(operand);
1849    } else {
1850      subl(operand, Immediate(value));
1851    }
1852  }
1853}
1854
1855
1856#ifdef ENABLE_DEBUGGER_SUPPORT
1857void MacroAssembler::DebugBreak() {
1858  ASSERT(allow_stub_calls());
1859  Set(rax, 0);  // No arguments.
1860  movq(rbx, ExternalReference(Runtime::kDebugBreak));
1861  CEntryStub ces(1);
1862  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1863}
1864#endif  // ENABLE_DEBUGGER_SUPPORT
1865
1866
1867void MacroAssembler::InvokeCode(Register code,
1868                                const ParameterCount& expected,
1869                                const ParameterCount& actual,
1870                                InvokeFlag flag,
1871                                PostCallGenerator* post_call_generator) {
1872  NearLabel done;
1873  InvokePrologue(expected,
1874                 actual,
1875                 Handle<Code>::null(),
1876                 code,
1877                 &done,
1878                 flag,
1879                 post_call_generator);
1880  if (flag == CALL_FUNCTION) {
1881    call(code);
1882    if (post_call_generator != NULL) post_call_generator->Generate();
1883  } else {
1884    ASSERT(flag == JUMP_FUNCTION);
1885    jmp(code);
1886  }
1887  bind(&done);
1888}
1889
1890
1891void MacroAssembler::InvokeCode(Handle<Code> code,
1892                                const ParameterCount& expected,
1893                                const ParameterCount& actual,
1894                                RelocInfo::Mode rmode,
1895                                InvokeFlag flag,
1896                                PostCallGenerator* post_call_generator) {
1897  NearLabel done;
1898  Register dummy = rax;
1899  InvokePrologue(expected,
1900                 actual,
1901                 code,
1902                 dummy,
1903                 &done,
1904                 flag,
1905                 post_call_generator);
1906  if (flag == CALL_FUNCTION) {
1907    Call(code, rmode);
1908    if (post_call_generator != NULL) post_call_generator->Generate();
1909  } else {
1910    ASSERT(flag == JUMP_FUNCTION);
1911    Jump(code, rmode);
1912  }
1913  bind(&done);
1914}
1915
1916
1917void MacroAssembler::InvokeFunction(Register function,
1918                                    const ParameterCount& actual,
1919                                    InvokeFlag flag,
1920                                    PostCallGenerator* post_call_generator) {
1921  ASSERT(function.is(rdi));
1922  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
1923  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
1924  movsxlq(rbx,
1925          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
1926  // Load the code entry, which points past the Code object header to the
1927  // start of the executable code.
1928  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
1929
1930  ParameterCount expected(rbx);
1931  InvokeCode(rdx, expected, actual, flag, post_call_generator);
1932}
1933
1934
1935void MacroAssembler::InvokeFunction(JSFunction* function,
1936                                    const ParameterCount& actual,
1937                                    InvokeFlag flag,
1938                                    PostCallGenerator* post_call_generator) {
1939  ASSERT(function->is_compiled());
1940  // Get the function and set up the context.
1941  Move(rdi, Handle<JSFunction>(function));
1942  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
1943
1944  if (V8::UseCrankshaft()) {
1945    // Since Crankshaft can recompile a function, we need to load
1946    // the Code object every time we call the function.
1947    movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
1948    ParameterCount expected(function->shared()->formal_parameter_count());
1949    InvokeCode(rdx, expected, actual, flag, post_call_generator);
1950  } else {
1951    // Invoke the cached code.
1952    Handle<Code> code(function->code());
1953    ParameterCount expected(function->shared()->formal_parameter_count());
1954    InvokeCode(code,
1955               expected,
1956               actual,
1957               RelocInfo::CODE_TARGET,
1958               flag,
1959               post_call_generator);
1960  }
1961}
1962
1963
1964void MacroAssembler::EnterFrame(StackFrame::Type type) {
1965  push(rbp);
1966  movq(rbp, rsp);
1967  push(rsi);  // Context.
1968  Push(Smi::FromInt(type));
1969  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1970  push(kScratchRegister);
1971  if (FLAG_debug_code) {
1972    movq(kScratchRegister,
1973         Factory::undefined_value(),
1974         RelocInfo::EMBEDDED_OBJECT);
1975    cmpq(Operand(rsp, 0), kScratchRegister);
1976    Check(not_equal, "code object not properly patched");
1977  }
1978}
1979
1980
1981void MacroAssembler::LeaveFrame(StackFrame::Type type) {
1982  if (FLAG_debug_code) {
1983    Move(kScratchRegister, Smi::FromInt(type));
1984    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
1985    Check(equal, "stack frame types must match");
1986  }
1987  movq(rsp, rbp);
1988  pop(rbp);
1989}
1990
1991
1992void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
1993  // Set up the frame structure on the stack.
1994  // All constants are relative to the frame pointer of the exit frame.
1995  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
1996  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
1997  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
1998  push(rbp);
1999  movq(rbp, rsp);
2000
2001  // Reserve room for entry stack pointer and push the code object.
2002  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
2003  push(Immediate(0));  // Saved entry sp, patched before call.
2004  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
2005  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
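  // Frame layout relative to rbp at this point:
  //   rbp + 2 * kPointerSize : caller SP (kCallerSPDisplacement)
  //   rbp + 1 * kPointerSize : return address
  //   rbp + 0 * kPointerSize : saved caller rbp
  //   rbp - 1 * kPointerSize : saved entry sp slot (patched later)
  //   rbp - 2 * kPointerSize : code object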
2006
2007  // Save the frame pointer and the context in top.
2008  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
2009  ExternalReference context_address(Top::k_context_address);
2010  if (save_rax) {
2011    movq(r14, rax);  // Backup rax before we use it.
2012  }
2013
2014  movq(rax, rbp);
2015  store_rax(c_entry_fp_address);
2016  movq(rax, rsi);
2017  store_rax(context_address);
2018}
2019
2020
2021void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
2022                                            bool save_doubles) {
2023#ifdef _WIN64
2024  const int kShadowSpace = 4;
2025  arg_stack_space += kShadowSpace;
2026#endif
2027  // Optionally save the allocatable XMM registers.
2028  if (save_doubles) {
2029    CpuFeatures::Scope scope(SSE2);
2030    int space = XMMRegister::kNumRegisters * kDoubleSize +
2031        arg_stack_space * kPointerSize;
2032    subq(rsp, Immediate(space));
2033    int offset = -2 * kPointerSize;
2034    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
2035      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
2036      movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
2037    }
2038  } else if (arg_stack_space > 0) {
2039    subq(rsp, Immediate(arg_stack_space * kPointerSize));
2040  }
2041
2042  // Get the required frame alignment for the OS.
2043  static const int kFrameAlignment = OS::ActivationFrameAlignment();
2044  if (kFrameAlignment > 0) {
2045    ASSERT(IsPowerOf2(kFrameAlignment));
2046    movq(kScratchRegister, Immediate(-kFrameAlignment));
2047    and_(rsp, kScratchRegister);
2048  }
2049
2050  // Patch the saved entry sp.
2051  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
2052}
2053
2054
2055void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
2056  EnterExitFramePrologue(true);
2057
2058  // Set up argv in callee-saved register r12. It is reused in LeaveExitFrame,
2059  // so it must be retained across the C-call.
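  // r14 holds the rax value saved by EnterExitFramePrologue(true); given
  // that the result is argv, it is used here as the argument count.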
2060  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
2061  lea(r12, Operand(rbp, r14, times_pointer_size, offset));
2062
2063  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
2064}
2065
2066
2067void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
2068  EnterExitFramePrologue(false);
2069  EnterExitFrameEpilogue(arg_stack_space, false);
2070}
2071
2072
2073void MacroAssembler::LeaveExitFrame(bool save_doubles) {
2074  // Registers:
2075  // r12 : argv
2076  if (save_doubles) {
2077    int offset = -2 * kPointerSize;
2078    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
2079      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
2080      movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
2081    }
2082  }
2083  // Get the return address from the stack and restore the frame pointer.
2084  movq(rcx, Operand(rbp, 1 * kPointerSize));
2085  movq(rbp, Operand(rbp, 0 * kPointerSize));
2086
2087  // Drop everything up to and including the arguments and the receiver
2088  // from the caller stack.
2089  lea(rsp, Operand(r12, 1 * kPointerSize));
2090
2091  // Push the return address to get ready to return.
2092  push(rcx);
2093
2094  LeaveExitFrameEpilogue();
2095}
2096
2097
2098void MacroAssembler::LeaveApiExitFrame() {
2099  movq(rsp, rbp);
2100  pop(rbp);
2101
2102  LeaveExitFrameEpilogue();
2103}
2104
2105
2106void MacroAssembler::LeaveExitFrameEpilogue() {
2107  // Restore current context from top and clear it in debug mode.
2108  ExternalReference context_address(Top::k_context_address);
2109  movq(kScratchRegister, context_address);
2110  movq(rsi, Operand(kScratchRegister, 0));
2111#ifdef DEBUG
2112  movq(Operand(kScratchRegister, 0), Immediate(0));
2113#endif
2114
2115  // Clear the top frame.
2116  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
2117  movq(kScratchRegister, c_entry_fp_address);
2118  movq(Operand(kScratchRegister, 0), Immediate(0));
2119}
2120
2121
2122void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
2123                                            Register scratch,
2124                                            Label* miss) {
2125  Label same_contexts;
2126
2127  ASSERT(!holder_reg.is(scratch));
2128  ASSERT(!scratch.is(kScratchRegister));
2129  // Load current lexical context from the stack frame.
2130  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
2131
2132  // When generating debug code, make sure the lexical context is set.
2133  if (FLAG_debug_code) {
2134    cmpq(scratch, Immediate(0));
2135    Check(not_equal, "we should not have an empty lexical context");
2136  }
2137  // Load the global context of the current context.
2138  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
2139  movq(scratch, FieldOperand(scratch, offset));
2140  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
2141
2142  // Check the context is a global context.
2143  if (FLAG_debug_code) {
2144    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
2145        Factory::global_context_map());
2146    Check(equal, "JSGlobalObject::global_context should be a global context.");
2147  }
2148
2149  // Check if both contexts are the same.
2150  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2151  j(equal, &same_contexts);
2152
2153  // Compare security tokens.
2154  // Check that the security token in the calling global object is
2155  // compatible with the security token in the receiving global
2156  // object.
2157
2158  // Check the context is a global context.
2159  if (FLAG_debug_code) {
2160    // Preserve original value of holder_reg.
2161    push(holder_reg);
2162    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2163    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
2164    Check(not_equal, "JSGlobalProxy::context() should not be null.");
2165
2166    // Read the first word and compare it to global_context_map().
2167    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
2168    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
2169    Check(equal, "JSGlobalObject::global_context should be a global context.");
2170    pop(holder_reg);
2171  }
2172
2173  movq(kScratchRegister,
2174       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2175  int token_offset =
2176      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
2177  movq(scratch, FieldOperand(scratch, token_offset));
2178  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
2179  j(not_equal, miss);
2180
2181  bind(&same_contexts);
2182}
2183
2184
2185void MacroAssembler::LoadAllocationTopHelper(Register result,
2186                                             Register scratch,
2187                                             AllocationFlags flags) {
2188  ExternalReference new_space_allocation_top =
2189      ExternalReference::new_space_allocation_top_address();
2190
2191  // Just return if allocation top is already known.
2192  if ((flags & RESULT_CONTAINS_TOP) != 0) {
2193    // No use of scratch if allocation top is provided.
2194    ASSERT(!scratch.is_valid());
2195#ifdef DEBUG
2196    // Assert that result actually contains top on entry.
2197    movq(kScratchRegister, new_space_allocation_top);
2198    cmpq(result, Operand(kScratchRegister, 0));
2199    Check(equal, "Unexpected allocation top");
2200#endif
2201    return;
2202  }
2203
2204  // Move address of new object to result. Use scratch register if available,
2205  // and keep address in scratch until call to UpdateAllocationTopHelper.
2206  if (scratch.is_valid()) {
2207    movq(scratch, new_space_allocation_top);
2208    movq(result, Operand(scratch, 0));
2209  } else if (result.is(rax)) {
2210    load_rax(new_space_allocation_top);
2211  } else {
2212    movq(kScratchRegister, new_space_allocation_top);
2213    movq(result, Operand(kScratchRegister, 0));
2214  }
2215}
2216
2217
2218void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
2219                                               Register scratch) {
2220  if (FLAG_debug_code) {
2221    testq(result_end, Immediate(kObjectAlignmentMask));
2222    Check(zero, "Unaligned allocation in new space");
2223  }
2224
2225  ExternalReference new_space_allocation_top =
2226      ExternalReference::new_space_allocation_top_address();
2227
2228  // Update new top.
2229  if (result_end.is(rax)) {
2230    // rax can be stored directly to a memory location.
2231    store_rax(new_space_allocation_top);
2232  } else {
2233    // Register required - use scratch provided if available.
2234    if (scratch.is_valid()) {
2235      movq(Operand(scratch, 0), result_end);
2236    } else {
2237      movq(kScratchRegister, new_space_allocation_top);
2238      movq(Operand(kScratchRegister, 0), result_end);
2239    }
2240  }
2241}
2242
2243
2244void MacroAssembler::AllocateInNewSpace(int object_size,
2245                                        Register result,
2246                                        Register result_end,
2247                                        Register scratch,
2248                                        Label* gc_required,
2249                                        AllocationFlags flags) {
2250  if (!FLAG_inline_new) {
2251    if (FLAG_debug_code) {
2252      // Trash the registers to simulate an allocation failure.
2253      movl(result, Immediate(0x7091));
2254      if (result_end.is_valid()) {
2255        movl(result_end, Immediate(0x7191));
2256      }
2257      if (scratch.is_valid()) {
2258        movl(scratch, Immediate(0x7291));
2259      }
2260    }
2261    jmp(gc_required);
2262    return;
2263  }
2264  ASSERT(!result.is(result_end));
2265
2266  // Load address of new object into result.
2267  LoadAllocationTopHelper(result, scratch, flags);
2268
2269  // Calculate new top and bail out if new space is exhausted.
2270  ExternalReference new_space_allocation_limit =
2271      ExternalReference::new_space_allocation_limit_address();
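  // Bump-pointer allocation: add object_size to the current top and bail
  // out to gc_required if the addition overflows or the new top exceeds
  // the allocation limit.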
2272
2273  Register top_reg = result_end.is_valid() ? result_end : result;
2274
2275  if (!top_reg.is(result)) {
2276    movq(top_reg, result);
2277  }
2278  addq(top_reg, Immediate(object_size));
2279  j(carry, gc_required);
2280  movq(kScratchRegister, new_space_allocation_limit);
2281  cmpq(top_reg, Operand(kScratchRegister, 0));
2282  j(above, gc_required);
2283
2284  // Update allocation top.
2285  UpdateAllocationTopHelper(top_reg, scratch);
2286
2287  if (top_reg.is(result)) {
2288    if ((flags & TAG_OBJECT) != 0) {
2289      subq(result, Immediate(object_size - kHeapObjectTag));
2290    } else {
2291      subq(result, Immediate(object_size));
2292    }
2293  } else if ((flags & TAG_OBJECT) != 0) {
2294    // Tag the result if requested.
2295    addq(result, Immediate(kHeapObjectTag));
2296  }
2297}
2298
2299
2300void MacroAssembler::AllocateInNewSpace(int header_size,
2301                                        ScaleFactor element_size,
2302                                        Register element_count,
2303                                        Register result,
2304                                        Register result_end,
2305                                        Register scratch,
2306                                        Label* gc_required,
2307                                        AllocationFlags flags) {
2308  if (!FLAG_inline_new) {
2309    if (FLAG_debug_code) {
2310      // Trash the registers to simulate an allocation failure.
2311      movl(result, Immediate(0x7091));
2312      movl(result_end, Immediate(0x7191));
2313      if (scratch.is_valid()) {
2314        movl(scratch, Immediate(0x7291));
2315      }
2316      // Register element_count is not modified by the function.
2317    }
2318    jmp(gc_required);
2319    return;
2320  }
2321  ASSERT(!result.is(result_end));
2322
2323  // Load address of new object into result.
2324  LoadAllocationTopHelper(result, scratch, flags);
2325
2326  // Calculate new top and bail out if new space is exhausted.
2327  ExternalReference new_space_allocation_limit =
2328      ExternalReference::new_space_allocation_limit_address();
2329
2330  // We assume that element_count*element_size + header_size does not
2331  // overflow.
2332  lea(result_end, Operand(element_count, element_size, header_size));
2333  addq(result_end, result);
2334  j(carry, gc_required);
2335  movq(kScratchRegister, new_space_allocation_limit);
2336  cmpq(result_end, Operand(kScratchRegister, 0));
2337  j(above, gc_required);
2338
2339  // Update allocation top.
2340  UpdateAllocationTopHelper(result_end, scratch);
2341
2342  // Tag the result if requested.
2343  if ((flags & TAG_OBJECT) != 0) {
2344    addq(result, Immediate(kHeapObjectTag));
2345  }
2346}
2347
2348
2349void MacroAssembler::AllocateInNewSpace(Register object_size,
2350                                        Register result,
2351                                        Register result_end,
2352                                        Register scratch,
2353                                        Label* gc_required,
2354                                        AllocationFlags flags) {
2355  if (!FLAG_inline_new) {
2356    if (FLAG_debug_code) {
2357      // Trash the registers to simulate an allocation failure.
2358      movl(result, Immediate(0x7091));
2359      movl(result_end, Immediate(0x7191));
2360      if (scratch.is_valid()) {
2361        movl(scratch, Immediate(0x7291));
2362      }
2363      // object_size is left unchanged by this function.
2364    }
2365    jmp(gc_required);
2366    return;
2367  }
2368  ASSERT(!result.is(result_end));
2369
2370  // Load address of new object into result.
2371  LoadAllocationTopHelper(result, scratch, flags);
2372
2373  // Calculate new top and bail out if new space is exhausted.
2374  ExternalReference new_space_allocation_limit =
2375      ExternalReference::new_space_allocation_limit_address();
2376  if (!object_size.is(result_end)) {
2377    movq(result_end, object_size);
2378  }
2379  addq(result_end, result);
2380  j(carry, gc_required);
2381  movq(kScratchRegister, new_space_allocation_limit);
2382  cmpq(result_end, Operand(kScratchRegister, 0));
2383  j(above, gc_required);
2384
2385  // Update allocation top.
2386  UpdateAllocationTopHelper(result_end, scratch);
2387
2388  // Tag the result if requested.
2389  if ((flags & TAG_OBJECT) != 0) {
2390    addq(result, Immediate(kHeapObjectTag));
2391  }
2392}
2393
2394
2395void MacroAssembler::UndoAllocationInNewSpace(Register object) {
2396  ExternalReference new_space_allocation_top =
2397      ExternalReference::new_space_allocation_top_address();
2398
2399  // Make sure the object has no tag before resetting top.
2400  and_(object, Immediate(~kHeapObjectTagMask));
2401  movq(kScratchRegister, new_space_allocation_top);
2402#ifdef DEBUG
2403  cmpq(object, Operand(kScratchRegister, 0));
2404  Check(below, "Undo allocation of non allocated memory");
2405#endif
2406  movq(Operand(kScratchRegister, 0), object);
2407}
2408
2409
2410void MacroAssembler::AllocateHeapNumber(Register result,
2411                                        Register scratch,
2412                                        Label* gc_required) {
2413  // Allocate heap number in new space.
2414  AllocateInNewSpace(HeapNumber::kSize,
2415                     result,
2416                     scratch,
2417                     no_reg,
2418                     gc_required,
2419                     TAG_OBJECT);
2420
2421  // Set the map.
2422  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
2423  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2424}
2425
2426
2427void MacroAssembler::AllocateTwoByteString(Register result,
2428                                           Register length,
2429                                           Register scratch1,
2430                                           Register scratch2,
2431                                           Register scratch3,
2432                                           Label* gc_required) {
2433  // Calculate the number of bytes needed for the characters in the string
2434  // while observing object alignment.
2435  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
2436                               kObjectAlignmentMask;
2437  ASSERT(kShortSize == 2);
2438  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
2439  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
2440                kHeaderAlignment));
2441  and_(scratch1, Immediate(~kObjectAlignmentMask));
2442  if (kHeaderAlignment > 0) {
2443    subq(scratch1, Immediate(kHeaderAlignment));
2444  }
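  // Example (assuming 8-byte object alignment and kHeaderAlignment == 0):
  // a length of 5 gives 5 * 2 + 7 = 17, which rounds down to 16 bytes of
  // character storage.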
2445
2446  // Allocate two byte string in new space.
2447  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
2448                     times_1,
2449                     scratch1,
2450                     result,
2451                     scratch2,
2452                     scratch3,
2453                     gc_required,
2454                     TAG_OBJECT);
2455
2456  // Set the map, length and hash field.
2457  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
2458  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2459  Integer32ToSmi(scratch1, length);
2460  movq(FieldOperand(result, String::kLengthOffset), scratch1);
2461  movq(FieldOperand(result, String::kHashFieldOffset),
2462       Immediate(String::kEmptyHashField));
2463}
2464
2465
2466void MacroAssembler::AllocateAsciiString(Register result,
2467                                         Register length,
2468                                         Register scratch1,
2469                                         Register scratch2,
2470                                         Register scratch3,
2471                                         Label* gc_required) {
2472  // Calculate the number of bytes needed for the characters in the string
2473  // while observing object alignment.
2474  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
2475                               kObjectAlignmentMask;
2476  movl(scratch1, length);
2477  ASSERT(kCharSize == 1);
2478  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
2479  and_(scratch1, Immediate(~kObjectAlignmentMask));
2480  if (kHeaderAlignment > 0) {
2481    subq(scratch1, Immediate(kHeaderAlignment));
2482  }
2483
2484  // Allocate ascii string in new space.
2485  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
2486                     times_1,
2487                     scratch1,
2488                     result,
2489                     scratch2,
2490                     scratch3,
2491                     gc_required,
2492                     TAG_OBJECT);
2493
2494  // Set the map, length and hash field.
2495  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
2496  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2497  Integer32ToSmi(scratch1, length);
2498  movq(FieldOperand(result, String::kLengthOffset), scratch1);
2499  movq(FieldOperand(result, String::kHashFieldOffset),
2500       Immediate(String::kEmptyHashField));
2501}
2502
2503
2504void MacroAssembler::AllocateConsString(Register result,
2505                                        Register scratch1,
2506                                        Register scratch2,
2507                                        Label* gc_required) {
2508  // Allocate cons string object in new space.
2509  AllocateInNewSpace(ConsString::kSize,
2510                     result,
2511                     scratch1,
2512                     scratch2,
2513                     gc_required,
2514                     TAG_OBJECT);
2515
2516  // Set the map. The other fields are left uninitialized.
2517  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
2518  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2519}
2520
2521
2522void MacroAssembler::AllocateAsciiConsString(Register result,
2523                                             Register scratch1,
2524                                             Register scratch2,
2525                                             Label* gc_required) {
2526  // Allocate ASCII cons string object in new space.
2527  AllocateInNewSpace(ConsString::kSize,
2528                     result,
2529                     scratch1,
2530                     scratch2,
2531                     gc_required,
2532                     TAG_OBJECT);
2533
2534  // Set the map. The other fields are left uninitialized.
2535  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
2536  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2537}
2538
2539
2540void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2541  if (context_chain_length > 0) {
2542    // Move up the chain of contexts to the context containing the slot.
2543    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
2544    // Load the function context (which is the incoming, outer context).
2545    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2546    for (int i = 1; i < context_chain_length; i++) {
2547      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2548      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2549    }
2550    // The context may be an intermediate context, not a function context.
2551    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2552  } else {
2553    // Slot is in the current function context.  Move it into the
2554    // destination register in case we store into it (the write barrier
2555    // cannot be allowed to destroy the context in rsi).
2556    movq(dst, rsi);
2557  }
2558
2559  // We should not have found a 'with' context by walking the context chain
2560  // (i.e., the static scope chain and runtime context chain do not agree).
2561  // A variable occurring in such a scope should have slot type LOOKUP and
2562  // not CONTEXT.
2563  if (FLAG_debug_code) {
2564    cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2565    Check(equal, "Yo dawg, I heard you liked function contexts "
2566                 "so I put function contexts in all your contexts");
2567  }
2568}
2569
2570
2571void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2572  // Load the global or builtins object from the current context.
2573  movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2574  // Load the global context from the global or builtins object.
2575  movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
2576  // Load the function from the global context.
2577  movq(function, Operand(function, Context::SlotOffset(index)));
2578}
2579
2580
2581void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2582                                                  Register map) {
2583  // Load the initial map.  The global functions all have initial maps.
2584  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2585  if (FLAG_debug_code) {
2586    Label ok, fail;
2587    CheckMap(map, Factory::meta_map(), &fail, false);
2588    jmp(&ok);
2589    bind(&fail);
2590    Abort("Global functions must have initial map");
2591    bind(&ok);
2592  }
2593}
2594
2595
2596int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2597  // On Windows 64 stack slots are reserved by the caller for all arguments
2598  // including the ones passed in registers, and space is always allocated for
2599  // the four register arguments even if the function takes fewer than four
2600  // arguments.
2601  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
2602  // and the caller does not reserve stack slots for them.
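  // For example, a call with 2 arguments needs 4 stack slots on Windows
  // (the shadow space minimum) and none elsewhere, while a call with 8
  // arguments needs 8 slots on Windows and 2 elsewhere.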
2603  ASSERT(num_arguments >= 0);
2604#ifdef _WIN64
2605  static const int kMinimumStackSlots = 4;
2606  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
2607  return num_arguments;
2608#else
2609  static const int kRegisterPassedArguments = 6;
2610  if (num_arguments < kRegisterPassedArguments) return 0;
2611  return num_arguments - kRegisterPassedArguments;
2612#endif
2613}
2614
2615
2616void MacroAssembler::PrepareCallCFunction(int num_arguments) {
2617  int frame_alignment = OS::ActivationFrameAlignment();
2618  ASSERT(frame_alignment != 0);
2619  ASSERT(num_arguments >= 0);
2620  // Align the stack and reserve space for the arguments and the saved rsp.
2621  movq(kScratchRegister, rsp);
2622  ASSERT(IsPowerOf2(frame_alignment));
2623  int argument_slots_on_stack =
2624      ArgumentStackSlotsForCFunctionCall(num_arguments);
2625  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
2626  and_(rsp, Immediate(-frame_alignment));
2627  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
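  // The original rsp is stored in the slot just above the argument slots;
  // CallCFunction restores it from there after the call returns.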
2628}
2629
2630
2631void MacroAssembler::CallCFunction(ExternalReference function,
2632                                   int num_arguments) {
2633  movq(rax, function);
2634  CallCFunction(rax, num_arguments);
2635}
2636
2637
2638void MacroAssembler::CallCFunction(Register function, int num_arguments) {
2639  // Check stack alignment.
2640  if (FLAG_debug_code) {
2641    CheckStackAlignment();
2642  }
2643
2644  call(function);
2645  ASSERT(OS::ActivationFrameAlignment() != 0);
2646  ASSERT(num_arguments >= 0);
2647  int argument_slots_on_stack =
2648      ArgumentStackSlotsForCFunctionCall(num_arguments);
2649  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
2650}
2651
2652
2653CodePatcher::CodePatcher(byte* address, int size)
2654    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
2655  // Create a new macro assembler pointing to the address of the code to patch.
2656  // The size is adjusted with kGap in order for the assembler to generate
2657  // size bytes of instructions without running into buffer size constraints.
2658  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2659}
2660
2661
2662CodePatcher::~CodePatcher() {
2663  // Indicate that code has changed.
2664  CPU::FlushICache(address_, size_);
2665
2666  // Check that the code was patched as expected.
2667  ASSERT(masm_.pc_ == address_ + size_);
2668  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2669}
2670
2671} }  // namespace v8::internal
2672
2673#endif  // V8_TARGET_ARCH_X64
2674