macro-assembler-x64.cc revision 5913587db4c6bab03d97bfe44b06289fd6d7270d
1// Copyright 2010 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_X64)
31
32#include "bootstrapper.h"
33#include "codegen-inl.h"
34#include "assembler-x64.h"
35#include "macro-assembler-x64.h"
36#include "serialize.h"
37#include "debug.h"
38#include "heap.h"
39
40namespace v8 {
41namespace internal {
42
43MacroAssembler::MacroAssembler(void* buffer, int size)
44    : Assembler(buffer, size),
45      generating_stub_(false),
46      allow_stub_calls_(true),
47      code_object_(Heap::undefined_value()) {
48}
49
50
51void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
52  movq(destination, Operand(kRootRegister, index << kPointerSizeLog2));
53}
54
55
56void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
57  movq(Operand(kRootRegister, index << kPointerSizeLog2), source);
58}
59
60
61void MacroAssembler::PushRoot(Heap::RootListIndex index) {
62  push(Operand(kRootRegister, index << kPointerSizeLog2));
63}
64
65
66void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
67  cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2));
68}
69
70
71void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
72  LoadRoot(kScratchRegister, index);
73  cmpq(with, kScratchRegister);
74}
75
76
77void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
78  CompareRoot(rsp, Heap::kStackLimitRootIndex);
79  j(below, on_stack_overflow);
80}
81
82
83void MacroAssembler::RecordWriteHelper(Register object,
84                                       Register addr,
85                                       Register scratch) {
86  if (FLAG_debug_code) {
87    // Check that the object is not in new space.
88    NearLabel not_in_new_space;
89    InNewSpace(object, scratch, not_equal, &not_in_new_space);
90    Abort("new-space object passed to RecordWriteHelper");
91    bind(&not_in_new_space);
92  }
93
94  // Compute the page start address from the heap object pointer, and reuse
95  // the 'object' register for it.
96  and_(object, Immediate(~Page::kPageAlignmentMask));
97
98  // Compute the number of the region covering addr. See
99  // Page::GetRegionNumberForAddress for more details.
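  // For illustration (the exact constants are defined on Page): if each dirty
  // region covers 2^kRegionSizeLog2 bytes of a page, the shift and mask below
  // compute (addr & kPageAlignmentMask) >> kRegionSizeLog2, i.e. the index of
  // the region containing addr within its page, and bts then sets that
  // region's bit in the page's dirty marks.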
100  shrl(addr, Immediate(Page::kRegionSizeLog2));
101  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
102
103  // Set dirty mark for region.
104  bts(Operand(object, Page::kDirtyFlagOffset), addr);
105}
106
107
108void MacroAssembler::RecordWrite(Register object,
109                                 int offset,
110                                 Register value,
111                                 Register index) {
112  // The compiled code assumes that record write doesn't change the
113  // context register, so we check that none of the clobbered
114  // registers are rsi.
115  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
116
117  // First, check if a write barrier is even needed. The tests below
118  // catch stores of Smis and stores into young gen.
119  Label done;
120  JumpIfSmi(value, &done);
121
122  RecordWriteNonSmi(object, offset, value, index);
123  bind(&done);
124
125  // Clobber all input registers when running with the debug-code flag
126  // turned on to provoke errors. This clobbering repeats the
127  // clobbering done inside RecordWriteNonSmi but it's necessary to
128  // avoid having the fast case for smis leave the registers
129  // unchanged.
130  if (FLAG_debug_code) {
131    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
132    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
133    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
134  }
135}
136
137
138void MacroAssembler::RecordWrite(Register object,
139                                 Register address,
140                                 Register value) {
141  // The compiled code assumes that record write doesn't change the
142  // context register, so we check that none of the clobbered
143  // registers are rsi.
144  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
145
146  // First, check if a write barrier is even needed. The tests below
147  // catch stores of Smis and stores into young gen.
148  Label done;
149  JumpIfSmi(value, &done);
150
151  InNewSpace(object, value, equal, &done);
152
153  RecordWriteHelper(object, address, value);
154
155  bind(&done);
156
157  // Clobber all input registers when running with the debug-code flag
158  // turned on to provoke errors.
159  if (FLAG_debug_code) {
160    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
161    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
162    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
163  }
164}
165
166
167void MacroAssembler::RecordWriteNonSmi(Register object,
168                                       int offset,
169                                       Register scratch,
170                                       Register index) {
171  Label done;
172
173  if (FLAG_debug_code) {
174    NearLabel okay;
175    JumpIfNotSmi(object, &okay);
176    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
177    bind(&okay);
178
179    if (offset == 0) {
180      // index must be int32.
181      Register tmp = index.is(rax) ? rbx : rax;
182      push(tmp);
183      movl(tmp, index);
184      cmpq(tmp, index);
185      Check(equal, "Index register for RecordWrite must be untagged int32.");
186      pop(tmp);
187    }
188  }
189
190  // Test that the object address is not in the new space. We cannot
191  // update page dirty marks for new space pages.
192  InNewSpace(object, scratch, equal, &done);
193
194  // The offset is relative to a tagged or untagged HeapObject pointer,
195  // so either offset or offset + kHeapObjectTag must be a
196  // multiple of kPointerSize.
197  ASSERT(IsAligned(offset, kPointerSize) ||
198         IsAligned(offset + kHeapObjectTag, kPointerSize));
199
200  Register dst = index;
201  if (offset != 0) {
202    lea(dst, Operand(object, offset));
203  } else {
204    // array access: calculate the destination address in the same manner as
205    // KeyedStoreIC::GenerateGeneric.
206    lea(dst, FieldOperand(object,
207                          index,
208                          times_pointer_size,
209                          FixedArray::kHeaderSize));
210  }
211  RecordWriteHelper(object, dst, scratch);
212
213  bind(&done);
214
215  // Clobber all input registers when running with the debug-code flag
216  // turned on to provoke errors.
217  if (FLAG_debug_code) {
218    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
219    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
220    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
221  }
222}
223
224void MacroAssembler::Assert(Condition cc, const char* msg) {
225  if (FLAG_debug_code) Check(cc, msg);
226}
227
228
229void MacroAssembler::AssertFastElements(Register elements) {
230  if (FLAG_debug_code) {
231    NearLabel ok;
232    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
233                Heap::kFixedArrayMapRootIndex);
234    j(equal, &ok);
235    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
236                Heap::kFixedCOWArrayMapRootIndex);
237    j(equal, &ok);
238    Abort("JSObject with fast elements map has slow elements");
239    bind(&ok);
240  }
241}
242
243
244void MacroAssembler::Check(Condition cc, const char* msg) {
245  NearLabel L;
246  j(cc, &L);
247  Abort(msg);
248  // will not return here
249  bind(&L);
250}
251
252
253void MacroAssembler::CheckStackAlignment() {
254  int frame_alignment = OS::ActivationFrameAlignment();
255  int frame_alignment_mask = frame_alignment - 1;
256  if (frame_alignment > kPointerSize) {
257    ASSERT(IsPowerOf2(frame_alignment));
258    NearLabel alignment_as_expected;
259    testq(rsp, Immediate(frame_alignment_mask));
260    j(zero, &alignment_as_expected);
261    // Abort if stack is not aligned.
262    int3();
263    bind(&alignment_as_expected);
264  }
265}
266
267
268void MacroAssembler::NegativeZeroTest(Register result,
269                                      Register op,
270                                      Label* then_label) {
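  // Typically used after an integer multiplication: a zero result combined
  // with a negative operand means the exact result was -0, which cannot be
  // represented as an integer, so control is transferred to then_label.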
271  NearLabel ok;
272  testl(result, result);
273  j(not_zero, &ok);
274  testl(op, op);
275  j(sign, then_label);
276  bind(&ok);
277}
278
279
280void MacroAssembler::Abort(const char* msg) {
281  // We want to pass the msg string like a smi to avoid GC
282  // problems, however msg is not guaranteed to be aligned
283  // properly. Instead, we pass an aligned pointer that is
284  // a proper v8 smi, but also pass the alignment difference
285  // from the real pointer as a smi.
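  // For example, with the usual one-bit smi tag: if msg happens to be an odd
  // address, p0 below is msg - 1 (which carries a smi tag) and the second
  // argument is Smi::FromInt(1), so the runtime can recover the string as
  // p0 + 1.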
286  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
287  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
288  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
289  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
290#ifdef DEBUG
291  if (msg != NULL) {
292    RecordComment("Abort message: ");
293    RecordComment(msg);
294  }
295#endif
296  // Disable stub call restrictions to always allow calls to abort.
297  set_allow_stub_calls(true);
298
299  push(rax);
300  movq(kScratchRegister, p0, RelocInfo::NONE);
301  push(kScratchRegister);
302  movq(kScratchRegister,
303       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
304       RelocInfo::NONE);
305  push(kScratchRegister);
306  CallRuntime(Runtime::kAbort, 2);
307  // will not return here
308  int3();
309}
310
311
312void MacroAssembler::CallStub(CodeStub* stub) {
313  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
314  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
315}
316
317
318MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
319  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
320  MaybeObject* result = stub->TryGetCode();
321  if (!result->IsFailure()) {
322    call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
323         RelocInfo::CODE_TARGET);
324  }
325  return result;
326}
327
328
329void MacroAssembler::TailCallStub(CodeStub* stub) {
330  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
331  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
332}
333
334
335MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
336  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
337  MaybeObject* result = stub->TryGetCode();
338  if (!result->IsFailure()) {
339    jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
340        RelocInfo::CODE_TARGET);
341  }
342  return result;
343}
344
345
346void MacroAssembler::StubReturn(int argc) {
347  ASSERT(argc >= 1 && generating_stub());
348  ret((argc - 1) * kPointerSize);
349}
350
351
352void MacroAssembler::IllegalOperation(int num_arguments) {
353  if (num_arguments > 0) {
354    addq(rsp, Immediate(num_arguments * kPointerSize));
355  }
356  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
357}
358
359
360void MacroAssembler::IndexFromHash(Register hash, Register index) {
361  // The assert checks that the constants for the maximum number of digits
362  // for an array index cached in the hash field and the number of bits
363  // reserved for it do not conflict.
364  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
365         (1 << String::kArrayIndexValueBits));
366  // We want the smi-tagged index in key. Even if we subsequently go to
367  // the slow case, converting the key to a smi is always valid.
368  // key: string key
369  // hash: key's hash field, including its array index value.
370  and_(hash, Immediate(String::kArrayIndexValueMask));
371  shr(hash, Immediate(String::kHashShift));
372  // Here we actually clobber the key which will be used if calling into
373  // runtime later. However, as the new key is the numeric value of a string
374  // key, there is no difference in using either key.
375  Integer32ToSmi(index, hash);
376}
377
378
379void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
380  CallRuntime(Runtime::FunctionForId(id), num_arguments);
381}
382
383
384MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
385                                            int num_arguments) {
386  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
387}
388
389
390void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
391  // If the expected number of arguments of the runtime function is
392  // constant, we check that the actual number of arguments matches the
393  // expectation.
394  if (f->nargs >= 0 && f->nargs != num_arguments) {
395    IllegalOperation(num_arguments);
396    return;
397  }
398
399  // TODO(1236192): Most runtime routines don't need the number of
400  // arguments passed in because it is constant. At some point we
401  // should remove this need and make the runtime routine entry code
402  // smarter.
403  Set(rax, num_arguments);
404  movq(rbx, ExternalReference(f));
405  CEntryStub ces(f->result_size);
406  CallStub(&ces);
407}
408
409
410MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
411                                            int num_arguments) {
412  if (f->nargs >= 0 && f->nargs != num_arguments) {
413    IllegalOperation(num_arguments);
414    // Since we did not call the stub, there was no allocation failure.
415    // Return some non-failure object.
416    return Heap::undefined_value();
417  }
418
419  // TODO(1236192): Most runtime routines don't need the number of
420  // arguments passed in because it is constant. At some point we
421  // should remove this need and make the runtime routine entry code
422  // smarter.
423  Set(rax, num_arguments);
424  movq(rbx, ExternalReference(f));
425  CEntryStub ces(f->result_size);
426  return TryCallStub(&ces);
427}
428
429
430void MacroAssembler::CallExternalReference(const ExternalReference& ext,
431                                           int num_arguments) {
432  Set(rax, num_arguments);
433  movq(rbx, ext);
434
435  CEntryStub stub(1);
436  CallStub(&stub);
437}
438
439
440void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
441                                               int num_arguments,
442                                               int result_size) {
443  // ----------- S t a t e -------------
444  //  -- rsp[0] : return address
445  //  -- rsp[8] : argument num_arguments - 1
446  //  ...
447  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
448  // -----------------------------------
449
450  // TODO(1236192): Most runtime routines don't need the number of
451  // arguments passed in because it is constant. At some point we
452  // should remove this need and make the runtime routine entry code
453  // smarter.
454  Set(rax, num_arguments);
455  JumpToExternalReference(ext, result_size);
456}
457
458
459void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
460                                     int num_arguments,
461                                     int result_size) {
462  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
463}
464
465
466static int Offset(ExternalReference ref0, ExternalReference ref1) {
467  int64_t offset = (ref0.address() - ref1.address());
468  // Check that the offset fits into an int.
469  ASSERT(static_cast<int>(offset) == offset);
470  return static_cast<int>(offset);
471}
472
473
474void MacroAssembler::PrepareCallApiFunction(int stack_space) {
475  EnterApiExitFrame(stack_space, 0);
476}
477
478
479void MacroAssembler::CallApiFunctionAndReturn(ApiFunction* function) {
480  Label empty_result;
481  Label prologue;
482  Label promote_scheduled_exception;
483  Label delete_allocated_handles;
484  Label leave_exit_frame;
485  Label write_back;
486
487  ExternalReference next_address =
488      ExternalReference::handle_scope_next_address();
489  const int kNextOffset = 0;
490  const int kLimitOffset = Offset(
491      ExternalReference::handle_scope_limit_address(),
492      next_address);
493  const int kLevelOffset = Offset(
494      ExternalReference::handle_scope_level_address(),
495      next_address);
496  ExternalReference scheduled_exception_address =
497      ExternalReference::scheduled_exception_address();
498
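  // All three HandleScope fields are addressed off a single base register:
  // next is at offset 0, and the limit and level offsets are the byte
  // distances of their external references from next_address, so one movq of
  // next_address below serves every access.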
499  // Allocate HandleScope in callee-save registers.
500  Register prev_next_address_reg = r14;
501  Register prev_limit_reg = rbx;
502  Register base_reg = kSmiConstantRegister;
503  movq(base_reg, next_address);
504  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
505  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
506  addl(Operand(base_reg, kLevelOffset), Immediate(1));
507  // Call the api function!
508  movq(rax,
509       reinterpret_cast<int64_t>(function->address()),
510       RelocInfo::RUNTIME_ENTRY);
511  call(rax);
512
513#ifdef _WIN64
514  // rax keeps a pointer to v8::Handle, unpack it.
515  movq(rax, Operand(rax, 0));
516#endif
517  // Check if the result handle holds 0.
518  testq(rax, rax);
519  j(zero, &empty_result);
520  // It was non-zero.  Dereference to get the result value.
521  movq(rax, Operand(rax, 0));
522  bind(&prologue);
523
524  // No more valid handles (the result handle was the last one). Restore
525  // previous handle scope.
526  subl(Operand(base_reg, kLevelOffset), Immediate(1));
527  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
528  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
529  j(not_equal, &delete_allocated_handles);
530  bind(&leave_exit_frame);
531  InitializeSmiConstantRegister();
532
533  // Check if the function scheduled an exception.
534  movq(rsi, scheduled_exception_address);
535  Cmp(Operand(rsi, 0), Factory::the_hole_value());
536  j(not_equal, &promote_scheduled_exception);
537
538  LeaveExitFrame();
539  ret(0);
540
541  bind(&promote_scheduled_exception);
542  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
543
544  bind(&empty_result);
545  // It was zero; the result is undefined.
546  Move(rax, Factory::undefined_value());
547  jmp(&prologue);
548
549  // HandleScope limit has changed. Delete allocated extensions.
550  bind(&delete_allocated_handles);
551  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
552  movq(prev_limit_reg, rax);
553  movq(rax, ExternalReference::delete_handle_scope_extensions());
554  call(rax);
555  movq(rax, prev_limit_reg);
556  jmp(&leave_exit_frame);
557}
558
559
560void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
561                                             int result_size) {
562  // Set the entry point and jump to the C entry runtime stub.
563  movq(rbx, ext);
564  CEntryStub ces(result_size);
565  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
566}
567
568
569void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
570  // Calls are not allowed in some stubs.
571  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
572
573  // Rely on the assertion to check that the number of provided
574  // arguments matches the expected number of arguments. Fake a
575  // parameter count to avoid emitting code to do the check.
576  ParameterCount expected(0);
577  GetBuiltinEntry(rdx, id);
578  InvokeCode(rdx, expected, expected, flag);
579}
580
581
582void MacroAssembler::GetBuiltinFunction(Register target,
583                                        Builtins::JavaScript id) {
584  // Load the builtins object into target register.
585  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
586  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
587  movq(target, FieldOperand(target,
588                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
589}
590
591
592void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
593  ASSERT(!target.is(rdi));
594  // Load the JavaScript builtin function from the builtins object.
595  GetBuiltinFunction(rdi, id);
596  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
597}
598
599
600void MacroAssembler::Set(Register dst, int64_t x) {
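  // Pick a compact encoding: xor for zero, a sign-extended 32-bit immediate
  // when x fits in an int32, a 32-bit move (which zero-extends) for other
  // values that fit in a uint32, and a full 64-bit immediate otherwise.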
601  if (x == 0) {
602    xorl(dst, dst);
603  } else if (is_int32(x)) {
604    movq(dst, Immediate(static_cast<int32_t>(x)));
605  } else if (is_uint32(x)) {
606    movl(dst, Immediate(static_cast<uint32_t>(x)));
607  } else {
608    movq(dst, x, RelocInfo::NONE);
609  }
610}
611
612void MacroAssembler::Set(const Operand& dst, int64_t x) {
613  if (is_int32(x)) {
614    movq(dst, Immediate(static_cast<int32_t>(x)));
615  } else {
616    movq(kScratchRegister, x, RelocInfo::NONE);
617    movq(dst, kScratchRegister);
618  }
619}
620
621// ----------------------------------------------------------------------------
622// Smi tagging, untagging and tag detection.
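//
// On x64 a smi keeps its 32-bit payload in the upper half of the word:
// Smi::FromInt(n) is n << kSmiShift with kSmiShift == 32, and the tag bit is
// zero (kSmiTag == 0), so the low 32 bits of any smi are zero. The helpers
// below rely on this layout, and kSmiConstantRegister is assumed to hold
// Smi::FromInt(1) so that small smi constants can be formed with lea.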
623
624Register MacroAssembler::GetSmiConstant(Smi* source) {
625  int value = source->value();
626  if (value == 0) {
627    xorl(kScratchRegister, kScratchRegister);
628    return kScratchRegister;
629  }
630  if (value == 1) {
631    return kSmiConstantRegister;
632  }
633  LoadSmiConstant(kScratchRegister, source);
634  return kScratchRegister;
635}
636
637void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
638  if (FLAG_debug_code) {
639    movq(dst,
640         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
641         RelocInfo::NONE);
642    cmpq(dst, kSmiConstantRegister);
643    if (allow_stub_calls()) {
644      Assert(equal, "Uninitialized kSmiConstantRegister");
645    } else {
646      NearLabel ok;
647      j(equal, &ok);
648      int3();
649      bind(&ok);
650    }
651  }
652  if (source->value() == 0) {
653    xorl(dst, dst);
654    return;
655  }
656  int value = source->value();
657  bool negative = value < 0;
658  unsigned int uvalue = negative ? -value : value;
659
660  switch (uvalue) {
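  // kSmiConstantRegister holds Smi::FromInt(1), so a single lea can produce
  // 2x, 3x, 5x and 9x of it (base plus scaled index) and, with a zeroed base,
  // 4x and 8x; all other values fall back to a full 64-bit immediate move.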
661    case 9:
662      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
663      break;
664    case 8:
665      xorl(dst, dst);
666      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
667      break;
668    case 4:
669      xorl(dst, dst);
670      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
671      break;
672    case 5:
673      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
674      break;
675    case 3:
676      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
677      break;
678    case 2:
679      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
680      break;
681    case 1:
682      movq(dst, kSmiConstantRegister);
683      break;
684    case 0:
685      UNREACHABLE();
686      return;
687    default:
688      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
689      return;
690  }
691  if (negative) {
692    neg(dst);
693  }
694}
695
696
697void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
698  ASSERT_EQ(0, kSmiTag);
699  if (!dst.is(src)) {
700    movl(dst, src);
701  }
702  shl(dst, Immediate(kSmiShift));
703}
704
705
706void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
707  if (FLAG_debug_code) {
708    testb(dst, Immediate(0x01));
709    NearLabel ok;
710    j(zero, &ok);
711    if (allow_stub_calls()) {
712      Abort("Integer32ToSmiField writing to non-smi location");
713    } else {
714      int3();
715    }
716    bind(&ok);
717  }
718  ASSERT(kSmiShift % kBitsPerByte == 0);
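  // With kSmiShift == 32 the payload occupies the upper half of the field, so
  // a 32-bit store at byte offset kSmiShift / kBitsPerByte (i.e. 4) writes the
  // payload. The lower half is not touched; it is expected to already be zero
  // because the destination holds a smi (checked above in debug mode).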
719  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
720}
721
722
723void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
724                                                Register src,
725                                                int constant) {
726  if (dst.is(src)) {
727    addq(dst, Immediate(constant));
728  } else {
729    lea(dst, Operand(src, constant));
730  }
731  shl(dst, Immediate(kSmiShift));
732}
733
734
735void MacroAssembler::SmiToInteger32(Register dst, Register src) {
736  ASSERT_EQ(0, kSmiTag);
737  if (!dst.is(src)) {
738    movq(dst, src);
739  }
740  shr(dst, Immediate(kSmiShift));
741}
742
743
744void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
745  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
746}
747
748
749void MacroAssembler::SmiToInteger64(Register dst, Register src) {
750  ASSERT_EQ(0, kSmiTag);
751  if (!dst.is(src)) {
752    movq(dst, src);
753  }
754  sar(dst, Immediate(kSmiShift));
755}
756
757
758void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
759  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
760}
761
762
763void MacroAssembler::SmiTest(Register src) {
764  testq(src, src);
765}
766
767
768void MacroAssembler::SmiCompare(Register dst, Register src) {
769  cmpq(dst, src);
770}
771
772
773void MacroAssembler::SmiCompare(Register dst, Smi* src) {
774  ASSERT(!dst.is(kScratchRegister));
775  if (src->value() == 0) {
776    testq(dst, dst);
777  } else {
778    Register constant_reg = GetSmiConstant(src);
779    cmpq(dst, constant_reg);
780  }
781}
782
783
784void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
785  cmpq(dst, src);
786}
787
788
789void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
790  cmpq(dst, src);
791}
792
793
794void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
795  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
796}
797
798
799void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
800  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
801}
802
803
804void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
805                                                           Register src,
806                                                           int power) {
807  ASSERT(power >= 0);
808  ASSERT(power < 64);
809  if (power == 0) {
810    SmiToInteger64(dst, src);
811    return;
812  }
813  if (!dst.is(src)) {
814    movq(dst, src);
815  }
816  if (power < kSmiShift) {
817    sar(dst, Immediate(kSmiShift - power));
818  } else if (power > kSmiShift) {
819    shl(dst, Immediate(power - kSmiShift));
820  }
821}
822
823
824void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
825                                                         Register src,
826                                                         int power) {
827  ASSERT((0 <= power) && (power < 32));
828  if (dst.is(src)) {
829    shr(dst, Immediate(power + kSmiShift));
830  } else {
831    UNIMPLEMENTED();  // Not used.
832  }
833}
834
835
836Condition MacroAssembler::CheckSmi(Register src) {
837  ASSERT_EQ(0, kSmiTag);
838  testb(src, Immediate(kSmiTagMask));
839  return zero;
840}
841
842
843Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
844  ASSERT_EQ(0, kSmiTag);
845  // Rotate the sign and tag bits into the two low bits and check both are zero.
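  // For example, Smi::FromInt(-1) is 0xffffffff00000000; rotating left by one
  // gives 0xfffffffe00000001, whose low two bits are 01, so the check fails,
  // while any non-negative smi rotates to a value with both low bits clear.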
846  movq(kScratchRegister, src);
847  rol(kScratchRegister, Immediate(1));
848  testb(kScratchRegister, Immediate(3));
849  return zero;
850}
851
852
853Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
854  if (first.is(second)) {
855    return CheckSmi(first);
856  }
857  ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
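  // Adding the two values adds their tag bits: two smis keep the low two bits
  // of the sum clear, while a heap object tag (low bits 01) in either operand
  // makes them non-zero, so a single test covers both registers.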
858  leal(kScratchRegister, Operand(first, second, times_1, 0));
859  testb(kScratchRegister, Immediate(0x03));
860  return zero;
861}
862
863
864Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
865                                                  Register second) {
866  if (first.is(second)) {
867    return CheckNonNegativeSmi(first);
868  }
869  movq(kScratchRegister, first);
870  or_(kScratchRegister, second);
871  rol(kScratchRegister, Immediate(1));
872  testl(kScratchRegister, Immediate(3));
873  return zero;
874}
875
876
877Condition MacroAssembler::CheckEitherSmi(Register first,
878                                         Register second,
879                                         Register scratch) {
880  if (first.is(second)) {
881    return CheckSmi(first);
882  }
883  if (scratch.is(second)) {
884    andl(scratch, first);
885  } else {
886    if (!scratch.is(first)) {
887      movl(scratch, first);
888    }
889    andl(scratch, second);
890  }
891  testb(scratch, Immediate(kSmiTagMask));
892  return zero;
893}
894
895
896Condition MacroAssembler::CheckIsMinSmi(Register src) {
897  ASSERT(!src.is(kScratchRegister));
898  // If we overflow by subtracting one, it's the minimal smi value.
899  cmpq(src, kSmiConstantRegister);
900  return overflow;
901}
902
903
904Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
905  // A 32-bit integer value can always be converted to a smi.
906  return always;
907}
908
909
910Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
911  // An unsigned 32-bit integer value is valid as long as the high bit
912  // is not set.
913  testl(src, src);
914  return positive;
915}
916
917
918void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
919  if (constant->value() == 0) {
920    if (!dst.is(src)) {
921      movq(dst, src);
922    }
923    return;
924  } else if (dst.is(src)) {
925    ASSERT(!dst.is(kScratchRegister));
926    switch (constant->value()) {
927      case 1:
928        addq(dst, kSmiConstantRegister);
929        return;
930      case 2:
931        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
932        return;
933      case 4:
934        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
935        return;
936      case 8:
937        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
938        return;
939      default:
940        Register constant_reg = GetSmiConstant(constant);
941        addq(dst, constant_reg);
942        return;
943    }
944  } else {
945    switch (constant->value()) {
946      case 1:
947        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
948        return;
949      case 2:
950        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
951        return;
952      case 4:
953        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
954        return;
955      case 8:
956        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
957        return;
958      default:
959        LoadSmiConstant(dst, constant);
960        addq(dst, src);
961        return;
962    }
963  }
964}
965
966
967void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
968  if (constant->value() != 0) {
969    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
970  }
971}
972
973
974void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
975  if (constant->value() == 0) {
976    if (!dst.is(src)) {
977      movq(dst, src);
978    }
979  } else if (dst.is(src)) {
980    ASSERT(!dst.is(kScratchRegister));
981    Register constant_reg = GetSmiConstant(constant);
982    subq(dst, constant_reg);
983  } else {
984    if (constant->value() == Smi::kMinValue) {
985      LoadSmiConstant(dst, constant);
986      // Adding and subtracting the min-value give the same result; they only
987      // differ in the overflow flag, which we don't check here.
988      addq(dst, src);
989    } else {
990      // Subtract by adding the negation.
991      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
992      addq(dst, src);
993    }
994  }
995}
996
997
998void MacroAssembler::SmiAdd(Register dst,
999                            Register src1,
1000                            Register src2) {
1001  // No overflow checking. Use only when it's known that
1002  // overflowing is impossible.
1003  ASSERT(!dst.is(src2));
1004  if (dst.is(src1)) {
1005    addq(dst, src2);
1006  } else {
1007    movq(dst, src1);
1008    addq(dst, src2);
1009  }
1010  Assert(no_overflow, "Smi addition overflow");
1011}
1012
1013
1014void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1015  // No overflow checking. Use only when it's known that
1016  // overflowing is impossible (e.g., subtracting two positive smis).
1017  ASSERT(!dst.is(src2));
1018  if (dst.is(src1)) {
1019    subq(dst, src2);
1020  } else {
1021    movq(dst, src1);
1022    subq(dst, src2);
1023  }
1024  Assert(no_overflow, "Smi subtraction overflow");
1025}
1026
1027
1028void MacroAssembler::SmiSub(Register dst,
1029                            Register src1,
1030                            const Operand& src2) {
1031  // No overflow checking. Use only when it's known that
1032  // overflowing is impossible (e.g., subtracting two positive smis).
1033  if (dst.is(src1)) {
1034    subq(dst, src2);
1035  } else {
1036    movq(dst, src1);
1037    subq(dst, src2);
1038  }
1039  Assert(no_overflow, "Smi subtraction overflow");
1040}
1041
1042
1043void MacroAssembler::SmiNot(Register dst, Register src) {
1044  ASSERT(!dst.is(kScratchRegister));
1045  ASSERT(!src.is(kScratchRegister));
1046  // Set tag and padding bits before negating, so that they are zero afterwards.
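  // Both branches first force the low 32 (tag and padding) bits to one; xor
  // and add with 0xffffffff agree here because those bits are zero in a smi.
  // The final not then clears them again and negates only the payload.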
1047  movl(kScratchRegister, Immediate(~0));
1048  if (dst.is(src)) {
1049    xor_(dst, kScratchRegister);
1050  } else {
1051    lea(dst, Operand(src, kScratchRegister, times_1, 0));
1052  }
1053  not_(dst);
1054}
1055
1056
1057void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1058  ASSERT(!dst.is(src2));
1059  if (!dst.is(src1)) {
1060    movq(dst, src1);
1061  }
1062  and_(dst, src2);
1063}
1064
1065
1066void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1067  if (constant->value() == 0) {
1068    xor_(dst, dst);
1069  } else if (dst.is(src)) {
1070    ASSERT(!dst.is(kScratchRegister));
1071    Register constant_reg = GetSmiConstant(constant);
1072    and_(dst, constant_reg);
1073  } else {
1074    LoadSmiConstant(dst, constant);
1075    and_(dst, src);
1076  }
1077}
1078
1079
1080void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1081  if (!dst.is(src1)) {
1082    movq(dst, src1);
1083  }
1084  or_(dst, src2);
1085}
1086
1087
1088void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1089  if (dst.is(src)) {
1090    ASSERT(!dst.is(kScratchRegister));
1091    Register constant_reg = GetSmiConstant(constant);
1092    or_(dst, constant_reg);
1093  } else {
1094    LoadSmiConstant(dst, constant);
1095    or_(dst, src);
1096  }
1097}
1098
1099
1100void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1101  if (!dst.is(src1)) {
1102    movq(dst, src1);
1103  }
1104  xor_(dst, src2);
1105}
1106
1107
1108void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1109  if (dst.is(src)) {
1110    ASSERT(!dst.is(kScratchRegister));
1111    Register constant_reg = GetSmiConstant(constant);
1112    xor_(dst, constant_reg);
1113  } else {
1114    LoadSmiConstant(dst, constant);
1115    xor_(dst, src);
1116  }
1117}
1118
1119
1120void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1121                                                     Register src,
1122                                                     int shift_value) {
1123  ASSERT(is_uint5(shift_value));
1124  if (shift_value > 0) {
1125    if (dst.is(src)) {
1126      sar(dst, Immediate(shift_value + kSmiShift));
1127      shl(dst, Immediate(kSmiShift));
1128    } else {
1129      UNIMPLEMENTED();  // Not used.
1130    }
1131  }
1132}
1133
1134
1135void MacroAssembler::SmiShiftLeftConstant(Register dst,
1136                                          Register src,
1137                                          int shift_value) {
1138  if (!dst.is(src)) {
1139    movq(dst, src);
1140  }
1141  if (shift_value > 0) {
1142    shl(dst, Immediate(shift_value));
1143  }
1144}
1145
1146
1147void MacroAssembler::SmiShiftLeft(Register dst,
1148                                  Register src1,
1149                                  Register src2) {
1150  ASSERT(!dst.is(rcx));
1152  // Untag shift amount.
1153  if (!dst.is(src1)) {
1154    movq(dst, src1);
1155  }
1156  SmiToInteger32(rcx, src2);
1157  // The shift amount uses only the lower 5 bits, not 6 as the shl opcode does.
1158  and_(rcx, Immediate(0x1f));
1159  shl_cl(dst);
1160}
1161
1162
1163void MacroAssembler::SmiShiftArithmeticRight(Register dst,
1164                                             Register src1,
1165                                             Register src2) {
1166  ASSERT(!dst.is(kScratchRegister));
1167  ASSERT(!src1.is(kScratchRegister));
1168  ASSERT(!src2.is(kScratchRegister));
1169  ASSERT(!dst.is(rcx));
1170  if (src1.is(rcx)) {
1171    movq(kScratchRegister, src1);
1172  } else if (src2.is(rcx)) {
1173    movq(kScratchRegister, src2);
1174  }
1175  if (!dst.is(src1)) {
1176    movq(dst, src1);
1177  }
1178  SmiToInteger32(rcx, src2);
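  // rcx now holds the untagged shift amount (at most 31 for smi shifts), so
  // or'ing in kSmiShift lets the single sar below both untag src1 (shift by
  // 32) and apply the requested shift; the shl afterwards re-tags the result.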
1179  orl(rcx, Immediate(kSmiShift));
1180  sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
1181  shl(dst, Immediate(kSmiShift));
1182  if (src1.is(rcx)) {
1183    movq(src1, kScratchRegister);
1184  } else if (src2.is(rcx)) {
1185    movq(src2, kScratchRegister);
1186  }
1187}
1188
1189
1190SmiIndex MacroAssembler::SmiToIndex(Register dst,
1191                                    Register src,
1192                                    int shift) {
1193  ASSERT(is_uint6(shift));
1194  // There is a possible optimization if shift is in the range 60-63, but that
1195  // will (and must) never happen.
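  // The payload sits at bit kSmiShift, so for instance shift == 3 turns the
  // sar below into a shift by kSmiShift - 3, leaving the untagged value
  // already multiplied by 8 and ready for pointer-size scaled indexing.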
1196  if (!dst.is(src)) {
1197    movq(dst, src);
1198  }
1199  if (shift < kSmiShift) {
1200    sar(dst, Immediate(kSmiShift - shift));
1201  } else {
1202    shl(dst, Immediate(shift - kSmiShift));
1203  }
1204  return SmiIndex(dst, times_1);
1205}
1206
1207SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
1208                                            Register src,
1209                                            int shift) {
1210  // Register src holds a positive smi.
1211  ASSERT(is_uint6(shift));
1212  if (!dst.is(src)) {
1213    movq(dst, src);
1214  }
1215  neg(dst);
1216  if (shift < kSmiShift) {
1217    sar(dst, Immediate(kSmiShift - shift));
1218  } else {
1219    shl(dst, Immediate(shift - kSmiShift));
1220  }
1221  return SmiIndex(dst, times_1);
1222}
1223
1224
1225void MacroAssembler::Move(Register dst, Register src) {
1226  if (!dst.is(src)) {
1227    movq(dst, src);
1228  }
1229}
1230
1231
1232
1233
1234void MacroAssembler::Move(Register dst, Handle<Object> source) {
1235  ASSERT(!source->IsFailure());
1236  if (source->IsSmi()) {
1237    Move(dst, Smi::cast(*source));
1238  } else {
1239    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
1240  }
1241}
1242
1243
1244void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
1245  ASSERT(!source->IsFailure());
1246  if (source->IsSmi()) {
1247    Move(dst, Smi::cast(*source));
1248  } else {
1249    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1250    movq(dst, kScratchRegister);
1251  }
1252}
1253
1254
1255void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1256  if (source->IsSmi()) {
1257    SmiCompare(dst, Smi::cast(*source));
1258  } else {
1259    Move(kScratchRegister, source);
1260    cmpq(dst, kScratchRegister);
1261  }
1262}
1263
1264
1265void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
1266  if (source->IsSmi()) {
1267    SmiCompare(dst, Smi::cast(*source));
1268  } else {
1269    ASSERT(source->IsHeapObject());
1270    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1271    cmpq(dst, kScratchRegister);
1272  }
1273}
1274
1275
1276void MacroAssembler::Push(Handle<Object> source) {
1277  if (source->IsSmi()) {
1278    Push(Smi::cast(*source));
1279  } else {
1280    ASSERT(source->IsHeapObject());
1281    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1282    push(kScratchRegister);
1283  }
1284}
1285
1286
1287void MacroAssembler::Push(Smi* source) {
1288  intptr_t smi = reinterpret_cast<intptr_t>(source);
1289  if (is_int32(smi)) {
1290    push(Immediate(static_cast<int32_t>(smi)));
1291  } else {
1292    Register constant = GetSmiConstant(source);
1293    push(constant);
1294  }
1295}
1296
1297
1298void MacroAssembler::Drop(int stack_elements) {
1299  if (stack_elements > 0) {
1300    addq(rsp, Immediate(stack_elements * kPointerSize));
1301  }
1302}
1303
1304
1305void MacroAssembler::Test(const Operand& src, Smi* source) {
1306  testl(Operand(src, kIntSize), Immediate(source->value()));
1307}
1308
1309
1310void MacroAssembler::Jump(ExternalReference ext) {
1311  movq(kScratchRegister, ext);
1312  jmp(kScratchRegister);
1313}
1314
1315
1316void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1317  movq(kScratchRegister, destination, rmode);
1318  jmp(kScratchRegister);
1319}
1320
1321
1322void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
1323  // TODO(X64): Inline this
1324  jmp(code_object, rmode);
1325}
1326
1327
1328void MacroAssembler::Call(ExternalReference ext) {
1329  movq(kScratchRegister, ext);
1330  call(kScratchRegister);
1331}
1332
1333
1334void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1335  movq(kScratchRegister, destination, rmode);
1336  call(kScratchRegister);
1337}
1338
1339
1340void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1341  ASSERT(RelocInfo::IsCodeTarget(rmode));
1342  call(code_object, rmode);
1343}
1344
1345
1346void MacroAssembler::PushTryHandler(CodeLocation try_location,
1347                                    HandlerType type) {
1348  // Adjust this code if not the case.
1349  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1350
1351  // The pc (return address) is already on TOS.  This code pushes state,
1352  // frame pointer and current handler.  Check that they are expected
1353  // next on the stack, in that order.
1354  ASSERT_EQ(StackHandlerConstants::kStateOffset,
1355            StackHandlerConstants::kPCOffset - kPointerSize);
1356  ASSERT_EQ(StackHandlerConstants::kFPOffset,
1357            StackHandlerConstants::kStateOffset - kPointerSize);
1358  ASSERT_EQ(StackHandlerConstants::kNextOffset,
1359            StackHandlerConstants::kFPOffset - kPointerSize);
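  // After the pushes below the handler occupies four words (kNextOffset is 0):
  //   rsp[0]  : next handler in the chain
  //   rsp[8]  : frame pointer (rbp), or NULL for JS entry handlers
  //   rsp[16] : state (TRY_CATCH, TRY_FINALLY or ENTRY)
  //   rsp[24] : return address, already on the stack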
1360
1361  if (try_location == IN_JAVASCRIPT) {
1362    if (type == TRY_CATCH_HANDLER) {
1363      push(Immediate(StackHandler::TRY_CATCH));
1364    } else {
1365      push(Immediate(StackHandler::TRY_FINALLY));
1366    }
1367    push(rbp);
1368  } else {
1369    ASSERT(try_location == IN_JS_ENTRY);
1370    // The frame pointer does not point to a JS frame so we save NULL
1371    // for rbp. We expect the code throwing an exception to check rbp
1372    // before dereferencing it to restore the context.
1373    push(Immediate(StackHandler::ENTRY));
1374    push(Immediate(0));  // NULL frame pointer.
1375  }
1376  // Save the current handler.
1377  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1378  push(Operand(kScratchRegister, 0));
1379  // Link this handler.
1380  movq(Operand(kScratchRegister, 0), rsp);
1381}
1382
1383
1384void MacroAssembler::PopTryHandler() {
1385  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
1386  // Unlink this handler.
1387  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1388  pop(Operand(kScratchRegister, 0));
1389  // Remove the remaining fields.
1390  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1391}
1392
1393
1394void MacroAssembler::Ret() {
1395  ret(0);
1396}
1397
1398
1399void MacroAssembler::FCmp() {
1400  fucomip();
1401  fstp(0);
1402}
1403
1404
1405void MacroAssembler::CmpObjectType(Register heap_object,
1406                                   InstanceType type,
1407                                   Register map) {
1408  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1409  CmpInstanceType(map, type);
1410}
1411
1412
1413void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
1414  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
1415       Immediate(static_cast<int8_t>(type)));
1416}
1417
1418
1419void MacroAssembler::CheckMap(Register obj,
1420                              Handle<Map> map,
1421                              Label* fail,
1422                              bool is_heap_object) {
1423  if (!is_heap_object) {
1424    JumpIfSmi(obj, fail);
1425  }
1426  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
1427  j(not_equal, fail);
1428}
1429
1430
1431void MacroAssembler::AbortIfNotNumber(Register object) {
1432  NearLabel ok;
1433  Condition is_smi = CheckSmi(object);
1434  j(is_smi, &ok);
1435  Cmp(FieldOperand(object, HeapObject::kMapOffset),
1436      Factory::heap_number_map());
1437  Assert(equal, "Operand not a number");
1438  bind(&ok);
1439}
1440
1441
1442void MacroAssembler::AbortIfSmi(Register object) {
1444  Condition is_smi = CheckSmi(object);
1445  Assert(NegateCondition(is_smi), "Operand is a smi");
1446}
1447
1448
1449void MacroAssembler::AbortIfNotSmi(Register object) {
1451  Condition is_smi = CheckSmi(object);
1452  Assert(is_smi, "Operand is not a smi");
1453}
1454
1455
1456void MacroAssembler::AbortIfNotRootValue(Register src,
1457                                         Heap::RootListIndex root_value_index,
1458                                         const char* message) {
1459  ASSERT(!src.is(kScratchRegister));
1460  LoadRoot(kScratchRegister, root_value_index);
1461  cmpq(src, kScratchRegister);
1462  Check(equal, message);
1463}
1464
1465
1466
1467Condition MacroAssembler::IsObjectStringType(Register heap_object,
1468                                             Register map,
1469                                             Register instance_type) {
1470  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1471  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
1472  ASSERT(kNotStringTag != 0);
1473  testb(instance_type, Immediate(kIsNotStringMask));
1474  return zero;
1475}
1476
1477
1478void MacroAssembler::TryGetFunctionPrototype(Register function,
1479                                             Register result,
1480                                             Label* miss) {
1481  // Check that the receiver isn't a smi.
1482  testl(function, Immediate(kSmiTagMask));
1483  j(zero, miss);
1484
1485  // Check that the function really is a function.
1486  CmpObjectType(function, JS_FUNCTION_TYPE, result);
1487  j(not_equal, miss);
1488
1489  // Make sure that the function has an instance prototype.
1490  NearLabel non_instance;
1491  testb(FieldOperand(result, Map::kBitFieldOffset),
1492        Immediate(1 << Map::kHasNonInstancePrototype));
1493  j(not_zero, &non_instance);
1494
1495  // Get the prototype or initial map from the function.
1496  movq(result,
1497       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1498
1499  // If the prototype or initial map is the hole, don't return it and
1500  // simply miss the cache instead. This will allow us to allocate a
1501  // prototype object on-demand in the runtime system.
1502  CompareRoot(result, Heap::kTheHoleValueRootIndex);
1503  j(equal, miss);
1504
1505  // If the function does not have an initial map, we're done.
1506  NearLabel done;
1507  CmpObjectType(result, MAP_TYPE, kScratchRegister);
1508  j(not_equal, &done);
1509
1510  // Get the prototype from the initial map.
1511  movq(result, FieldOperand(result, Map::kPrototypeOffset));
1512  jmp(&done);
1513
1514  // Non-instance prototype: Fetch prototype from constructor field
1515  // in initial map.
1516  bind(&non_instance);
1517  movq(result, FieldOperand(result, Map::kConstructorOffset));
1518
1519  // All done.
1520  bind(&done);
1521}
1522
1523
1524void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
1525  if (FLAG_native_code_counters && counter->Enabled()) {
1526    movq(kScratchRegister, ExternalReference(counter));
1527    movl(Operand(kScratchRegister, 0), Immediate(value));
1528  }
1529}
1530
1531
1532void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
1533  ASSERT(value > 0);
1534  if (FLAG_native_code_counters && counter->Enabled()) {
1535    movq(kScratchRegister, ExternalReference(counter));
1536    Operand operand(kScratchRegister, 0);
1537    if (value == 1) {
1538      incl(operand);
1539    } else {
1540      addl(operand, Immediate(value));
1541    }
1542  }
1543}
1544
1545
1546void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
1547  ASSERT(value > 0);
1548  if (FLAG_native_code_counters && counter->Enabled()) {
1549    movq(kScratchRegister, ExternalReference(counter));
1550    Operand operand(kScratchRegister, 0);
1551    if (value == 1) {
1552      decl(operand);
1553    } else {
1554      subl(operand, Immediate(value));
1555    }
1556  }
1557}
1558
1559
1560#ifdef ENABLE_DEBUGGER_SUPPORT
1561void MacroAssembler::DebugBreak() {
1562  ASSERT(allow_stub_calls());
1563  xor_(rax, rax);  // no arguments
1564  movq(rbx, ExternalReference(Runtime::kDebugBreak));
1565  CEntryStub ces(1);
1566  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1567}
1568#endif  // ENABLE_DEBUGGER_SUPPORT
1569
1570
1571void MacroAssembler::InvokeCode(Register code,
1572                                const ParameterCount& expected,
1573                                const ParameterCount& actual,
1574                                InvokeFlag flag) {
1575  NearLabel done;
1576  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
1577  if (flag == CALL_FUNCTION) {
1578    call(code);
1579  } else {
1580    ASSERT(flag == JUMP_FUNCTION);
1581    jmp(code);
1582  }
1583  bind(&done);
1584}
1585
1586
1587void MacroAssembler::InvokeCode(Handle<Code> code,
1588                                const ParameterCount& expected,
1589                                const ParameterCount& actual,
1590                                RelocInfo::Mode rmode,
1591                                InvokeFlag flag) {
1592  NearLabel done;
1593  Register dummy = rax;
1594  InvokePrologue(expected, actual, code, dummy, &done, flag);
1595  if (flag == CALL_FUNCTION) {
1596    Call(code, rmode);
1597  } else {
1598    ASSERT(flag == JUMP_FUNCTION);
1599    Jump(code, rmode);
1600  }
1601  bind(&done);
1602}
1603
1604
1605void MacroAssembler::InvokeFunction(Register function,
1606                                    const ParameterCount& actual,
1607                                    InvokeFlag flag) {
1608  ASSERT(function.is(rdi));
1609  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
1610  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
1611  movsxlq(rbx,
1612          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
1613  // Load the function's code entry (the start of the executable code,
1614  // just past the Code object header) into rdx.
1615  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
1616
1617  ParameterCount expected(rbx);
1618  InvokeCode(rdx, expected, actual, flag);
1619}
1620
1621
1622void MacroAssembler::InvokeFunction(JSFunction* function,
1623                                    const ParameterCount& actual,
1624                                    InvokeFlag flag) {
1625  ASSERT(function->is_compiled());
1626  // Get the function and set up the context.
1627  Move(rdi, Handle<JSFunction>(function));
1628  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
1629
1630  // Invoke the cached code.
1631  Handle<Code> code(function->code());
1632  ParameterCount expected(function->shared()->formal_parameter_count());
1633  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
1634}
1635
1636
1637void MacroAssembler::EnterFrame(StackFrame::Type type) {
1638  push(rbp);
1639  movq(rbp, rsp);
1640  push(rsi);  // Context.
1641  Push(Smi::FromInt(type));
1642  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1643  push(kScratchRegister);
1644  if (FLAG_debug_code) {
1645    movq(kScratchRegister,
1646         Factory::undefined_value(),
1647         RelocInfo::EMBEDDED_OBJECT);
1648    cmpq(Operand(rsp, 0), kScratchRegister);
1649    Check(not_equal, "code object not properly patched");
1650  }
1651}
1652
1653
1654void MacroAssembler::LeaveFrame(StackFrame::Type type) {
1655  if (FLAG_debug_code) {
1656    Move(kScratchRegister, Smi::FromInt(type));
1657    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
1658    Check(equal, "stack frame types must match");
1659  }
1660  movq(rsp, rbp);
1661  pop(rbp);
1662}
1663
1664
1665void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
1666  // Set up the frame structure on the stack.
1667  // All constants are relative to the frame pointer of the exit frame.
1668  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
1669  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
1670  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
1671  push(rbp);
1672  movq(rbp, rsp);
1673
1674  // Reserve room for entry stack pointer and push the code object.
1675  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
1676  push(Immediate(0));  // Saved entry sp, patched before call.
1677  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1678  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
1679
1680  // Save the frame pointer and the context in top.
1681  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
1682  ExternalReference context_address(Top::k_context_address);
1683  if (save_rax) {
1684    movq(r14, rax);  // Back up rax before we use it.
1685  }
1686
1687  movq(rax, rbp);
1688  store_rax(c_entry_fp_address);
1689  movq(rax, rsi);
1690  store_rax(context_address);
1691}
1692
1693void MacroAssembler::EnterExitFrameEpilogue(int result_size,
1694                                            int argc) {
1695#ifdef _WIN64
1696  // Reserve space on stack for result and argument structures, if necessary.
1697  int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
1698  // Reserve space for the Arguments object.  The Windows 64-bit ABI
1699  // requires us to pass this structure as a pointer to its location on
1700  // the stack.  The structure contains 2 values.
1701  int argument_stack_space = argc * kPointerSize;
1702  // We also need backing space for 4 parameters, even though
1703  // we only pass one or two parameters, and they are passed in registers.
1704  int argument_mirror_space = 4 * kPointerSize;
1705  int total_stack_space =
1706      argument_mirror_space + argument_stack_space + result_stack_space;
1707  subq(rsp, Immediate(total_stack_space));
1708#endif
1709
1710  // Get the required frame alignment for the OS.
1711  static const int kFrameAlignment = OS::ActivationFrameAlignment();
1712  if (kFrameAlignment > 0) {
1713    ASSERT(IsPowerOf2(kFrameAlignment));
1714    movq(kScratchRegister, Immediate(-kFrameAlignment));
1715    and_(rsp, kScratchRegister);
1716  }
1717
1718  // Patch the saved entry sp.
1719  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
1720}
1721
1722
1723void MacroAssembler::EnterExitFrame(int result_size) {
1724  EnterExitFramePrologue(true);
1725
1726  // Set up argv in callee-saved register r12. It is reused in LeaveExitFrame,
1727  // so it must be retained across the C-call.
1728  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
1729  lea(r12, Operand(rbp, r14, times_pointer_size, offset));
1730
1731  EnterExitFrameEpilogue(result_size, 2);
1732}
1733
1734
1735void MacroAssembler::EnterApiExitFrame(int stack_space,
1736                                       int argc,
1737                                       int result_size) {
1738  EnterExitFramePrologue(false);
1739
1740  // Set up argv in callee-saved register r12. It is reused in LeaveExitFrame,
1741  // so it must be retained across the C-call.
1742  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
1743  lea(r12, Operand(rbp, (stack_space * kPointerSize) + offset));
1744
1745  EnterExitFrameEpilogue(result_size, argc);
1746}
1747
1748
1749void MacroAssembler::LeaveExitFrame(int result_size) {
1750  // Registers:
1751  // r12 : argv
1752
1753  // Get the return address from the stack and restore the frame pointer.
1754  movq(rcx, Operand(rbp, 1 * kPointerSize));
1755  movq(rbp, Operand(rbp, 0 * kPointerSize));
1756
1757  // Pop everything up to and including the arguments and the receiver
1758  // from the caller stack.
1759  lea(rsp, Operand(r12, 1 * kPointerSize));
1760
1761  // Restore current context from top and clear it in debug mode.
1762  ExternalReference context_address(Top::k_context_address);
1763  movq(kScratchRegister, context_address);
1764  movq(rsi, Operand(kScratchRegister, 0));
1765#ifdef DEBUG
1766  movq(Operand(kScratchRegister, 0), Immediate(0));
1767#endif
1768
1769  // Push the return address to get ready to return.
1770  push(rcx);
1771
1772  // Clear the top frame.
1773  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
1774  movq(kScratchRegister, c_entry_fp_address);
1775  movq(Operand(kScratchRegister, 0), Immediate(0));
1776}
1777
1778
1779void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1780                                            Register scratch,
1781                                            Label* miss) {
1782  Label same_contexts;
1783
1784  ASSERT(!holder_reg.is(scratch));
1785  ASSERT(!scratch.is(kScratchRegister));
1786  // Load current lexical context from the stack frame.
1787  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
1788
1789  // When generating debug code, make sure the lexical context is set.
1790  if (FLAG_debug_code) {
1791    cmpq(scratch, Immediate(0));
1792    Check(not_equal, "we should not have an empty lexical context");
1793  }
1794  // Load the global context of the current context.
1795  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
1796  movq(scratch, FieldOperand(scratch, offset));
1797  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
1798
1799  // Check the context is a global context.
1800  if (FLAG_debug_code) {
1801    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
1802        Factory::global_context_map());
1803    Check(equal, "JSGlobalObject::global_context should be a global context.");
1804  }
1805
1806  // Check if both contexts are the same.
1807  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
1808  j(equal, &same_contexts);
1809
1810  // Compare security tokens.
1811  // Check that the security token in the calling global object is
1812  // compatible with the security token in the receiving global
1813  // object.
1814
1815  // Check the context is a global context.
1816  if (FLAG_debug_code) {
1817    // Preserve original value of holder_reg.
1818    push(holder_reg);
1819    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
1820    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
1821    Check(not_equal, "JSGlobalProxy::context() should not be null.");
1822
1823    // Read the first word and compare it to global_context_map().
1824    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
1825    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
1826    Check(equal, "JSGlobalObject::global_context should be a global context.");
1827    pop(holder_reg);
1828  }
1829
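  // Sketch of the comparison below: kScratchRegister receives the receiving
  // global proxy's context, and the SECURITY_TOKEN slot of that context is
  // compared against the same slot of the calling global context held in
  // scratch; a mismatch jumps to the miss label.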
1830  movq(kScratchRegister,
1831       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
1832  int token_offset =
1833      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
1834  movq(scratch, FieldOperand(scratch, token_offset));
1835  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
1836  j(not_equal, miss);
1837
1838  bind(&same_contexts);
1839}
1840
1841
1842void MacroAssembler::LoadAllocationTopHelper(Register result,
1843                                             Register result_end,
1844                                             Register scratch,
1845                                             AllocationFlags flags) {
1846  ExternalReference new_space_allocation_top =
1847      ExternalReference::new_space_allocation_top_address();
1848
1849  // Just return if allocation top is already known.
1850  if ((flags & RESULT_CONTAINS_TOP) != 0) {
1851    // No use of scratch if allocation top is provided.
1852    ASSERT(!scratch.is_valid());
1853#ifdef DEBUG
1854    // Assert that result actually contains top on entry.
1855    movq(kScratchRegister, new_space_allocation_top);
1856    cmpq(result, Operand(kScratchRegister, 0));
1857    Check(equal, "Unexpected allocation top");
1858#endif
1859    return;
1860  }
1861
1862  // Move address of new object to result. Use scratch register if available,
1863  // and keep address in scratch until call to UpdateAllocationTopHelper.
1864  if (scratch.is_valid()) {
1865    ASSERT(!scratch.is(result_end));
1866    movq(scratch, new_space_allocation_top);
1867    movq(result, Operand(scratch, 0));
1868  } else if (result.is(rax)) {
1869    load_rax(new_space_allocation_top);
1870  } else {
1871    movq(kScratchRegister, new_space_allocation_top);
1872    movq(result, Operand(kScratchRegister, 0));
1873  }
1874}
1875
1876
1877void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
1878                                               Register scratch) {
1879  if (FLAG_debug_code) {
1880    testq(result_end, Immediate(kObjectAlignmentMask));
1881    Check(zero, "Unaligned allocation in new space");
1882  }
1883
1884  ExternalReference new_space_allocation_top =
1885      ExternalReference::new_space_allocation_top_address();
1886
1887  // Update new top.
1888  if (result_end.is(rax)) {
1889    // rax can be stored directly to a memory location.
1890    store_rax(new_space_allocation_top);
1891  } else {
1892    // Register required - use scratch provided if available.
1893    if (scratch.is_valid()) {
1894      movq(Operand(scratch, 0), result_end);
1895    } else {
1896      movq(kScratchRegister, new_space_allocation_top);
1897      movq(Operand(kScratchRegister, 0), result_end);
1898    }
1899  }
1900}
1901
1902
1903void MacroAssembler::AllocateInNewSpace(int object_size,
1904                                        Register result,
1905                                        Register result_end,
1906                                        Register scratch,
1907                                        Label* gc_required,
1908                                        AllocationFlags flags) {
1909  if (!FLAG_inline_new) {
1910    if (FLAG_debug_code) {
1911      // Trash the registers to simulate an allocation failure.
1912      movl(result, Immediate(0x7091));
1913      if (result_end.is_valid()) {
1914        movl(result_end, Immediate(0x7191));
1915      }
1916      if (scratch.is_valid()) {
1917        movl(scratch, Immediate(0x7291));
1918      }
1919    }
1920    jmp(gc_required);
1921    return;
1922  }
1923  ASSERT(!result.is(result_end));
1924
1925  // Load address of new object into result.
1926  LoadAllocationTopHelper(result, result_end, scratch, flags);
1927
1928  // Calculate new top and bail out if new space is exhausted.
1929  ExternalReference new_space_allocation_limit =
1930      ExternalReference::new_space_allocation_limit_address();
1931
1932  Register top_reg = result_end.is_valid() ? result_end : result;
1933
1934  if (top_reg.is(result)) {
1935    addq(top_reg, Immediate(object_size));
1936  } else {
1937    lea(top_reg, Operand(result, object_size));
1938  }
1939  movq(kScratchRegister, new_space_allocation_limit);
1940  cmpq(top_reg, Operand(kScratchRegister, 0));
1941  j(above, gc_required);
1942
1943  // Update allocation top.
1944  UpdateAllocationTopHelper(top_reg, scratch);
1945
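  // If result doubled as top_reg it now holds the new allocation top, so move
  // it back down to the start of the object (keeping the heap-object tag when
  // TAG_OBJECT is requested).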
1946  if (top_reg.is(result)) {
1947    if ((flags & TAG_OBJECT) != 0) {
1948      subq(result, Immediate(object_size - kHeapObjectTag));
1949    } else {
1950      subq(result, Immediate(object_size));
1951    }
1952  } else if ((flags & TAG_OBJECT) != 0) {
1953    // Tag the result if requested.
1954    addq(result, Immediate(kHeapObjectTag));
1955  }
1956}
1957
1958
1959void MacroAssembler::AllocateInNewSpace(int header_size,
1960                                        ScaleFactor element_size,
1961                                        Register element_count,
1962                                        Register result,
1963                                        Register result_end,
1964                                        Register scratch,
1965                                        Label* gc_required,
1966                                        AllocationFlags flags) {
1967  if (!FLAG_inline_new) {
1968    if (FLAG_debug_code) {
1969      // Trash the registers to simulate an allocation failure.
1970      movl(result, Immediate(0x7091));
1971      movl(result_end, Immediate(0x7191));
1972      if (scratch.is_valid()) {
1973        movl(scratch, Immediate(0x7291));
1974      }
1975      // Register element_count is not modified by the function.
1976    }
1977    jmp(gc_required);
1978    return;
1979  }
1980  ASSERT(!result.is(result_end));
1981
1982  // Load address of new object into result.
1983  LoadAllocationTopHelper(result, result_end, scratch, flags);
1984
1985  // Calculate new top and bail out if new space is exhausted.
1986  ExternalReference new_space_allocation_limit =
1987      ExternalReference::new_space_allocation_limit_address();
1988  lea(result_end, Operand(result, element_count, element_size, header_size));
1989  movq(kScratchRegister, new_space_allocation_limit);
1990  cmpq(result_end, Operand(kScratchRegister, 0));
1991  j(above, gc_required);
1992
1993  // Update allocation top.
1994  UpdateAllocationTopHelper(result_end, scratch);
1995
1996  // Tag the result if requested.
1997  if ((flags & TAG_OBJECT) != 0) {
1998    addq(result, Immediate(kHeapObjectTag));
1999  }
2000}
2001
2002
2003void MacroAssembler::AllocateInNewSpace(Register object_size,
2004                                        Register result,
2005                                        Register result_end,
2006                                        Register scratch,
2007                                        Label* gc_required,
2008                                        AllocationFlags flags) {
2009  if (!FLAG_inline_new) {
2010    if (FLAG_debug_code) {
2011      // Trash the registers to simulate an allocation failure.
2012      movl(result, Immediate(0x7091));
2013      movl(result_end, Immediate(0x7191));
2014      if (scratch.is_valid()) {
2015        movl(scratch, Immediate(0x7291));
2016      }
2017      // object_size is left unchanged by this function.
2018    }
2019    jmp(gc_required);
2020    return;
2021  }
2022  ASSERT(!result.is(result_end));
2023
2024  // Load address of new object into result.
2025  LoadAllocationTopHelper(result, result_end, scratch, flags);
2026
2027  // Calculate new top and bail out if new space is exhausted.
2028  ExternalReference new_space_allocation_limit =
2029      ExternalReference::new_space_allocation_limit_address();
2030  if (!object_size.is(result_end)) {
2031    movq(result_end, object_size);
2032  }
2033  addq(result_end, result);
2034  movq(kScratchRegister, new_space_allocation_limit);
2035  cmpq(result_end, Operand(kScratchRegister, 0));
2036  j(above, gc_required);
2037
2038  // Update allocation top.
2039  UpdateAllocationTopHelper(result_end, scratch);
2040
2041  // Tag the result if requested.
2042  if ((flags & TAG_OBJECT) != 0) {
2043    addq(result, Immediate(kHeapObjectTag));
2044  }
2045}
2046
2047
2048void MacroAssembler::UndoAllocationInNewSpace(Register object) {
2049  ExternalReference new_space_allocation_top =
2050      ExternalReference::new_space_allocation_top_address();
2051
2052  // Make sure the object has no tag before resetting top.
2053  and_(object, Immediate(~kHeapObjectTagMask));
2054  movq(kScratchRegister, new_space_allocation_top);
2055#ifdef DEBUG
2056  cmpq(object, Operand(kScratchRegister, 0));
2057  Check(below, "Undo allocation of non allocated memory");
2058#endif
2059  movq(Operand(kScratchRegister, 0), object);
2060}
2061
2062
2063void MacroAssembler::AllocateHeapNumber(Register result,
2064                                        Register scratch,
2065                                        Label* gc_required) {
2066  // Allocate heap number in new space.
2067  AllocateInNewSpace(HeapNumber::kSize,
2068                     result,
2069                     scratch,
2070                     no_reg,
2071                     gc_required,
2072                     TAG_OBJECT);
2073
2074  // Set the map.
2075  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
2076  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2077}
2078
2079
2080void MacroAssembler::AllocateTwoByteString(Register result,
2081                                           Register length,
2082                                           Register scratch1,
2083                                           Register scratch2,
2084                                           Register scratch3,
2085                                           Label* gc_required) {
2086  // Calculate the number of bytes needed for the characters in the string while
2087  // observing object alignment.
2088  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
2089                               kObjectAlignmentMask;
2090  ASSERT(kShortSize == 2);
2091  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
2092  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
2093                kHeaderAlignment));
2094  and_(scratch1, Immediate(~kObjectAlignmentMask));
2095  if (kHeaderAlignment > 0) {
2096    subq(scratch1, Immediate(kHeaderAlignment));
2097  }
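  // Illustrative arithmetic, assuming kObjectAlignmentMask == kPointerSize - 1
  // and an already-aligned header (kHeaderAlignment == 0): for length == 3 the
  // lea above produces 6 + kObjectAlignmentMask, and the mask rounds this down
  // to 8, i.e. the character payload size rounded up to object alignment.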
2098
2099  // Allocate two byte string in new space.
2100  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
2101                     times_1,
2102                     scratch1,
2103                     result,
2104                     scratch2,
2105                     scratch3,
2106                     gc_required,
2107                     TAG_OBJECT);
2108
2109  // Set the map, length and hash field.
2110  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
2111  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2112  Integer32ToSmi(scratch1, length);
2113  movq(FieldOperand(result, String::kLengthOffset), scratch1);
2114  movq(FieldOperand(result, String::kHashFieldOffset),
2115       Immediate(String::kEmptyHashField));
2116}
2117
2118
2119void MacroAssembler::AllocateAsciiString(Register result,
2120                                         Register length,
2121                                         Register scratch1,
2122                                         Register scratch2,
2123                                         Register scratch3,
2124                                         Label* gc_required) {
2125  // Calculate the number of bytes needed for the characters in the string while
2126  // observing object alignment.
2127  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
2128                               kObjectAlignmentMask;
2129  movl(scratch1, length);
2130  ASSERT(kCharSize == 1);
2131  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
2132  and_(scratch1, Immediate(~kObjectAlignmentMask));
2133  if (kHeaderAlignment > 0) {
2134    subq(scratch1, Immediate(kHeaderAlignment));
2135  }
2136
2137  // Allocate ascii string in new space.
2138  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
2139                     times_1,
2140                     scratch1,
2141                     result,
2142                     scratch2,
2143                     scratch3,
2144                     gc_required,
2145                     TAG_OBJECT);
2146
2147  // Set the map, length and hash field.
2148  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
2149  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2150  Integer32ToSmi(scratch1, length);
2151  movq(FieldOperand(result, String::kLengthOffset), scratch1);
2152  movq(FieldOperand(result, String::kHashFieldOffset),
2153       Immediate(String::kEmptyHashField));
2154}
2155
2156
2157void MacroAssembler::AllocateConsString(Register result,
2158                                        Register scratch1,
2159                                        Register scratch2,
2160                                        Label* gc_required) {
2161  // Allocate cons string in new space.
2162  AllocateInNewSpace(ConsString::kSize,
2163                     result,
2164                     scratch1,
2165                     scratch2,
2166                     gc_required,
2167                     TAG_OBJECT);
2168
2169  // Set the map. The other fields are left uninitialized.
2170  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
2171  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2172}
2173
2174
2175void MacroAssembler::AllocateAsciiConsString(Register result,
2176                                             Register scratch1,
2177                                             Register scratch2,
2178                                             Label* gc_required) {
2179  // Allocate ascii cons string in new space.
2180  AllocateInNewSpace(ConsString::kSize,
2181                     result,
2182                     scratch1,
2183                     scratch2,
2184                     gc_required,
2185                     TAG_OBJECT);
2186
2187  // Set the map. The other fields are left uninitialized.
2188  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
2189  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2190}
2191
2192
2193void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2194  if (context_chain_length > 0) {
2195    // Move up the chain of contexts to the context containing the slot.
2196    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
2197    // Load the function context (which is the incoming, outer context).
2198    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2199    for (int i = 1; i < context_chain_length; i++) {
2200      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2201      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2202    }
2203    // The context may be an intermediate context, not a function context.
2204    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2205  } else {  // context is the current function context.
2206    // The context may be an intermediate context, not a function context.
2207    movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2208  }
2209}
2210
2211
2212int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2213  // On Windows 64 stack slots are reserved by the caller for all arguments
2214  // including the ones passed in registers, and space is always allocated for
2215  // the four register arguments even if the function takes fewer than four
2216  // arguments.
2217  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
2218  // and the caller does not reserve stack slots for them.
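  // Illustrative examples: with 3 arguments this returns 4 on Windows (the
  // minimum register home area) and 0 elsewhere; with 8 arguments it returns
  // 8 on Windows and 2 elsewhere (8 minus the 6 register arguments).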
2219  ASSERT(num_arguments >= 0);
2220#ifdef _WIN64
2221  static const int kMinimumStackSlots = 4;
2222  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
2223  return num_arguments;
2224#else
2225  static const int kRegisterPassedArguments = 6;
2226  if (num_arguments < kRegisterPassedArguments) return 0;
2227  return num_arguments - kRegisterPassedArguments;
2228#endif
2229}
2230
2231
2232void MacroAssembler::PrepareCallCFunction(int num_arguments) {
2233  int frame_alignment = OS::ActivationFrameAlignment();
2234  ASSERT(frame_alignment != 0);
2235  ASSERT(num_arguments >= 0);
2236  // Align the stack and allocate space for the arguments and the old rsp.
2237  movq(kScratchRegister, rsp);
2238  ASSERT(IsPowerOf2(frame_alignment));
2239  int argument_slots_on_stack =
2240      ArgumentStackSlotsForCFunctionCall(num_arguments);
2241  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
2242  and_(rsp, Immediate(-frame_alignment));
2243  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
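  // After this, rsp is aligned and points at the lowest argument slot; the
  // original rsp sits in the extra slot just above the argument area and is
  // restored by CallCFunction once the call returns.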
2244}
2245
2246
2247void MacroAssembler::CallCFunction(ExternalReference function,
2248                                   int num_arguments) {
2249  movq(rax, function);
2250  CallCFunction(rax, num_arguments);
2251}
2252
2253
2254void MacroAssembler::CallCFunction(Register function, int num_arguments) {
2255  // Check stack alignment.
2256  if (FLAG_debug_code) {
2257    CheckStackAlignment();
2258  }
2259
2260  call(function);
2261  ASSERT(OS::ActivationFrameAlignment() != 0);
2262  ASSERT(num_arguments >= 0);
2263  int argument_slots_on_stack =
2264      ArgumentStackSlotsForCFunctionCall(num_arguments);
2265  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
2266}
2267
2268
2269CodePatcher::CodePatcher(byte* address, int size)
2270    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
2271  // Create a new macro assembler pointing to the address of the code to patch.
2272  // The size is adjusted with kGap in order for the assembler to generate size
2273  // bytes of instructions without failing with buffer size constraints.
2274  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2275}
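// Illustrative usage (hypothetical call site): construct a CodePatcher over
// the target address, emit exactly size bytes of instructions through its
// macro assembler (masm_), and rely on the destructor below to flush the
// instruction cache and verify the patch length.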
2276
2277
2278CodePatcher::~CodePatcher() {
2279  // Indicate that code has changed.
2280  CPU::FlushICache(address_, size_);
2281
2282  // Check that the code was patched as expected.
2283  ASSERT(masm_.pc_ == address_ + size_);
2284  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2285}
2286
2287} }  // namespace v8::internal
2288
2289#endif  // V8_TARGET_ARCH_X64
2290