macro-assembler-x64.cc revision 756813857a4c2a4d8ad2e805969d5768d3cf43a0
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
#include "debug.h"
#include "heap.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {
}


void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
  movq(destination, Operand(kRootRegister, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
  movq(Operand(kRootRegister, index << kPointerSizeLog2), source);
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  push(Operand(kRootRegister, index << kPointerSizeLog2));
}


void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
  cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2));
}


void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
  LoadRoot(kScratchRegister, index);
  cmpq(with, kScratchRegister);
}


void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
  CompareRoot(rsp, Heap::kStackLimitRootIndex);
  j(below, on_stack_overflow);
}


void MacroAssembler::RecordWriteHelper(Register object,
                                       Register addr,
                                       Register scratch) {
  if (FLAG_debug_code) {
    // Check that the object is not in new space.
    Label not_in_new_space;
    InNewSpace(object, scratch, not_equal, &not_in_new_space);
    Abort("new-space object passed to RecordWriteHelper");
    bind(&not_in_new_space);
  }

  // Compute the page start address from the heap object pointer, and reuse
  // the 'object' register for it.
  and_(object, Immediate(~Page::kPageAlignmentMask));
  // Compute the number of the region covering addr. See the
  // Page::GetRegionNumberForAddress method for more details.
  shrl(addr, Immediate(Page::kRegionSizeLog2));
  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));

  // Set dirty mark for region.
  bts(Operand(object, Page::kDirtyFlagOffset), addr);
}

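// Illustrative note (not part of the original source): RecordWriteHelper
// turns a slot address into a single dirty bit on the slot's page.  For a
// hypothetical kRegionSizeLog2 of 8 (the real value comes from Page), a
// slot at page_start + 0x468 would map to region 0x468 >> 8 == 4, and the
// bts above would set bit 4 of the word at page_start + kDirtyFlagOffset,
// so the GC only has to rescan regions whose dirty bit is set.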

void MacroAssembler::RecordWrite(Register object,
                                 int offset,
                                 Register value,
                                 Register index) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are rsi.
  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;
  JumpIfSmi(value, &done);

  RecordWriteNonSmi(object, offset, value, index);
  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors. This clobbering repeats the
  // clobbering done inside RecordWriteNonSmi but it's necessary to
  // avoid having the fast case for smis leave the registers
  // unchanged.
  if (FLAG_debug_code) {
    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}


void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are rsi.
  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));

  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;
  JumpIfSmi(value, &done);

  InNewSpace(object, value, equal, &done);

  RecordWriteHelper(object, address, value);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (FLAG_debug_code) {
    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}


void MacroAssembler::RecordWriteNonSmi(Register object,
                                       int offset,
                                       Register scratch,
                                       Register index) {
  Label done;

  if (FLAG_debug_code) {
    Label okay;
    JumpIfNotSmi(object, &okay);
    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
    bind(&okay);

    if (offset == 0) {
      // index must be int32.
      Register tmp = index.is(rax) ? rbx : rax;
      push(tmp);
      movl(tmp, index);
      cmpq(tmp, index);
      Check(equal, "Index register for RecordWrite must be untagged int32.");
      pop(tmp);
    }
  }

  // Test that the object address is not in the new space. We cannot
  // update page dirty marks for new space pages.
  InNewSpace(object, scratch, equal, &done);

  // The offset is relative to a tagged or untagged HeapObject pointer,
  // so either offset or offset + kHeapObjectTag must be a
  // multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize) ||
         IsAligned(offset + kHeapObjectTag, kPointerSize));

  Register dst = index;
  if (offset != 0) {
    lea(dst, Operand(object, offset));
  } else {
    // array access: calculate the destination address in the same manner as
    // KeyedStoreIC::GenerateGeneric.
    lea(dst, FieldOperand(object,
                          index,
                          times_pointer_size,
                          FixedArray::kHeaderSize));
  }
  RecordWriteHelper(object, dst, scratch);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (FLAG_debug_code) {
    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
  }
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  if (Serializer::enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address.  We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      movq(kScratchRegister, ExternalReference::new_space_mask());
      and_(scratch, kScratchRegister);
    } else {
      movq(scratch, ExternalReference::new_space_mask());
      and_(scratch, object);
    }
    movq(kScratchRegister, ExternalReference::new_space_start());
    cmpq(scratch, kScratchRegister);
    j(cc, branch);
  } else {
    ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
    if (scratch.is(object)) {
      addq(scratch, kScratchRegister);
    } else {
      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
    j(cc, branch);
  }
}

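// Illustrative note (not part of the original source): both paths above
// implement "object is in new space iff the address bits selected by the
// new-space mask equal new_space_start".  The non-serializing path folds
// the comparison into arithmetic:
//   scratch = object + (-new_space_start);  scratch & mask
// is zero exactly when the object lies inside the new-space reservation,
// assuming the reservation is aligned to the mask as the snapshot-free
// configuration guarantees.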

void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (FLAG_debug_code) Check(cc, msg);
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (FLAG_debug_code) {
    Label ok;
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedArrayMapRootIndex);
    j(equal, &ok);
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                Heap::kFixedCOWArrayMapRootIndex);
    j(equal, &ok);
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
  }
}


void MacroAssembler::Check(Condition cc, const char* msg) {
  Label L;
  j(cc, &L);
  Abort(msg);
  // will not return here
  bind(&L);
}


void MacroAssembler::CheckStackAlignment() {
  int frame_alignment = OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kPointerSize) {
    ASSERT(IsPowerOf2(frame_alignment));
    Label alignment_as_expected;
    testq(rsp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}


void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::Abort(const char* msg) {
  // We want to pass the msg string like a smi to avoid GC
  // problems, however msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  // Disable stub call restrictions to always allow calls to abort.
  set_allow_stub_calls(true);

  push(rax);
  movq(kScratchRegister, p0, RelocInfo::NONE);
  push(kScratchRegister);
  movq(kScratchRegister,
       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
       RelocInfo::NONE);
  push(kScratchRegister);
  CallRuntime(Runtime::kAbort, 2);
  // will not return here
  int3();
}


void MacroAssembler::CallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}


Object* MacroAssembler::TryCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
  Object* result = stub->TryGetCode();
  if (!result->IsFailure()) {
    call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
  }
  return result;
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


Object* MacroAssembler::TryTailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
  Object* result = stub->TryGetCode();
  if (!result->IsFailure()) {
    jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
  }
  return result;
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addq(rsp, Immediate(num_arguments * kPointerSize));
  }
  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(id), num_arguments);
}


Object* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
                                       int num_arguments) {
  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
}


void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  movq(rbx, ExternalReference(f));
  CEntryStub ces(f->result_size);
  CallStub(&ces);
}


Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
                                       int num_arguments) {
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    // Since we did not call the stub, there was no allocation failure.
    // Return some non-failure object.
    return Heap::undefined_value();
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  movq(rbx, ExternalReference(f));
  CEntryStub ces(f->result_size);
  return TryCallStub(&ces);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Set(rax, num_arguments);
  movq(rbx, ext);

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0] : return address
  //  -- rsp[8] : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Set(rax, num_arguments);
  JumpToExternalReference(ext, result_size);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}


static int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
  ASSERT(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}


void MacroAssembler::PushHandleScope(Register scratch) {
  ExternalReference extensions_address =
      ExternalReference::handle_scope_extensions_address();
  const int kExtensionsOffset = 0;
  const int kNextOffset = Offset(
      ExternalReference::handle_scope_next_address(),
      extensions_address);
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(),
      extensions_address);

  // Push the number of extensions, smi-tagged so the gc will ignore it.
  movq(kScratchRegister, extensions_address);
  movq(scratch, Operand(kScratchRegister, kExtensionsOffset));
  movq(Operand(kScratchRegister, kExtensionsOffset), Immediate(0));
  Integer32ToSmi(scratch, scratch);
  push(scratch);
  // Push next and limit pointers which will be wordsize aligned and
  // hence automatically smi tagged.
  push(Operand(kScratchRegister, kNextOffset));
  push(Operand(kScratchRegister, kLimitOffset));
}


Object* MacroAssembler::PopHandleScopeHelper(Register saved,
                                             Register scratch,
                                             bool gc_allowed) {
  ExternalReference extensions_address =
      ExternalReference::handle_scope_extensions_address();
  const int kExtensionsOffset = 0;
  const int kNextOffset = Offset(
      ExternalReference::handle_scope_next_address(),
      extensions_address);
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(),
      extensions_address);

  Object* result = NULL;
  Label write_back;
  movq(kScratchRegister, extensions_address);
  cmpq(Operand(kScratchRegister, kExtensionsOffset), Immediate(0));
  j(equal, &write_back);
  push(saved);
  if (gc_allowed) {
    CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
  } else {
    result = TryCallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
    if (result->IsFailure()) return result;
  }
  pop(saved);
  movq(kScratchRegister, extensions_address);

  bind(&write_back);
  pop(Operand(kScratchRegister, kLimitOffset));
  pop(Operand(kScratchRegister, kNextOffset));
  pop(scratch);
  SmiToInteger32(scratch, scratch);
  movq(Operand(kScratchRegister, kExtensionsOffset), scratch);

  return result;
}


void MacroAssembler::PopHandleScope(Register saved, Register scratch) {
  PopHandleScopeHelper(saved, scratch, true);
}


Object* MacroAssembler::TryPopHandleScope(Register saved, Register scratch) {
  return PopHandleScopeHelper(saved, scratch, false);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  movq(rbx, ext);
  CEntryStub ces(result_size);
  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
  // Calls are not allowed in some stubs.
  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());

  // Rely on the assertion to check that the number of provided
  // arguments match the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  GetBuiltinEntry(rdx, id);
  InvokeCode(rdx, expected, expected, flag);
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(rdi));

  // Load the builtins object into target register.
  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));

  // Load the JavaScript builtin function from the builtins object.
  movq(rdi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));

  // Load the code entry point from the builtins object.
  movq(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
  if (FLAG_debug_code) {
    // Make sure the code objects in the builtins object and in the
    // builtin function are the same.
    push(target);
    movq(target, FieldOperand(rdi, JSFunction::kCodeOffset));
    cmpq(target, Operand(rsp, 0));
    Assert(equal, "Builtin code object changed");
    pop(target);
  }
  lea(target, FieldOperand(target, Code::kHeaderSize));
}


void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xorl(dst, dst);
  } else if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else if (is_uint32(x)) {
    movl(dst, Immediate(static_cast<uint32_t>(x)));
  } else {
    movq(dst, x, RelocInfo::NONE);
  }
}

void MacroAssembler::Set(const Operand& dst, int64_t x) {
  if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    movq(kScratchRegister, x, RelocInfo::NONE);
    movq(dst, kScratchRegister);
  }
}

// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.

static int kSmiShift = kSmiTagSize + kSmiShiftSize;

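// Illustrative note (not part of the original source): on x64 in this
// configuration kSmiShift is 32, so a smi keeps its 32-bit value in the
// upper half of the word and the low 32 bits (tag plus padding) zero.
// For example, Smi::FromInt(5) is the 64-bit pattern 0x0000000500000000 and
// Smi::FromInt(-1) is 0xFFFFFFFF00000000.  This is why several helpers
// below can untag a field with a 32-bit access at offset
// kSmiShift / kBitsPerByte.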
Register MacroAssembler::GetSmiConstant(Smi* source) {
  int value = source->value();
  if (value == 0) {
    xorl(kScratchRegister, kScratchRegister);
    return kScratchRegister;
  }
  if (value == 1) {
    return kSmiConstantRegister;
  }
  LoadSmiConstant(kScratchRegister, source);
  return kScratchRegister;
}

void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
  if (FLAG_debug_code) {
    movq(dst,
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
         RelocInfo::NONE);
    cmpq(dst, kSmiConstantRegister);
    if (allow_stub_calls()) {
      Assert(equal, "Uninitialized kSmiConstantRegister");
    } else {
      Label ok;
      j(equal, &ok);
      int3();
      bind(&ok);
    }
  }
  if (source->value() == 0) {
    xorl(dst, dst);
    return;
  }
  int value = source->value();
  bool negative = value < 0;
  unsigned int uvalue = negative ? -value : value;

  switch (uvalue) {
    case 9:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
      break;
    case 8:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
      break;
    case 4:
      xorl(dst, dst);
      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
      break;
    case 5:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
    case 3:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
      break;
    case 2:
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
      break;
    case 1:
      movq(dst, kSmiConstantRegister);
      break;
    case 0:
      UNREACHABLE();
      return;
    default:
      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
      return;
  }
  if (negative) {
    neg(dst);
  }
}

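// Illustrative note (not part of the original source): kSmiConstantRegister
// is expected to hold Smi::FromInt(1) (checked in debug mode above), so
// small smi constants can be materialized without a 10-byte movq of a full
// 64-bit immediate.  For example, LoadSmiConstant(dst, Smi::FromInt(5))
// becomes a single
//   lea dst, [kSmiConstantRegister + kSmiConstantRegister * 4]
// and negative values reuse the positive pattern followed by neg.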
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shl(dst, Immediate(kSmiShift));
}


void MacroAssembler::Integer32ToSmi(Register dst,
                                    Register src,
                                    Label* on_overflow) {
  ASSERT_EQ(0, kSmiTag);
  // 32-bit integer always fits in a long smi.
  if (!dst.is(src)) {
    movl(dst, src);
  }
  shl(dst, Immediate(kSmiShift));
}


void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
  if (FLAG_debug_code) {
    testb(dst, Immediate(0x01));
    Label ok;
    j(zero, &ok);
    if (allow_stub_calls()) {
      Abort("Integer32ToSmiField writing to non-smi location");
    } else {
      int3();
    }
    bind(&ok);
  }
  ASSERT(kSmiShift % kBitsPerByte == 0);
  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                Register src,
                                                int constant) {
  if (dst.is(src)) {
    addq(dst, Immediate(constant));
  } else {
    lea(dst, Operand(src, constant));
  }
  shl(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  shr(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movq(dst, src);
  }
  sar(dst, Immediate(kSmiShift));
}


void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
}


void MacroAssembler::SmiTest(Register src) {
  testq(src, src);
}


void MacroAssembler::SmiCompare(Register dst, Register src) {
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(Register dst, Smi* src) {
  ASSERT(!dst.is(kScratchRegister));
  if (src->value() == 0) {
    testq(dst, dst);
  } else {
    Register constant_reg = GetSmiConstant(src);
    cmpq(dst, constant_reg);
  }
}


void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
  cmpq(dst, src);
}


void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}


void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  ASSERT(power >= 0);
  ASSERT(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (power < kSmiShift) {
    sar(dst, Immediate(kSmiShift - power));
  } else if (power > kSmiShift) {
    shl(dst, Immediate(power - kSmiShift));
  }
}


void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                         Register src,
                                                         int power) {
  ASSERT((0 <= power) && (power < 32));
  if (dst.is(src)) {
    shr(dst, Immediate(power + kSmiShift));
  } else {
    UNIMPLEMENTED();  // Not used.
  }
}


Condition MacroAssembler::CheckSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckPositiveSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  // Rotate the value left by one so that the sign bit ends up in bit 0 and
  // the smi tag bit in bit 1, then test that both are zero.
  movq(kScratchRegister, src);
  rol(kScratchRegister, Immediate(1));
  testb(kScratchRegister, Immediate(3));
  return zero;
}

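// Illustrative note (not part of the original source): after the rotate,
// bit 0 holds the original sign bit and bit 1 holds the original smi tag
// bit, so a single testb against 3 sets the zero flag exactly when the
// value is a smi with a non-negative value; no separate sign test is
// needed.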

Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
  leal(kScratchRegister, Operand(first, second, times_1, 0));
  testb(kScratchRegister, Immediate(0x03));
  return zero;
}


Condition MacroAssembler::CheckBothPositiveSmi(Register first,
                                               Register second) {
  if (first.is(second)) {
    return CheckPositiveSmi(first);
  }
  movq(kScratchRegister, first);
  or_(kScratchRegister, second);
  rol(kScratchRegister, Immediate(1));
  testl(kScratchRegister, Immediate(0x03));
  return zero;
}


Condition MacroAssembler::CheckEitherSmi(Register first,
                                         Register second,
                                         Register scratch) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  if (scratch.is(second)) {
    andl(scratch, first);
  } else {
    if (!scratch.is(first)) {
      movl(scratch, first);
    }
    andl(scratch, second);
  }
  testb(scratch, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  ASSERT(!src.is(kScratchRegister));
  // If we overflow by subtracting one, it's the minimal smi value.
  cmpq(src, kSmiConstantRegister);
  return overflow;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  // A 32-bit integer value can always be converted to a smi.
  return always;
}


Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
  // An unsigned 32-bit integer value is valid as long as the high bit
  // is not set.
  testl(src, src);
  return positive;
}


void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    movq(kScratchRegister, src);
    neg(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpq(dst, kScratchRegister);
    j(not_equal, on_smi_result);
    movq(src, kScratchRegister);
  } else {
    movq(dst, src);
    neg(dst);
    cmpq(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));
  if (on_not_smi_result == NULL) {
    // No overflow checking. Use only when it's known that
    // overflowing is impossible.
    if (dst.is(src1)) {
      addq(dst, src2);
    } else {
      movq(dst, src1);
      addq(dst, src2);
    }
    Assert(no_overflow, "Smi addition overflow");
  } else if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));
  if (on_not_smi_result == NULL) {
    // No overflow checking. Use only when it's known that
    // overflowing is impossible (e.g., subtracting two positive smis).
    if (dst.is(src1)) {
      subq(dst, src2);
    } else {
      movq(dst, src1);
      subq(dst, src2);
    }
    Assert(no_overflow, "Smi subtraction overflow");
  } else if (dst.is(src1)) {
    cmpq(dst, src2);
    j(overflow, on_not_smi_result);
    subq(dst, src2);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result) {
  if (on_not_smi_result == NULL) {
    // No overflow checking. Use only when it's known that
    // overflowing is impossible (e.g., subtracting two positive smis).
    if (dst.is(src1)) {
      subq(dst, src2);
    } else {
      movq(dst, src1);
      subq(dst, src2);
    }
    Assert(no_overflow, "Smi subtraction overflow");
  } else if (dst.is(src1)) {
    movq(kScratchRegister, src2);
    cmpq(src1, kScratchRegister);
    j(overflow, on_not_smi_result);
    subq(src1, kScratchRegister);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result);
  }
}

void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movq(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, &failure);

    // Check for negative zero result.  If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result);

    movq(dst, kScratchRegister);
    xor_(dst, src2);
    j(positive, &zero_correct_result);  // Result was positive zero.

    bind(&failure);  // Reused failure exit, restores src1.
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result);

    bind(&zero_correct_result);
    xor_(dst, dst);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, on_not_smi_result);
    // Check for negative zero result.  If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result);
    // One of src1 and src2 is zero, so check whether the other one is
    // negative.
    movq(kScratchRegister, src1);
    xor_(kScratchRegister, src2);
    j(negative, on_not_smi_result);
    bind(&correct_result);
  }
}

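// Illustrative note (not part of the original source): the zero-result
// checks in SmiMul exist because JavaScript distinguishes 0 from -0.  A
// product such as -1 * 0 must evaluate to -0, which a smi cannot
// represent, so any zero product with a negative operand falls through to
// on_not_smi_result and is produced as a heap number on the slow path.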

void MacroAssembler::SmiTryAddConstant(Register dst,
                                       Register src,
                                       Smi* constant,
                                       Label* on_not_smi_result) {
  // Does not assume that src is a smi.
  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
  ASSERT_EQ(0, kSmiTag);
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));

  JumpIfNotSmi(src, on_not_smi_result);
  Register tmp = (dst.is(src) ? kScratchRegister : dst);
  LoadSmiConstant(tmp, constant);
  addq(tmp, src);
  j(overflow, on_not_smi_result);
  if (dst.is(src)) {
    movq(dst, tmp);
  }
}


void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
    return;
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    switch (constant->value()) {
      case 1:
        addq(dst, kSmiConstantRegister);
        return;
      case 2:
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        Register constant_reg = GetSmiConstant(constant);
        addq(dst, constant_reg);
        return;
    }
  } else {
    switch (constant->value()) {
      case 1:
        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
        return;
      case 2:
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
        return;
      case 4:
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
        return;
      case 8:
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
        return;
      default:
        LoadSmiConstant(dst, constant);
        addq(dst, src);
        return;
    }
  }
}


void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
  if (constant->value() != 0) {
    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
  }
}


void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));

    LoadSmiConstant(kScratchRegister, constant);
    addq(kScratchRegister, src);
    j(overflow, on_not_smi_result);
    movq(dst, kScratchRegister);
  } else {
    LoadSmiConstant(dst, constant);
    addq(dst, src);
    j(overflow, on_not_smi_result);
  }
}


void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subq(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value give the same result; they only
      // differ in the overflow flag, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addq(dst, src);
    }
  }
}


void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result);
      LoadSmiConstant(kScratchRegister, constant);
      subq(dst, kScratchRegister);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
      addq(kScratchRegister, dst);
      j(overflow, on_not_smi_result);
      movq(dst, kScratchRegister);
    }
  } else {
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result);
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value give the same result; they only
      // differ in the overflow flag, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addq(dst, src);
      j(overflow, on_not_smi_result);
    }
  }
}


void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  Label positive_divisor;
  testq(src2, src2);
  j(zero, on_not_smi_result);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  testl(rax, Immediate(0x7fffffff));
  j(not_zero, &safe_div);
  testq(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result);
  } else {
    j(negative, on_not_smi_result);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}

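// Illustrative note (not part of the original source): SmiDiv bails out to
// the slow path whenever the quotient would not be a smi.  That covers a
// zero divisor (the JS result is +/-Infinity), a non-zero remainder (the
// JS result is a fraction), 0 divided by a negative number (the JS result
// is -0), and Smi::kMinValue / -1, whose mathematical result 2^31 does not
// fit in a 32-bit smi value and would make idivl fault.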

void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));
  ASSERT(!src1.is(src2));

  testq(src2, src2);
  j(zero, on_not_smi_result);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div);
  // Retag inputs and go slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  jmp(on_not_smi_result);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  // Check for a negative zero result.  If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result);
  testq(src1, src1);
  j(negative, on_not_smi_result);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}


void MacroAssembler::SmiNot(Register dst, Register src) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));
  // Set tag and padding bits before negating, so that they are zero afterwards.
  movl(kScratchRegister, Immediate(~0));
  if (dst.is(src)) {
    xor_(dst, kScratchRegister);
  } else {
    lea(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  not_(dst);
}


void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  and_(dst, src2);
}


void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    xor_(dst, dst);
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    and_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    and_(dst, src);
  }
}


void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  or_(dst, src2);
}


void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    or_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    or_(dst, src);
  }
}


void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  xor_(dst, src2);
}


void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    xor_(dst, constant_reg);
  } else {
    LoadSmiConstant(dst, constant);
    xor_(dst, src);
  }
}


void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  ASSERT(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      sar(dst, Immediate(shift_value + kSmiShift));
      shl(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}


void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
                                                  Register src,
                                                  int shift_value,
                                                  Label* on_not_smi_result) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movq(dst, src);
    if (shift_value == 0) {
      testq(dst, dst);
      j(negative, on_not_smi_result);
    }
    shr(dst, Immediate(shift_value + kSmiShift));
    shl(dst, Immediate(kSmiShift));
  }
}


void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value) {
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (shift_value > 0) {
    shl(dst, Immediate(shift_value));
  }
}


void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2) {
  ASSERT(!dst.is(rcx));
  Label result_ok;
  // Untag shift amount.
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  // The shift amount is given by the lower 5 bits, not six as in the shl opcode.
  and_(rcx, Immediate(0x1f));
  shl_cl(dst);
}


void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  Label result_ok;
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  shr_cl(dst);  // Shift is (rcx & 0x1f) + 32.
  shl(dst, Immediate(kSmiShift));
  testq(dst, dst);
  if (src1.is(rcx) || src2.is(rcx)) {
    Label positive_result;
    j(positive, &positive_result);
    if (src1.is(rcx)) {
      movq(src1, kScratchRegister);
    } else {
      movq(src2, kScratchRegister);
    }
    jmp(on_not_smi_result);
    bind(&positive_result);
  } else {
    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
  }
}


void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  if (src1.is(rcx)) {
    movq(kScratchRegister, src1);
  } else if (src2.is(rcx)) {
    movq(kScratchRegister, src2);
  }
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  sar_cl(dst);  // Shift is (original rcx & 0x1f) + 32.
  shl(dst, Immediate(kSmiShift));
  if (src1.is(rcx)) {
    movq(src1, kScratchRegister);
  } else if (src2.is(rcx)) {
    movq(src2, kScratchRegister);
  }
}

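// Illustrative note (not part of the original source): both right-shift
// helpers above exploit the 64-bit tagged representation.  Or'ing the shift
// count with kSmiShift (32) and shifting the whole tagged word right by
// 32 + n untags and shifts in one instruction; the following shl by 32
// retags the 32-bit result.  For example, shifting the smi 8 right by 1
// operates on 8 << 32, shifts right by 33 to get 4, then shifts left by 32
// to produce the smi 4.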

void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(src1));
  ASSERT(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  if (allow_stub_calls()) {  // Check contains a stub call.
    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
  }
#endif
  ASSERT_EQ(0, kSmiTag);
  ASSERT_EQ(0, Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  and_(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // If non-zero, then both operands are non-smis.
  j(not_zero, on_not_smis);

  // Exactly one operand is a smi.
  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
  subq(kScratchRegister, Immediate(1));
  // If src1 is a smi, the scratch register now holds all 1s, else all 0s.
  movq(dst, src1);
  xor_(dst, src2);
  and_(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xor_(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}

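// Illustrative note (not part of the original source): the selection above
// is branch free.  After the subtraction, kScratchRegister is all ones when
// src1 is a smi and all zeros otherwise, so
//   dst = ((src1 ^ src2) & mask) ^ src1
// evaluates to src2 when src1 is the smi and to src1 when src2 is the smi,
// i.e. dst always ends up holding the non-smi operand.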
1618
1619SmiIndex MacroAssembler::SmiToIndex(Register dst,
1620                                    Register src,
1621                                    int shift) {
1622  ASSERT(is_uint6(shift));
1623  // There is a possible optimization if shift is in the range 60-63, but that
1624  // will (and must) never happen.
1625  if (!dst.is(src)) {
1626    movq(dst, src);
1627  }
1628  if (shift < kSmiShift) {
1629    sar(dst, Immediate(kSmiShift - shift));
1630  } else {
1631    shl(dst, Immediate(shift - kSmiShift));
1632  }
1633  return SmiIndex(dst, times_1);
1634}
1635
1636SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
1637                                            Register src,
1638                                            int shift) {
1639  // Register src holds a positive smi.
1640  ASSERT(is_uint6(shift));
1641  if (!dst.is(src)) {
1642    movq(dst, src);
1643  }
1644  neg(dst);
1645  if (shift < kSmiShift) {
1646    sar(dst, Immediate(kSmiShift - shift));
1647  } else {
1648    shl(dst, Immediate(shift - kSmiShift));
1649  }
1650  return SmiIndex(dst, times_1);
1651}
1652
1653
1654void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
1655  ASSERT_EQ(0, kSmiTag);
1656  Condition smi = CheckSmi(src);
1657  j(smi, on_smi);
1658}
1659
1660
1661void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
1662  Condition smi = CheckSmi(src);
1663  j(NegateCondition(smi), on_not_smi);
1664}
1665
1666
1667void MacroAssembler::JumpIfNotPositiveSmi(Register src,
1668                                          Label* on_not_positive_smi) {
1669  Condition positive_smi = CheckPositiveSmi(src);
1670  j(NegateCondition(positive_smi), on_not_positive_smi);
1671}
1672
1673
1674void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1675                                             Smi* constant,
1676                                             Label* on_equals) {
1677  SmiCompare(src, constant);
1678  j(equal, on_equals);
1679}
1680
1681
1682void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
1683  Condition is_valid = CheckInteger32ValidSmiValue(src);
1684  j(NegateCondition(is_valid), on_invalid);
1685}
1686
1687
1688void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1689                                                Label* on_invalid) {
1690  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1691  j(NegateCondition(is_valid), on_invalid);
1692}
1693
1694
1695void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
1696                                      Label* on_not_both_smi) {
1697  Condition both_smi = CheckBothSmi(src1, src2);
1698  j(NegateCondition(both_smi), on_not_both_smi);
1699}
1700
1701
1702void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
1703                                              Label* on_not_both_smi) {
1704  Condition both_smi = CheckBothPositiveSmi(src1, src2);
1705  j(NegateCondition(both_smi), on_not_both_smi);
1706}
1707
1708
1709
1710void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
1711                                                         Register second_object,
1712                                                         Register scratch1,
1713                                                         Register scratch2,
1714                                                         Label* on_fail) {
1715  // Check that both objects are not smis.
1716  Condition either_smi = CheckEitherSmi(first_object, second_object);
1717  j(either_smi, on_fail);
1718
1719  // Load instance type for both strings.
1720  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
1721  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
1722  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
1723  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
1724
1725  // Check that both are flat ascii strings.
1726  ASSERT(kNotStringTag != 0);
1727  const int kFlatAsciiStringMask =
1728      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
1729  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
1730
1731  andl(scratch1, Immediate(kFlatAsciiStringMask));
1732  andl(scratch2, Immediate(kFlatAsciiStringMask));
1733  // Interleave the bits to check both scratch1 and scratch2 in one test.
1734  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
1735  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
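  // scratch1 now holds mask(first) + (mask(second) << 3); the ASSERT above
  // guarantees the two bit ranges do not overlap, so the single compare below
  // checks both strings at once.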
1736  cmpl(scratch1,
1737       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
1738  j(not_equal, on_fail);
1739}
1740
1741
1742void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
1743    Register instance_type,
1744    Register scratch,
1745    Label* failure) {
1746  if (!scratch.is(instance_type)) {
1747    movl(scratch, instance_type);
1748  }
1749
1750  const int kFlatAsciiStringMask =
1751      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
1752
1753  andl(scratch, Immediate(kFlatAsciiStringMask));
1754  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
1755  j(not_equal, failure);
1756}
1757
1758
1759void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
1760    Register first_object_instance_type,
1761    Register second_object_instance_type,
1762    Register scratch1,
1763    Register scratch2,
1764    Label* on_fail) {
1765  // Load instance type for both strings.
1766  movq(scratch1, first_object_instance_type);
1767  movq(scratch2, second_object_instance_type);
1768
1769  // Check that both are flat ascii strings.
1770  ASSERT(kNotStringTag != 0);
1771  const int kFlatAsciiStringMask =
1772      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
1773  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
1774
1775  andl(scratch1, Immediate(kFlatAsciiStringMask));
1776  andl(scratch2, Immediate(kFlatAsciiStringMask));
1777  // Interleave the bits to check both scratch1 and scratch2 in one test.
1778  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
1779  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
1780  cmpl(scratch1,
1781       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
1782  j(not_equal, on_fail);
1783}
1784
1785
1786void MacroAssembler::Move(Register dst, Handle<Object> source) {
1787  ASSERT(!source->IsFailure());
1788  if (source->IsSmi()) {
1789    Move(dst, Smi::cast(*source));
1790  } else {
1791    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
1792  }
1793}
1794
1795
1796void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
1797  ASSERT(!source->IsFailure());
1798  if (source->IsSmi()) {
1799    Move(dst, Smi::cast(*source));
1800  } else {
1801    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1802    movq(dst, kScratchRegister);
1803  }
1804}
1805
1806
1807void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1808  if (source->IsSmi()) {
1809    SmiCompare(dst, Smi::cast(*source));
1810  } else {
1811    Move(kScratchRegister, source);
1812    cmpq(dst, kScratchRegister);
1813  }
1814}
1815
1816
1817void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
1818  if (source->IsSmi()) {
1819    SmiCompare(dst, Smi::cast(*source));
1820  } else {
1821    ASSERT(source->IsHeapObject());
1822    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1823    cmpq(dst, kScratchRegister);
1824  }
1825}
1826
1827
1828void MacroAssembler::Push(Handle<Object> source) {
1829  if (source->IsSmi()) {
1830    Push(Smi::cast(*source));
1831  } else {
1832    ASSERT(source->IsHeapObject());
1833    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1834    push(kScratchRegister);
1835  }
1836}
1837
1838
1839void MacroAssembler::Push(Smi* source) {
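  // pushq only takes an immediate of up to 32 bits (sign-extended), so larger
  // smi bit patterns are materialized into a register via GetSmiConstant.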
1840  intptr_t smi = reinterpret_cast<intptr_t>(source);
1841  if (is_int32(smi)) {
1842    push(Immediate(static_cast<int32_t>(smi)));
1843  } else {
1844    Register constant = GetSmiConstant(source);
1845    push(constant);
1846  }
1847}
1848
1849
1850void MacroAssembler::Drop(int stack_elements) {
1851  if (stack_elements > 0) {
1852    addq(rsp, Immediate(stack_elements * kPointerSize));
1853  }
1854}
1855
1856
1857void MacroAssembler::Test(const Operand& src, Smi* source) {
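  // In the 64-bit smi representation the payload occupies the upper 32 bits
  // of the word (the lower half is the zero tag), so testing the 32-bit word
  // at offset kIntSize compares the untagged value directly.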
1858  testl(Operand(src, kIntSize), Immediate(source->value()));
1859}
1860
1861
1862void MacroAssembler::Jump(ExternalReference ext) {
1863  movq(kScratchRegister, ext);
1864  jmp(kScratchRegister);
1865}
1866
1867
1868void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1869  movq(kScratchRegister, destination, rmode);
1870  jmp(kScratchRegister);
1871}
1872
1873
1874void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
1875  // TODO(X64): Inline this
1876  jmp(code_object, rmode);
1877}
1878
1879
1880void MacroAssembler::Call(ExternalReference ext) {
1881  movq(kScratchRegister, ext);
1882  call(kScratchRegister);
1883}
1884
1885
1886void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1887  movq(kScratchRegister, destination, rmode);
1888  call(kScratchRegister);
1889}
1890
1891
1892void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1893  ASSERT(RelocInfo::IsCodeTarget(rmode));
1894  WriteRecordedPositions();
1895  call(code_object, rmode);
1896}
1897
1898
1899void MacroAssembler::PushTryHandler(CodeLocation try_location,
1900                                    HandlerType type) {
1901  // Adjust this code if not the case.
1902  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1903
1904  // The pc (return address) is already on TOS.  This code pushes state,
1905  // frame pointer and current handler.  Check that they are expected
1906  // next on the stack, in that order.
1907  ASSERT_EQ(StackHandlerConstants::kStateOffset,
1908            StackHandlerConstants::kPCOffset - kPointerSize);
1909  ASSERT_EQ(StackHandlerConstants::kFPOffset,
1910            StackHandlerConstants::kStateOffset - kPointerSize);
1911  ASSERT_EQ(StackHandlerConstants::kNextOffset,
1912            StackHandlerConstants::kFPOffset - kPointerSize);
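  // Resulting handler layout, from the new rsp upward (see assertions above):
  //   [rsp + 0 * kPointerSize] : next handler
  //   [rsp + 1 * kPointerSize] : frame pointer (NULL for JS entry)
  //   [rsp + 2 * kPointerSize] : state
  //   [rsp + 3 * kPointerSize] : return address (pc)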
1913
1914  if (try_location == IN_JAVASCRIPT) {
1915    if (type == TRY_CATCH_HANDLER) {
1916      push(Immediate(StackHandler::TRY_CATCH));
1917    } else {
1918      push(Immediate(StackHandler::TRY_FINALLY));
1919    }
1920    push(rbp);
1921  } else {
1922    ASSERT(try_location == IN_JS_ENTRY);
1923    // The frame pointer does not point to a JS frame so we save NULL
1924    // for rbp. We expect the code throwing an exception to check rbp
1925    // before dereferencing it to restore the context.
1926    push(Immediate(StackHandler::ENTRY));
1927    push(Immediate(0));  // NULL frame pointer.
1928  }
1929  // Save the current handler.
1930  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1931  push(Operand(kScratchRegister, 0));
1932  // Link this handler.
1933  movq(Operand(kScratchRegister, 0), rsp);
1934}
1935
1936
1937void MacroAssembler::PopTryHandler() {
1938  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
1939  // Unlink this handler.
1940  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1941  pop(Operand(kScratchRegister, 0));
1942  // Remove the remaining fields.
1943  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1944}
1945
1946
1947void MacroAssembler::Ret() {
1948  ret(0);
1949}
1950
1951
1952void MacroAssembler::FCmp() {
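  // fucomip compares ST(0) with ST(1), sets EFLAGS and pops ST(0); the
  // following fstp(0) pops the remaining operand, so both inputs are consumed.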
1953  fucomip();
1954  fstp(0);
1955}
1956
1957
1958void MacroAssembler::CmpObjectType(Register heap_object,
1959                                   InstanceType type,
1960                                   Register map) {
1961  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1962  CmpInstanceType(map, type);
1963}
1964
1965
1966void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
1967  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
1968       Immediate(static_cast<int8_t>(type)));
1969}
1970
1971
1972void MacroAssembler::CheckMap(Register obj,
1973                              Handle<Map> map,
1974                              Label* fail,
1975                              bool is_heap_object) {
1976  if (!is_heap_object) {
1977    JumpIfSmi(obj, fail);
1978  }
1979  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
1980  j(not_equal, fail);
1981}
1982
1983
1984void MacroAssembler::AbortIfNotNumber(Register object) {
1985  Label ok;
1986  Condition is_smi = CheckSmi(object);
1987  j(is_smi, &ok);
1988  Cmp(FieldOperand(object, HeapObject::kMapOffset),
1989      Factory::heap_number_map());
1990  Assert(equal, "Operand not a number");
1991  bind(&ok);
1992}
1993
1994
1995void MacroAssembler::AbortIfSmi(Register object) {
1997  Condition is_smi = CheckSmi(object);
1998  Assert(NegateCondition(is_smi), "Operand is a smi");
1999}
2000
2001
2002void MacroAssembler::AbortIfNotSmi(Register object) {
2004  Condition is_smi = CheckSmi(object);
2005  Assert(is_smi, "Operand is not a smi");
2006}
2007
2008
2009void MacroAssembler::AbortIfNotRootValue(Register src,
2010                                         Heap::RootListIndex root_value_index,
2011                                         const char* message) {
2012  ASSERT(!src.is(kScratchRegister));
2013  LoadRoot(kScratchRegister, root_value_index);
2014  cmpq(src, kScratchRegister);
2015  Check(equal, message);
2016}
2017
2018
2020Condition MacroAssembler::IsObjectStringType(Register heap_object,
2021                                             Register map,
2022                                             Register instance_type) {
2023  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
2024  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
2025  ASSERT(kNotStringTag != 0);
2026  testb(instance_type, Immediate(kIsNotStringMask));
2027  return zero;
2028}
2029
2030
2031void MacroAssembler::TryGetFunctionPrototype(Register function,
2032                                             Register result,
2033                                             Label* miss) {
2034  // Check that the function isn't a smi.
2035  testl(function, Immediate(kSmiTagMask));
2036  j(zero, miss);
2037
2038  // Check that the function really is a function.
2039  CmpObjectType(function, JS_FUNCTION_TYPE, result);
2040  j(not_equal, miss);
2041
2042  // Make sure that the function has an instance prototype.
2043  Label non_instance;
2044  testb(FieldOperand(result, Map::kBitFieldOffset),
2045        Immediate(1 << Map::kHasNonInstancePrototype));
2046  j(not_zero, &non_instance);
2047
2048  // Get the prototype or initial map from the function.
2049  movq(result,
2050       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2051
2052  // If the prototype or initial map is the hole, don't return it and
2053  // simply miss the cache instead. This will allow us to allocate a
2054  // prototype object on-demand in the runtime system.
2055  CompareRoot(result, Heap::kTheHoleValueRootIndex);
2056  j(equal, miss);
2057
2058  // If the function does not have an initial map, we're done.
2059  Label done;
2060  CmpObjectType(result, MAP_TYPE, kScratchRegister);
2061  j(not_equal, &done);
2062
2063  // Get the prototype from the initial map.
2064  movq(result, FieldOperand(result, Map::kPrototypeOffset));
2065  jmp(&done);
2066
2067  // Non-instance prototype: Fetch prototype from constructor field
2068  // in initial map.
2069  bind(&non_instance);
2070  movq(result, FieldOperand(result, Map::kConstructorOffset));
2071
2072  // All done.
2073  bind(&done);
2074}
2075
2076
2077void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2078  if (FLAG_native_code_counters && counter->Enabled()) {
2079    movq(kScratchRegister, ExternalReference(counter));
2080    movl(Operand(kScratchRegister, 0), Immediate(value));
2081  }
2082}
2083
2084
2085void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2086  ASSERT(value > 0);
2087  if (FLAG_native_code_counters && counter->Enabled()) {
2088    movq(kScratchRegister, ExternalReference(counter));
2089    Operand operand(kScratchRegister, 0);
2090    if (value == 1) {
2091      incl(operand);
2092    } else {
2093      addl(operand, Immediate(value));
2094    }
2095  }
2096}
2097
2098
2099void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2100  ASSERT(value > 0);
2101  if (FLAG_native_code_counters && counter->Enabled()) {
2102    movq(kScratchRegister, ExternalReference(counter));
2103    Operand operand(kScratchRegister, 0);
2104    if (value == 1) {
2105      decl(operand);
2106    } else {
2107      subl(operand, Immediate(value));
2108    }
2109  }
2110}
2111
2112#ifdef ENABLE_DEBUGGER_SUPPORT
2113
2114void MacroAssembler::PushRegistersFromMemory(RegList regs) {
2115  ASSERT((regs & ~kJSCallerSaved) == 0);
2116  // Push the content of the memory location to the stack.
2117  for (int i = 0; i < kNumJSCallerSaved; i++) {
2118    int r = JSCallerSavedCode(i);
2119    if ((regs & (1 << r)) != 0) {
2120      ExternalReference reg_addr =
2121          ExternalReference(Debug_Address::Register(i));
2122      movq(kScratchRegister, reg_addr);
2123      push(Operand(kScratchRegister, 0));
2124    }
2125  }
2126}
2127
2128
2129void MacroAssembler::SaveRegistersToMemory(RegList regs) {
2130  ASSERT((regs & ~kJSCallerSaved) == 0);
2131  // Copy the content of registers to memory location.
2132  for (int i = 0; i < kNumJSCallerSaved; i++) {
2133    int r = JSCallerSavedCode(i);
2134    if ((regs & (1 << r)) != 0) {
2135      Register reg = { r };
2136      ExternalReference reg_addr =
2137          ExternalReference(Debug_Address::Register(i));
2138      movq(kScratchRegister, reg_addr);
2139      movq(Operand(kScratchRegister, 0), reg);
2140    }
2141  }
2142}
2143
2144
2145void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
2146  ASSERT((regs & ~kJSCallerSaved) == 0);
2147  // Copy the content of memory location to registers.
2148  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
2149    int r = JSCallerSavedCode(i);
2150    if ((regs & (1 << r)) != 0) {
2151      Register reg = { r };
2152      ExternalReference reg_addr =
2153          ExternalReference(Debug_Address::Register(i));
2154      movq(kScratchRegister, reg_addr);
2155      movq(reg, Operand(kScratchRegister, 0));
2156    }
2157  }
2158}
2159
2160
2161void MacroAssembler::PopRegistersToMemory(RegList regs) {
2162  ASSERT((regs & ~kJSCallerSaved) == 0);
2163  // Pop the content from the stack to the memory location.
2164  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
2165    int r = JSCallerSavedCode(i);
2166    if ((regs & (1 << r)) != 0) {
2167      ExternalReference reg_addr =
2168          ExternalReference(Debug_Address::Register(i));
2169      movq(kScratchRegister, reg_addr);
2170      pop(Operand(kScratchRegister, 0));
2171    }
2172  }
2173}
2174
2175
2176void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
2177                                                    Register scratch,
2178                                                    RegList regs) {
2179  ASSERT(!scratch.is(kScratchRegister));
2180  ASSERT(!base.is(kScratchRegister));
2181  ASSERT(!base.is(scratch));
2182  ASSERT((regs & ~kJSCallerSaved) == 0);
2183  // Copy the content of the stack to the memory location and adjust base.
2184  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
2185    int r = JSCallerSavedCode(i);
2186    if ((regs & (1 << r)) != 0) {
2187      movq(scratch, Operand(base, 0));
2188      ExternalReference reg_addr =
2189          ExternalReference(Debug_Address::Register(i));
2190      movq(kScratchRegister, reg_addr);
2191      movq(Operand(kScratchRegister, 0), scratch);
2192      lea(base, Operand(base, kPointerSize));
2193    }
2194  }
2195}
2196
2197void MacroAssembler::DebugBreak() {
2198  ASSERT(allow_stub_calls());
2199  xor_(rax, rax);  // no arguments
2200  movq(rbx, ExternalReference(Runtime::kDebugBreak));
2201  CEntryStub ces(1);
2202  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2203}
2204#endif  // ENABLE_DEBUGGER_SUPPORT
2205
2206
2207void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2208                                    const ParameterCount& actual,
2209                                    Handle<Code> code_constant,
2210                                    Register code_register,
2211                                    Label* done,
2212                                    InvokeFlag flag) {
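  // Register conventions relied on below (see the ASSERTs): rax holds the
  // actual argument count, rbx the expected count, and rdx the code to invoke
  // when control falls through to the arguments adaptor.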
2213  bool definitely_matches = false;
2214  Label invoke;
2215  if (expected.is_immediate()) {
2216    ASSERT(actual.is_immediate());
2217    if (expected.immediate() == actual.immediate()) {
2218      definitely_matches = true;
2219    } else {
2220      Set(rax, actual.immediate());
2221      if (expected.immediate() ==
2222              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2223        // Don't worry about adapting arguments for built-ins that
2224        // don't want that done. Skip adaptation code by making it look
2225        // like we have a match between expected and actual number of
2226        // arguments.
2227        definitely_matches = true;
2228      } else {
2229        Set(rbx, expected.immediate());
2230      }
2231    }
2232  } else {
2233    if (actual.is_immediate()) {
2234      // Expected is in register, actual is immediate. This is the
2235      // case when we invoke function values without going through the
2236      // IC mechanism.
2237      cmpq(expected.reg(), Immediate(actual.immediate()));
2238      j(equal, &invoke);
2239      ASSERT(expected.reg().is(rbx));
2240      Set(rax, actual.immediate());
2241    } else if (!expected.reg().is(actual.reg())) {
2242      // Both expected and actual are in (different) registers. This
2243      // is the case when we invoke functions using call and apply.
2244      cmpq(expected.reg(), actual.reg());
2245      j(equal, &invoke);
2246      ASSERT(actual.reg().is(rax));
2247      ASSERT(expected.reg().is(rbx));
2248    }
2249  }
2250
2251  if (!definitely_matches) {
2252    Handle<Code> adaptor =
2253        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
2254    if (!code_constant.is_null()) {
2255      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
2256      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
2257    } else if (!code_register.is(rdx)) {
2258      movq(rdx, code_register);
2259    }
2260
2261    if (flag == CALL_FUNCTION) {
2262      Call(adaptor, RelocInfo::CODE_TARGET);
2263      jmp(done);
2264    } else {
2265      Jump(adaptor, RelocInfo::CODE_TARGET);
2266    }
2267    bind(&invoke);
2268  }
2269}
2270
2271
2272void MacroAssembler::InvokeCode(Register code,
2273                                const ParameterCount& expected,
2274                                const ParameterCount& actual,
2275                                InvokeFlag flag) {
2276  Label done;
2277  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
2278  if (flag == CALL_FUNCTION) {
2279    call(code);
2280  } else {
2281    ASSERT(flag == JUMP_FUNCTION);
2282    jmp(code);
2283  }
2284  bind(&done);
2285}
2286
2287
2288void MacroAssembler::InvokeCode(Handle<Code> code,
2289                                const ParameterCount& expected,
2290                                const ParameterCount& actual,
2291                                RelocInfo::Mode rmode,
2292                                InvokeFlag flag) {
2293  Label done;
2294  Register dummy = rax;
2295  InvokePrologue(expected, actual, code, dummy, &done, flag);
2296  if (flag == CALL_FUNCTION) {
2297    Call(code, rmode);
2298  } else {
2299    ASSERT(flag == JUMP_FUNCTION);
2300    Jump(code, rmode);
2301  }
2302  bind(&done);
2303}
2304
2305
2306void MacroAssembler::InvokeFunction(Register function,
2307                                    const ParameterCount& actual,
2308                                    InvokeFlag flag) {
2309  ASSERT(function.is(rdi));
2310  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2311  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
2312  movsxlq(rbx,
2313          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
2314  movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
2315  // Advances rdx to the end of the Code object header, to the start of
2316  // the executable code.
2317  lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
2318
2319  ParameterCount expected(rbx);
2320  InvokeCode(rdx, expected, actual, flag);
2321}
2322
2323
2324void MacroAssembler::InvokeFunction(JSFunction* function,
2325                                    const ParameterCount& actual,
2326                                    InvokeFlag flag) {
2327  ASSERT(function->is_compiled());
2328  // Get the function and setup the context.
2329  Move(rdi, Handle<JSFunction>(function));
2330  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
2331
2332  // Invoke the cached code.
2333  Handle<Code> code(function->code());
2334  ParameterCount expected(function->shared()->formal_parameter_count());
2335  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
2336}
2337
2338
2339void MacroAssembler::EnterFrame(StackFrame::Type type) {
2340  push(rbp);
2341  movq(rbp, rsp);
2342  push(rsi);  // Context.
2343  Push(Smi::FromInt(type));
2344  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
2345  push(kScratchRegister);
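  // Below the saved rbp the frame now holds the context (rsi), the frame type
  // as a smi, and this code object.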
2346  if (FLAG_debug_code) {
2347    movq(kScratchRegister,
2348         Factory::undefined_value(),
2349         RelocInfo::EMBEDDED_OBJECT);
2350    cmpq(Operand(rsp, 0), kScratchRegister);
2351    Check(not_equal, "code object not properly patched");
2352  }
2353}
2354
2355
2356void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2357  if (FLAG_debug_code) {
2358    Move(kScratchRegister, Smi::FromInt(type));
2359    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
2360    Check(equal, "stack frame types must match");
2361  }
2362  movq(rsp, rbp);
2363  pop(rbp);
2364}
2365
2366
2367void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode,
2368                                            bool save_rax) {
2369  // Setup the frame structure on the stack.
2370  // All constants are relative to the frame pointer of the exit frame.
2371  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
2372  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
2373  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
2374  push(rbp);
2375  movq(rbp, rsp);
2376
2377  // Reserve room for the entry stack pointer and push the code object.
2378  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
2379  push(Immediate(0));  // Saved entry sp, patched before call.
2380  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
2381  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
2382
2383  // Save the frame pointer and the context in top.
2384  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
2385  ExternalReference context_address(Top::k_context_address);
2386  if (save_rax) {
2387    movq(r14, rax);  // Backup rax before we use it.
2388  }
2389
2390  movq(rax, rbp);
2391  store_rax(c_entry_fp_address);
2392  movq(rax, rsi);
2393  store_rax(context_address);
2394}
2395
2396void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode,
2397                                            int result_size,
2398                                            int argc) {
2399#ifdef ENABLE_DEBUGGER_SUPPORT
2400  // Save the state of all registers to the stack from the memory
2401  // location. This is needed to allow nested break points.
2402  if (mode == ExitFrame::MODE_DEBUG) {
2403    // TODO(1243899): This should be symmetric to
2404    // CopyRegistersFromStackToMemory() but it isn't! rsp is assumed
2405    // correct here, but computed for the other call. Very error
2406    // prone! FIX THIS.  Actually there are deeper problems with
2407    // register saving than this asymmetry (see the bug report
2408    // associated with this issue).
2409    PushRegistersFromMemory(kJSCallerSaved);
2410  }
2411#endif
2412
2413#ifdef _WIN64
2414  // Reserve space on stack for result and argument structures, if necessary.
2415  int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
2416  // Reserve space for the Arguments object.  The Windows 64-bit ABI
2417  // requires us to pass this structure as a pointer to its location on
2418  // the stack.  The structure contains 2 values.
2419  int argument_stack_space = argc * kPointerSize;
2420  // We also need backing space for 4 parameters, even though
2421  // we only pass one or two parameter, and it is in a register.
2422  int argument_mirror_space = 4 * kPointerSize;
2423  int total_stack_space =
2424      argument_mirror_space + argument_stack_space + result_stack_space;
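  // Illustrative sizing: with argc == 2 and result_size == 1 this reserves
  // 32 + 16 + 0 = 48 bytes.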
2425  subq(rsp, Immediate(total_stack_space));
2426#endif
2427
2428  // Get the required frame alignment for the OS.
2429  static const int kFrameAlignment = OS::ActivationFrameAlignment();
2430  if (kFrameAlignment > 0) {
2431    ASSERT(IsPowerOf2(kFrameAlignment));
2432    movq(kScratchRegister, Immediate(-kFrameAlignment));
2433    and_(rsp, kScratchRegister);
2434  }
2435
2436  // Patch the saved entry sp.
2437  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
2438}
2439
2440
2441void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
2442  EnterExitFramePrologue(mode, true);
2443
2444  // Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
2445  // so it must be retained across the C-call.
2446  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
2447  lea(r12, Operand(rbp, r14, times_pointer_size, offset));
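  // r14 holds the value EnterExitFramePrologue saved from rax (presumably the
  // argument count in the CEntryStub convention), which scales the offset
  // into the caller's frame.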
2448
2449  EnterExitFrameEpilogue(mode, result_size, 2);
2450}
2451
2452
2453void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
2454                                       int stack_space,
2455                                       int argc,
2456                                       int result_size) {
2457  EnterExitFramePrologue(mode, false);
2458
2459  // Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
2460  // so it must be retained across the C-call.
2461  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
2462  lea(r12, Operand(rbp, (stack_space * kPointerSize) + offset));
2463
2464  EnterExitFrameEpilogue(mode, result_size, argc);
2465}
2466
2467
2468void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
2469  // Registers:
2470  // r12 : argv
2471#ifdef ENABLE_DEBUGGER_SUPPORT
2472  // Restore the memory copy of the registers by digging them out from
2473  // the stack. This is needed to allow nested break points.
2474  if (mode == ExitFrame::MODE_DEBUG) {
2475    // It's okay to clobber register rbx below because we don't need
2476    // the function pointer after this.
2477    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
2478    int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
2479    lea(rbx, Operand(rbp, kOffset));
2480    CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
2481  }
2482#endif
2483
2484  // Get the return address from the stack and restore the frame pointer.
2485  movq(rcx, Operand(rbp, 1 * kPointerSize));
2486  movq(rbp, Operand(rbp, 0 * kPointerSize));
2487
2488  // Pop everything up to and including the arguments and the receiver
2489  // from the caller stack.
2490  lea(rsp, Operand(r12, 1 * kPointerSize));
2491
2492  // Restore current context from top and clear it in debug mode.
2493  ExternalReference context_address(Top::k_context_address);
2494  movq(kScratchRegister, context_address);
2495  movq(rsi, Operand(kScratchRegister, 0));
2496#ifdef DEBUG
2497  movq(Operand(kScratchRegister, 0), Immediate(0));
2498#endif
2499
2500  // Push the return address to get ready to return.
2501  push(rcx);
2502
2503  // Clear the top frame.
2504  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
2505  movq(kScratchRegister, c_entry_fp_address);
2506  movq(Operand(kScratchRegister, 0), Immediate(0));
2507}
2508
2509
2510void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
2511                                            Register scratch,
2512                                            Label* miss) {
2513  Label same_contexts;
2514
2515  ASSERT(!holder_reg.is(scratch));
2516  ASSERT(!scratch.is(kScratchRegister));
2517  // Load current lexical context from the stack frame.
2518  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
2519
2520  // When generating debug code, make sure the lexical context is set.
2521  if (FLAG_debug_code) {
2522    cmpq(scratch, Immediate(0));
2523    Check(not_equal, "we should not have an empty lexical context");
2524  }
2525  // Load the global context of the current context.
2526  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
2527  movq(scratch, FieldOperand(scratch, offset));
2528  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
2529
2530  // Check the context is a global context.
2531  if (FLAG_debug_code) {
2532    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
2533        Factory::global_context_map());
2534    Check(equal, "JSGlobalObject::global_context should be a global context.");
2535  }
2536
2537  // Check if both contexts are the same.
2538  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2539  j(equal, &same_contexts);
2540
2541  // Compare security tokens.
2542  // Check that the security token in the calling global object is
2543  // compatible with the security token in the receiving global
2544  // object.
2545
2546  // Check the context is a global context.
2547  if (FLAG_debug_code) {
2548    // Preserve original value of holder_reg.
2549    push(holder_reg);
2550    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2551    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
2552    Check(not_equal, "JSGlobalProxy::context() should not be null.");
2553
2554    // Read the first word and compare to global_context_map().
2555    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
2556    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
2557    Check(equal, "JSGlobalObject::global_context should be a global context.");
2558    pop(holder_reg);
2559  }
2560
2561  movq(kScratchRegister,
2562       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2563  int token_offset =
2564      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
2565  movq(scratch, FieldOperand(scratch, token_offset));
2566  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
2567  j(not_equal, miss);
2568
2569  bind(&same_contexts);
2570}
2571
2572
2573void MacroAssembler::LoadAllocationTopHelper(Register result,
2574                                             Register result_end,
2575                                             Register scratch,
2576                                             AllocationFlags flags) {
2577  ExternalReference new_space_allocation_top =
2578      ExternalReference::new_space_allocation_top_address();
2579
2580  // Just return if allocation top is already known.
2581  if ((flags & RESULT_CONTAINS_TOP) != 0) {
2582    // No use of scratch if allocation top is provided.
2583    ASSERT(!scratch.is_valid());
2584#ifdef DEBUG
2585    // Assert that result actually contains top on entry.
2586    movq(kScratchRegister, new_space_allocation_top);
2587    cmpq(result, Operand(kScratchRegister, 0));
2588    Check(equal, "Unexpected allocation top");
2589#endif
2590    return;
2591  }
2592
2593  // Move address of new object to result. Use scratch register if available,
2594  // and keep address in scratch until call to UpdateAllocationTopHelper.
2595  if (scratch.is_valid()) {
2596    ASSERT(!scratch.is(result_end));
2597    movq(scratch, new_space_allocation_top);
2598    movq(result, Operand(scratch, 0));
2599  } else if (result.is(rax)) {
2600    load_rax(new_space_allocation_top);
2601  } else {
2602    movq(kScratchRegister, new_space_allocation_top);
2603    movq(result, Operand(kScratchRegister, 0));
2604  }
2605}
2606
2607
2608void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
2609                                               Register scratch) {
2610  if (FLAG_debug_code) {
2611    testq(result_end, Immediate(kObjectAlignmentMask));
2612    Check(zero, "Unaligned allocation in new space");
2613  }
2614
2615  ExternalReference new_space_allocation_top =
2616      ExternalReference::new_space_allocation_top_address();
2617
2618  // Update new top.
2619  if (result_end.is(rax)) {
2620    // rax can be stored directly to a memory location.
2621    store_rax(new_space_allocation_top);
2622  } else {
2623    // Register required - use scratch provided if available.
2624    if (scratch.is_valid()) {
2625      movq(Operand(scratch, 0), result_end);
2626    } else {
2627      movq(kScratchRegister, new_space_allocation_top);
2628      movq(Operand(kScratchRegister, 0), result_end);
2629    }
2630  }
2631}
2632
2633
2634void MacroAssembler::AllocateInNewSpace(int object_size,
2635                                        Register result,
2636                                        Register result_end,
2637                                        Register scratch,
2638                                        Label* gc_required,
2639                                        AllocationFlags flags) {
2640  ASSERT(!result.is(result_end));
2641
2642  // Load address of new object into result.
2643  LoadAllocationTopHelper(result, result_end, scratch, flags);
2644
2645  // Calculate new top and bail out if new space is exhausted.
2646  ExternalReference new_space_allocation_limit =
2647      ExternalReference::new_space_allocation_limit_address();
2648
2649  Register top_reg = result_end.is_valid() ? result_end : result;
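  // If no distinct result_end register was supplied, the new top is computed
  // in result itself and rolled back by object_size once the allocation top
  // has been updated below.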
2650
2651  if (top_reg.is(result)) {
2652    addq(top_reg, Immediate(object_size));
2653  } else {
2654    lea(top_reg, Operand(result, object_size));
2655  }
2656  movq(kScratchRegister, new_space_allocation_limit);
2657  cmpq(top_reg, Operand(kScratchRegister, 0));
2658  j(above, gc_required);
2659
2660  // Update allocation top.
2661  UpdateAllocationTopHelper(top_reg, scratch);
2662
2663  if (top_reg.is(result)) {
2664    if ((flags & TAG_OBJECT) != 0) {
2665      subq(result, Immediate(object_size - kHeapObjectTag));
2666    } else {
2667      subq(result, Immediate(object_size));
2668    }
2669  } else if ((flags & TAG_OBJECT) != 0) {
2670    // Tag the result if requested.
2671    addq(result, Immediate(kHeapObjectTag));
2672  }
2673}
2674
2675
2676void MacroAssembler::AllocateInNewSpace(int header_size,
2677                                        ScaleFactor element_size,
2678                                        Register element_count,
2679                                        Register result,
2680                                        Register result_end,
2681                                        Register scratch,
2682                                        Label* gc_required,
2683                                        AllocationFlags flags) {
2684  ASSERT(!result.is(result_end));
2685
2686  // Load address of new object into result.
2687  LoadAllocationTopHelper(result, result_end, scratch, flags);
2688
2689  // Calculate new top and bail out if new space is exhausted.
2690  ExternalReference new_space_allocation_limit =
2691      ExternalReference::new_space_allocation_limit_address();
2692  lea(result_end, Operand(result, element_count, element_size, header_size));
2693  movq(kScratchRegister, new_space_allocation_limit);
2694  cmpq(result_end, Operand(kScratchRegister, 0));
2695  j(above, gc_required);
2696
2697  // Update allocation top.
2698  UpdateAllocationTopHelper(result_end, scratch);
2699
2700  // Tag the result if requested.
2701  if ((flags & TAG_OBJECT) != 0) {
2702    addq(result, Immediate(kHeapObjectTag));
2703  }
2704}
2705
2706
2707void MacroAssembler::AllocateInNewSpace(Register object_size,
2708                                        Register result,
2709                                        Register result_end,
2710                                        Register scratch,
2711                                        Label* gc_required,
2712                                        AllocationFlags flags) {
2713  // Load address of new object into result.
2714  LoadAllocationTopHelper(result, result_end, scratch, flags);
2715
2716  // Calculate new top and bail out if new space is exhausted.
2717  ExternalReference new_space_allocation_limit =
2718      ExternalReference::new_space_allocation_limit_address();
2719  if (!object_size.is(result_end)) {
2720    movq(result_end, object_size);
2721  }
2722  addq(result_end, result);
2723  movq(kScratchRegister, new_space_allocation_limit);
2724  cmpq(result_end, Operand(kScratchRegister, 0));
2725  j(above, gc_required);
2726
2727  // Update allocation top.
2728  UpdateAllocationTopHelper(result_end, scratch);
2729
2730  // Tag the result if requested.
2731  if ((flags & TAG_OBJECT) != 0) {
2732    addq(result, Immediate(kHeapObjectTag));
2733  }
2734}
2735
2736
2737void MacroAssembler::UndoAllocationInNewSpace(Register object) {
2738  ExternalReference new_space_allocation_top =
2739      ExternalReference::new_space_allocation_top_address();
2740
2741  // Make sure the object has no tag before resetting top.
2742  and_(object, Immediate(~kHeapObjectTagMask));
2743  movq(kScratchRegister, new_space_allocation_top);
2744#ifdef DEBUG
2745  cmpq(object, Operand(kScratchRegister, 0));
2746  Check(below, "Undo allocation of non-allocated memory");
2747#endif
2748  movq(Operand(kScratchRegister, 0), object);
2749}
2750
2751
2752void MacroAssembler::AllocateHeapNumber(Register result,
2753                                        Register scratch,
2754                                        Label* gc_required) {
2755  // Allocate heap number in new space.
2756  AllocateInNewSpace(HeapNumber::kSize,
2757                     result,
2758                     scratch,
2759                     no_reg,
2760                     gc_required,
2761                     TAG_OBJECT);
2762
2763  // Set the map.
2764  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
2765  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2766}
2767
2768
2769void MacroAssembler::AllocateTwoByteString(Register result,
2770                                           Register length,
2771                                           Register scratch1,
2772                                           Register scratch2,
2773                                           Register scratch3,
2774                                           Label* gc_required) {
2775  // Calculate the number of bytes needed for the characters in the string while
2776  // observing object alignment.
2777  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
2778                               kObjectAlignmentMask;
2779  ASSERT(kShortSize == 2);
2780  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
2781  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
2782                kHeaderAlignment));
2783  and_(scratch1, Immediate(~kObjectAlignmentMask));
2784  if (kHeaderAlignment > 0) {
2785    subq(scratch1, Immediate(kHeaderAlignment));
2786  }
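  // scratch1 now holds the size of the character area, rounded up so that the
  // header plus characters occupy a multiple of kObjectAlignment bytes.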
2787
2788  // Allocate two byte string in new space.
2789  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
2790                     times_1,
2791                     scratch1,
2792                     result,
2793                     scratch2,
2794                     scratch3,
2795                     gc_required,
2796                     TAG_OBJECT);
2797
2798  // Set the map, length and hash field.
2799  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
2800  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2801  Integer32ToSmi(scratch1, length);
2802  movq(FieldOperand(result, String::kLengthOffset), scratch1);
2803  movq(FieldOperand(result, String::kHashFieldOffset),
2804       Immediate(String::kEmptyHashField));
2805}
2806
2807
2808void MacroAssembler::AllocateAsciiString(Register result,
2809                                         Register length,
2810                                         Register scratch1,
2811                                         Register scratch2,
2812                                         Register scratch3,
2813                                         Label* gc_required) {
2814  // Calculate the number of bytes needed for the characters in the string while
2815  // observing object alignment.
2816  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
2817                               kObjectAlignmentMask;
2818  movl(scratch1, length);
2819  ASSERT(kCharSize == 1);
2820  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
2821  and_(scratch1, Immediate(~kObjectAlignmentMask));
2822  if (kHeaderAlignment > 0) {
2823    subq(scratch1, Immediate(kHeaderAlignment));
2824  }
2825
2826  // Allocate ascii string in new space.
2827  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
2828                     times_1,
2829                     scratch1,
2830                     result,
2831                     scratch2,
2832                     scratch3,
2833                     gc_required,
2834                     TAG_OBJECT);
2835
2836  // Set the map, length and hash field.
2837  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
2838  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2839  Integer32ToSmi(scratch1, length);
2840  movq(FieldOperand(result, String::kLengthOffset), scratch1);
2841  movq(FieldOperand(result, String::kHashFieldOffset),
2842       Immediate(String::kEmptyHashField));
2843}
2844
2845
2846void MacroAssembler::AllocateConsString(Register result,
2847                                        Register scratch1,
2848                                        Register scratch2,
2849                                        Label* gc_required) {
2850  // Allocate cons string object in new space.
2851  AllocateInNewSpace(ConsString::kSize,
2852                     result,
2853                     scratch1,
2854                     scratch2,
2855                     gc_required,
2856                     TAG_OBJECT);
2857
2858  // Set the map. The other fields are left uninitialized.
2859  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
2860  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2861}
2862
2863
2864void MacroAssembler::AllocateAsciiConsString(Register result,
2865                                             Register scratch1,
2866                                             Register scratch2,
2867                                             Label* gc_required) {
2868  // Allocate ascii cons string object in new space.
2869  AllocateInNewSpace(ConsString::kSize,
2870                     result,
2871                     scratch1,
2872                     scratch2,
2873                     gc_required,
2874                     TAG_OBJECT);
2875
2876  // Set the map. The other fields are left uninitialized.
2877  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
2878  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2879}
2880
2881
2882void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2883  if (context_chain_length > 0) {
2884    // Move up the chain of contexts to the context containing the slot.
2885    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
2886    // Load the function context (which is the incoming, outer context).
2887    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2888    for (int i = 1; i < context_chain_length; i++) {
2889      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2890      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2891    }
2892    // The context may be an intermediate context, not a function context.
2893    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2894  } else {  // context is the current function context.
2895    // The context may be an intermediate context, not a function context.
2896    movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2897  }
2898}
2899
2900
2901int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2902  // On Windows 64 stack slots are reserved by the caller for all arguments
2903  // including the ones passed in registers, and space is always allocated for
2904  // the four register arguments even if the function takes fewer than four
2905  // arguments.
2906  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
2907  // and the caller does not reserve stack slots for them.
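  // Illustrative values: for 5 arguments this returns 5 on Windows and 0
  // elsewhere; for 8 arguments it returns 8 and 2 respectively.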
2908  ASSERT(num_arguments >= 0);
2909#ifdef _WIN64
2910  static const int kMinimumStackSlots = 4;
2911  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
2912  return num_arguments;
2913#else
2914  static const int kRegisterPassedArguments = 6;
2915  if (num_arguments < kRegisterPassedArguments) return 0;
2916  return num_arguments - kRegisterPassedArguments;
2917#endif
2918}
2919
2920
2921void MacroAssembler::PrepareCallCFunction(int num_arguments) {
2922  int frame_alignment = OS::ActivationFrameAlignment();
2923  ASSERT(frame_alignment != 0);
2924  ASSERT(num_arguments >= 0);
2925  // Make stack end at alignment and allocate space for arguments and old rsp.
2926  movq(kScratchRegister, rsp);
2927  ASSERT(IsPowerOf2(frame_alignment));
2928  int argument_slots_on_stack =
2929      ArgumentStackSlotsForCFunctionCall(num_arguments);
2930  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
2931  and_(rsp, Immediate(-frame_alignment));
2932  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
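  // The caller's original rsp is stored just above the argument slots;
  // CallCFunction reloads it from there to unwind after the call.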
2933}
2934
2935
2936void MacroAssembler::CallCFunction(ExternalReference function,
2937                                   int num_arguments) {
2938  movq(rax, function);
2939  CallCFunction(rax, num_arguments);
2940}
2941
2942
2943void MacroAssembler::CallCFunction(Register function, int num_arguments) {
2944  // Check stack alignment.
2945  if (FLAG_debug_code) {
2946    CheckStackAlignment();
2947  }
2948
2949  call(function);
2950  ASSERT(OS::ActivationFrameAlignment() != 0);
2951  ASSERT(num_arguments >= 0);
2952  int argument_slots_on_stack =
2953      ArgumentStackSlotsForCFunctionCall(num_arguments);
2954  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
2955}
2956
2957
2958CodePatcher::CodePatcher(byte* address, int size)
2959    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
2960  // Create a new macro assembler pointing to the address of the code to patch.
2961  // The size is adjusted with kGap in order for the assembler to generate size
2962  // bytes of instructions without failing with buffer size constraints.
2963  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2964}
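// The patcher emits instructions directly over the existing code; the
// destructor below flushes the instruction cache for the patched region and
// asserts that exactly size bytes were written.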
2965
2966
2967CodePatcher::~CodePatcher() {
2968  // Indicate that code has changed.
2969  CPU::FlushICache(address_, size_);
2970
2971  // Check that the code was patched as expected.
2972  ASSERT(masm_.pc_ == address_ + size_);
2973  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2974}
2975
2976} }  // namespace v8::internal
2977
2978#endif  // V8_TARGET_ARCH_X64
2979