macro-assembler-x64.cc revision c7cc028aaeedbbfa11c11d0b7b243b3d9e837ed9
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_X64)
31
32#include "bootstrapper.h"
33#include "codegen.h"
34#include "assembler-x64.h"
35#include "macro-assembler-x64.h"
36#include "serialize.h"
37#include "debug.h"
38#include "heap.h"
39
40namespace v8 {
41namespace internal {
42
43MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
44    : Assembler(arg_isolate, buffer, size),
45      generating_stub_(false),
46      allow_stub_calls_(true),
47      has_frame_(false),
48      root_array_available_(true) {
49  if (isolate() != NULL) {
50    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
51                                  isolate());
52  }
53}
54
55
56static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
57  Address roots_register_value = kRootRegisterBias +
58      reinterpret_cast<Address>(isolate->heap()->roots_array_start());
59  intptr_t delta = other.address() - roots_register_value;
60  return delta;
61}
62
63
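// When the root array is available and we are not generating a snapshot,
// any external reference within a 32-bit displacement of kRootRegister
// (which holds roots_array_start + kRootRegisterBias, see RootRegisterDelta
// above) can be addressed directly off that register, saving a scratch
// register and a 64-bit immediate move.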
64Operand MacroAssembler::ExternalOperand(ExternalReference target,
65                                        Register scratch) {
66  if (root_array_available_ && !Serializer::enabled()) {
67    intptr_t delta = RootRegisterDelta(target, isolate());
68    if (is_int32(delta)) {
69      Serializer::TooLateToEnableNow();
70      return Operand(kRootRegister, static_cast<int32_t>(delta));
71    }
72  }
73  movq(scratch, target);
74  return Operand(scratch, 0);
75}
76
77
78void MacroAssembler::Load(Register destination, ExternalReference source) {
79  if (root_array_available_ && !Serializer::enabled()) {
80    intptr_t delta = RootRegisterDelta(source, isolate());
81    if (is_int32(delta)) {
82      Serializer::TooLateToEnableNow();
83      movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
84      return;
85    }
86  }
87  // Safe code.
88  if (destination.is(rax)) {
89    load_rax(source);
90  } else {
91    movq(kScratchRegister, source);
92    movq(destination, Operand(kScratchRegister, 0));
93  }
94}
95
96
97void MacroAssembler::Store(ExternalReference destination, Register source) {
98  if (root_array_available_ && !Serializer::enabled()) {
99    intptr_t delta = RootRegisterDelta(destination, isolate());
100    if (is_int32(delta)) {
101      Serializer::TooLateToEnableNow();
102      movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
103      return;
104    }
105  }
106  // Safe code.
107  if (source.is(rax)) {
108    store_rax(destination);
109  } else {
110    movq(kScratchRegister, destination);
111    movq(Operand(kScratchRegister, 0), source);
112  }
113}
114
115
116void MacroAssembler::LoadAddress(Register destination,
117                                 ExternalReference source) {
118  if (root_array_available_ && !Serializer::enabled()) {
119    intptr_t delta = RootRegisterDelta(source, isolate());
120    if (is_int32(delta)) {
121      Serializer::TooLateToEnableNow();
122      lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
123      return;
124    }
125  }
126  // Safe code.
127  movq(destination, source);
128}
129
130
131int MacroAssembler::LoadAddressSize(ExternalReference source) {
132  if (root_array_available_ && !Serializer::enabled()) {
133    // This calculation depends on the internals of LoadAddress.
134    // Its correctness is ensured by the asserts in the Call
135    // instruction below.
136    intptr_t delta = RootRegisterDelta(source, isolate());
137    if (is_int32(delta)) {
138      Serializer::TooLateToEnableNow();
139      // Operand is lea(scratch, Operand(kRootRegister, delta));
140      // Encoding: REX.W 8D ModRM Disp8/Disp32, i.e. 4 or 7 bytes.
141      int size = 4;
142      if (!is_int8(static_cast<int32_t>(delta))) {
143        size += 3;  // Need full four-byte displacement in lea.
144      }
145      return size;
146    }
147  }
148  // Size of movq(destination, src);
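  // (REX.W prefix, B8+r opcode, then a 64-bit immediate: 1 + 1 + 8 bytes.)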
149  return 10;
150}
151
152
153void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
154  ASSERT(root_array_available_);
155  movq(destination, Operand(kRootRegister,
156                            (index << kPointerSizeLog2) - kRootRegisterBias));
157}
158
159
160void MacroAssembler::LoadRootIndexed(Register destination,
161                                     Register variable_offset,
162                                     int fixed_offset) {
163  ASSERT(root_array_available_);
164  movq(destination,
165       Operand(kRootRegister,
166               variable_offset, times_pointer_size,
167               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
168}
169
170
171void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
172  ASSERT(root_array_available_);
173  movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
174       source);
175}
176
177
178void MacroAssembler::PushRoot(Heap::RootListIndex index) {
179  ASSERT(root_array_available_);
180  push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
181}
182
183
184void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
185  ASSERT(root_array_available_);
186  cmpq(with, Operand(kRootRegister,
187                     (index << kPointerSizeLog2) - kRootRegisterBias));
188}
189
190
191void MacroAssembler::CompareRoot(const Operand& with,
192                                 Heap::RootListIndex index) {
193  ASSERT(root_array_available_);
194  ASSERT(!with.AddressUsesRegister(kScratchRegister));
195  LoadRoot(kScratchRegister, index);
196  cmpq(with, kScratchRegister);
197}
198
199
200void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
201                                         Register addr,
202                                         Register scratch,
203                                         SaveFPRegsMode save_fp,
204                                         RememberedSetFinalAction and_then) {
205  if (FLAG_debug_code) {
206    Label ok;
207    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
208    int3();
209    bind(&ok);
210  }
211  // Load store buffer top.
212  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
213  // Store pointer to buffer.
214  movq(Operand(scratch, 0), addr);
215  // Increment buffer top.
216  addq(scratch, Immediate(kPointerSize));
217  // Write back new top of buffer.
218  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
219  // Call stub on end of buffer.
220  Label done;
221  // Check for end of buffer.
222  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
223  if (and_then == kReturnAtEnd) {
224    Label buffer_overflowed;
225    j(not_equal, &buffer_overflowed, Label::kNear);
226    ret(0);
227    bind(&buffer_overflowed);
228  } else {
229    ASSERT(and_then == kFallThroughAtEnd);
230    j(equal, &done, Label::kNear);
231  }
232  StoreBufferOverflowStub store_buffer_overflow =
233      StoreBufferOverflowStub(save_fp);
234  CallStub(&store_buffer_overflow);
235  if (and_then == kReturnAtEnd) {
236    ret(0);
237  } else {
238    ASSERT(and_then == kFallThroughAtEnd);
239    bind(&done);
240  }
241}
242
243
244void MacroAssembler::InNewSpace(Register object,
245                                Register scratch,
246                                Condition cc,
247                                Label* branch,
248                                Label::Distance distance) {
249  if (Serializer::enabled()) {
250    // Can't do arithmetic on external references if it might get serialized.
251    // The mask isn't really an address.  We load it as an external reference in
252    // case the size of the new space is different between the snapshot maker
253    // and the running system.
254    if (scratch.is(object)) {
255      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
256      and_(scratch, kScratchRegister);
257    } else {
258      movq(scratch, ExternalReference::new_space_mask(isolate()));
259      and_(scratch, object);
260    }
261    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
262    cmpq(scratch, kScratchRegister);
263    j(cc, branch, distance);
264  } else {
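    // Outside the serializer the constants can be folded in directly:
    // (object - new_space_start) masked with the new-space mask is zero
    // exactly when the object lies in new space.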
265    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
266    intptr_t new_space_start =
267        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
268    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
269    if (scratch.is(object)) {
270      addq(scratch, kScratchRegister);
271    } else {
272      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
273    }
274    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
275    j(cc, branch, distance);
276  }
277}
278
279
280void MacroAssembler::RecordWriteField(
281    Register object,
282    int offset,
283    Register value,
284    Register dst,
285    SaveFPRegsMode save_fp,
286    RememberedSetAction remembered_set_action,
287    SmiCheck smi_check) {
288  // The compiled code assumes that record write doesn't change the
289  // context register, so we check that none of the clobbered
290  // registers are rsi.
291  ASSERT(!value.is(rsi) && !dst.is(rsi));
292
293  // First, check if a write barrier is even needed. The tests below
294  // catch stores of Smis.
295  Label done;
296
297  // Skip barrier if writing a smi.
298  if (smi_check == INLINE_SMI_CHECK) {
299    JumpIfSmi(value, &done);
300  }
301
302  // Although the object register is tagged, the offset is relative to the start
303  // of the object, so the offset must be a multiple of kPointerSize.
304  ASSERT(IsAligned(offset, kPointerSize));
305
306  lea(dst, FieldOperand(object, offset));
307  if (emit_debug_code()) {
308    Label ok;
309    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
310    j(zero, &ok, Label::kNear);
311    int3();
312    bind(&ok);
313  }
314
315  RecordWrite(
316      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
317
318  bind(&done);
319
320  // Clobber clobbered input registers when running with the debug-code flag
321  // turned on to provoke errors.
322  if (emit_debug_code()) {
323    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
324    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
325  }
326}
327
328
329void MacroAssembler::RecordWriteArray(Register object,
330                                      Register value,
331                                      Register index,
332                                      SaveFPRegsMode save_fp,
333                                      RememberedSetAction remembered_set_action,
334                                      SmiCheck smi_check) {
335  // First, check if a write barrier is even needed. The tests below
336  // catch stores of Smis.
337  Label done;
338
339  // Skip barrier if writing a smi.
340  if (smi_check == INLINE_SMI_CHECK) {
341    JumpIfSmi(value, &done);
342  }
343
344  // Array access: calculate the destination address. Index is not a smi.
345  Register dst = index;
346  lea(dst, Operand(object, index, times_pointer_size,
347                   FixedArray::kHeaderSize - kHeapObjectTag));
348
349  RecordWrite(
350      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
351
352  bind(&done);
353
354  // Clobber clobbered input registers when running with the debug-code flag
355  // turned on to provoke errors.
356  if (emit_debug_code()) {
357    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
358    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
359  }
360}
361
362
363void MacroAssembler::RecordWrite(Register object,
364                                 Register address,
365                                 Register value,
366                                 SaveFPRegsMode fp_mode,
367                                 RememberedSetAction remembered_set_action,
368                                 SmiCheck smi_check) {
369  // The compiled code assumes that record write doesn't change the
370  // context register, so we check that none of the clobbered
371  // registers are rsi.
372  ASSERT(!value.is(rsi) && !address.is(rsi));
373
374  ASSERT(!object.is(value));
375  ASSERT(!object.is(address));
376  ASSERT(!value.is(address));
377  if (emit_debug_code()) {
378    AbortIfSmi(object);
379  }
380
381  if (remembered_set_action == OMIT_REMEMBERED_SET &&
382      !FLAG_incremental_marking) {
383    return;
384  }
385
386  if (FLAG_debug_code) {
387    Label ok;
388    cmpq(value, Operand(address, 0));
389    j(equal, &ok, Label::kNear);
390    int3();
391    bind(&ok);
392  }
393
394  // First, check if a write barrier is even needed. The tests below
395  // catch stores of smis and stores into the young generation.
396  Label done;
397
398  if (smi_check == INLINE_SMI_CHECK) {
399    // Skip barrier if writing a smi.
400    JumpIfSmi(value, &done);
401  }
402
403  CheckPageFlag(value,
404                value,  // Used as scratch.
405                MemoryChunk::kPointersToHereAreInterestingMask,
406                zero,
407                &done,
408                Label::kNear);
409
410  CheckPageFlag(object,
411                value,  // Used as scratch.
412                MemoryChunk::kPointersFromHereAreInterestingMask,
413                zero,
414                &done,
415                Label::kNear);
416
417  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
418  CallStub(&stub);
419
420  bind(&done);
421
422  // Clobber clobbered registers when running with the debug-code flag
423  // turned on to provoke errors.
424  if (emit_debug_code()) {
425    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
426    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
427  }
428}
429
430
431void MacroAssembler::Assert(Condition cc, const char* msg) {
432  if (emit_debug_code()) Check(cc, msg);
433}
434
435
436void MacroAssembler::AssertFastElements(Register elements) {
437  if (emit_debug_code()) {
438    Label ok;
439    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
440                Heap::kFixedArrayMapRootIndex);
441    j(equal, &ok, Label::kNear);
442    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
443                Heap::kFixedDoubleArrayMapRootIndex);
444    j(equal, &ok, Label::kNear);
445    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
446                Heap::kFixedCOWArrayMapRootIndex);
447    j(equal, &ok, Label::kNear);
448    Abort("JSObject with fast elements map has slow elements");
449    bind(&ok);
450  }
451}
452
453
454void MacroAssembler::Check(Condition cc, const char* msg) {
455  Label L;
456  j(cc, &L, Label::kNear);
457  Abort(msg);
458  // Control will not return here.
459  bind(&L);
460}
461
462
463void MacroAssembler::CheckStackAlignment() {
464  int frame_alignment = OS::ActivationFrameAlignment();
465  int frame_alignment_mask = frame_alignment - 1;
466  if (frame_alignment > kPointerSize) {
467    ASSERT(IsPowerOf2(frame_alignment));
468    Label alignment_as_expected;
469    testq(rsp, Immediate(frame_alignment_mask));
470    j(zero, &alignment_as_expected, Label::kNear);
471    // Abort if stack is not aligned.
472    int3();
473    bind(&alignment_as_expected);
474  }
475}
476
477
478void MacroAssembler::NegativeZeroTest(Register result,
479                                      Register op,
480                                      Label* then_label) {
481  Label ok;
482  testl(result, result);
483  j(not_zero, &ok, Label::kNear);
484  testl(op, op);
485  j(sign, then_label);
486  bind(&ok);
487}
488
489
490void MacroAssembler::Abort(const char* msg) {
491  // We want to pass the msg string like a smi to avoid GC
492  // problems; however, msg is not guaranteed to be aligned
493  // properly. Instead, we pass an aligned pointer that is
494  // a proper v8 smi, but also pass the alignment difference
495  // from the real pointer as a smi.
496  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
497  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
498  // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
499  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
500#ifdef DEBUG
501  if (msg != NULL) {
502    RecordComment("Abort message: ");
503    RecordComment(msg);
504  }
505#endif
506  push(rax);
507  movq(kScratchRegister, p0, RelocInfo::NONE);
508  push(kScratchRegister);
509  movq(kScratchRegister,
510       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
511       RelocInfo::NONE);
512  push(kScratchRegister);
513
514  if (!has_frame_) {
515    // We don't actually want to generate a pile of code for this, so just
516    // claim there is a stack frame, without generating one.
517    FrameScope scope(this, StackFrame::NONE);
518    CallRuntime(Runtime::kAbort, 2);
519  } else {
520    CallRuntime(Runtime::kAbort, 2);
521  }
522  // Control will not return here.
523  int3();
524}
525
526
527void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
528  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
529  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
530}
531
532
533void MacroAssembler::TailCallStub(CodeStub* stub) {
534  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
535  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
536}
537
538
539void MacroAssembler::StubReturn(int argc) {
540  ASSERT(argc >= 1 && generating_stub());
541  ret((argc - 1) * kPointerSize);
542}
543
544
545bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
546  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
547  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
548}
549
550
551void MacroAssembler::IllegalOperation(int num_arguments) {
552  if (num_arguments > 0) {
553    addq(rsp, Immediate(num_arguments * kPointerSize));
554  }
555  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
556}
557
558
559void MacroAssembler::IndexFromHash(Register hash, Register index) {
560  // The assert checks that the constants for the maximum number of digits
561  // for an array index cached in the hash field and the number of bits
562  // reserved for it do not conflict.
563  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
564         (1 << String::kArrayIndexValueBits));
565  // We want the smi-tagged index in key. Even if we subsequently go to
566  // the slow case, converting the key to a smi is always valid.
567  // key: string key
568  // hash: key's hash field, including its array index value.
569  and_(hash, Immediate(String::kArrayIndexValueMask));
570  shr(hash, Immediate(String::kHashShift));
571  // Here we actually clobber the key, which would be used if we called into
572  // the runtime later. However, as the new key is the numeric value of a
573  // string key, there is no difference in using either key.
574  Integer32ToSmi(index, hash);
575}
576
577
578void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
579  CallRuntime(Runtime::FunctionForId(id), num_arguments);
580}
581
582
583void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
584  const Runtime::Function* function = Runtime::FunctionForId(id);
585  Set(rax, function->nargs);
586  LoadAddress(rbx, ExternalReference(function, isolate()));
587  CEntryStub ces(1, kSaveFPRegs);
588  CallStub(&ces);
589}
590
591
592void MacroAssembler::CallRuntime(const Runtime::Function* f,
593                                 int num_arguments) {
594  // If the expected number of arguments of the runtime function is
595  // constant, we check that the actual number of arguments matches the
596  // expectation.
597  if (f->nargs >= 0 && f->nargs != num_arguments) {
598    IllegalOperation(num_arguments);
599    return;
600  }
601
602  // TODO(1236192): Most runtime routines don't need the number of
603  // arguments passed in because it is constant. At some point we
604  // should remove this need and make the runtime routine entry code
605  // smarter.
606  Set(rax, num_arguments);
607  LoadAddress(rbx, ExternalReference(f, isolate()));
608  CEntryStub ces(f->result_size);
609  CallStub(&ces);
610}
611
612
613void MacroAssembler::CallExternalReference(const ExternalReference& ext,
614                                           int num_arguments) {
615  Set(rax, num_arguments);
616  LoadAddress(rbx, ext);
617
618  CEntryStub stub(1);
619  CallStub(&stub);
620}
621
622
623void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
624                                               int num_arguments,
625                                               int result_size) {
626  // ----------- S t a t e -------------
627  //  -- rsp[0] : return address
628  //  -- rsp[8] : argument num_arguments - 1
629  //  ...
630  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
631  // -----------------------------------
632
633  // TODO(1236192): Most runtime routines don't need the number of
634  // arguments passed in because it is constant. At some point we
635  // should remove this need and make the runtime routine entry code
636  // smarter.
637  Set(rax, num_arguments);
638  JumpToExternalReference(ext, result_size);
639}
640
641
642void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
643                                     int num_arguments,
644                                     int result_size) {
645  TailCallExternalReference(ExternalReference(fid, isolate()),
646                            num_arguments,
647                            result_size);
648}
649
650
651static int Offset(ExternalReference ref0, ExternalReference ref1) {
652  int64_t offset = (ref0.address() - ref1.address());
653  // Check that the offset fits into an int.
654  ASSERT(static_cast<int>(offset) == offset);
655  return static_cast<int>(offset);
656}
657
658
659void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
660#ifdef _WIN64
661  // We need to prepare a slot for the result handle on the stack and put
662  // a pointer to it into the first argument register.
663  EnterApiExitFrame(arg_stack_space + 1);
664
665  // rcx must be used to pass the pointer to the return value slot.
666  lea(rcx, StackSpaceOperand(arg_stack_space));
667#else
668  EnterApiExitFrame(arg_stack_space);
669#endif
670}
671
672
673void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
674                                              int stack_space) {
675  Label empty_result;
676  Label prologue;
677  Label promote_scheduled_exception;
678  Label delete_allocated_handles;
679  Label leave_exit_frame;
680  Label write_back;
681
682  Factory* factory = isolate()->factory();
683  ExternalReference next_address =
684      ExternalReference::handle_scope_next_address();
685  const int kNextOffset = 0;
686  const int kLimitOffset = Offset(
687      ExternalReference::handle_scope_limit_address(),
688      next_address);
689  const int kLevelOffset = Offset(
690      ExternalReference::handle_scope_level_address(),
691      next_address);
692  ExternalReference scheduled_exception_address =
693      ExternalReference::scheduled_exception_address(isolate());
694
695  // Allocate HandleScope in callee-save registers.
696  Register prev_next_address_reg = r14;
697  Register prev_limit_reg = rbx;
698  Register base_reg = r15;
699  movq(base_reg, next_address);
700  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
701  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
702  addl(Operand(base_reg, kLevelOffset), Immediate(1));
703  // Call the api function!
704  movq(rax, reinterpret_cast<int64_t>(function_address),
705       RelocInfo::RUNTIME_ENTRY);
706  call(rax);
707
708#ifdef _WIN64
709  // rax keeps a pointer to v8::Handle, unpack it.
710  movq(rax, Operand(rax, 0));
711#endif
712  // Check if the result handle holds 0.
713  testq(rax, rax);
714  j(zero, &empty_result);
715  // It was non-zero.  Dereference to get the result value.
716  movq(rax, Operand(rax, 0));
717  bind(&prologue);
718
719  // No more valid handles (the result handle was the last one). Restore
720  // previous handle scope.
721  subl(Operand(base_reg, kLevelOffset), Immediate(1));
722  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
723  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
724  j(not_equal, &delete_allocated_handles);
725  bind(&leave_exit_frame);
726
727  // Check if the function scheduled an exception.
728  movq(rsi, scheduled_exception_address);
729  Cmp(Operand(rsi, 0), factory->the_hole_value());
730  j(not_equal, &promote_scheduled_exception);
731
732  LeaveApiExitFrame();
733  ret(stack_space * kPointerSize);
734
735  bind(&promote_scheduled_exception);
736  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
737
738  bind(&empty_result);
739  // It was zero; the result is undefined.
740  Move(rax, factory->undefined_value());
741  jmp(&prologue);
742
743  // HandleScope limit has changed. Delete allocated extensions.
744  bind(&delete_allocated_handles);
745  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
746  movq(prev_limit_reg, rax);
747#ifdef _WIN64
748  LoadAddress(rcx, ExternalReference::isolate_address());
749#else
750  LoadAddress(rdi, ExternalReference::isolate_address());
751#endif
752  LoadAddress(rax,
753              ExternalReference::delete_handle_scope_extensions(isolate()));
754  call(rax);
755  movq(rax, prev_limit_reg);
756  jmp(&leave_exit_frame);
757}
758
759
760void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
761                                             int result_size) {
762  // Set the entry point and jump to the C entry runtime stub.
763  LoadAddress(rbx, ext);
764  CEntryStub ces(result_size);
765  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
766}
767
768
769void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
770                                   InvokeFlag flag,
771                                   const CallWrapper& call_wrapper) {
772  // You can't call a builtin without a valid frame.
773  ASSERT(flag == JUMP_FUNCTION || has_frame());
774
775  // Rely on the assertion to check that the number of provided
776  // arguments matches the expected number of arguments. Fake a
777  // parameter count to avoid emitting code to do the check.
778  ParameterCount expected(0);
779  GetBuiltinEntry(rdx, id);
780  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
781}
782
783
784void MacroAssembler::GetBuiltinFunction(Register target,
785                                        Builtins::JavaScript id) {
786  // Load the builtins object into target register.
787  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
788  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
789  movq(target, FieldOperand(target,
790                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
791}
792
793
794void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
795  ASSERT(!target.is(rdi));
796  // Load the JavaScript builtin function from the builtins object.
797  GetBuiltinFunction(rdi, id);
798  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
799}
800
801
802static const Register saved_regs[] =
803    { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
804static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
805
806
807void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
808                                     Register exclusion1,
809                                     Register exclusion2,
810                                     Register exclusion3) {
811  // We don't allow a GC during a store buffer overflow so there is no need to
812  // store the registers in any particular way, but we do have to store and
813  // restore them.
814  for (int i = 0; i < kNumberOfSavedRegs; i++) {
815    Register reg = saved_regs[i];
816    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
817      push(reg);
818    }
819  }
820  // r12 to r15 are callee-saved on all platforms.
821  if (fp_mode == kSaveFPRegs) {
822    CpuFeatures::Scope scope(SSE2);
823    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
824    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
825      XMMRegister reg = XMMRegister::from_code(i);
826      movsd(Operand(rsp, i * kDoubleSize), reg);
827    }
828  }
829}
830
831
832void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
833                                    Register exclusion1,
834                                    Register exclusion2,
835                                    Register exclusion3) {
836  if (fp_mode == kSaveFPRegs) {
837    CpuFeatures::Scope scope(SSE2);
838    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
839      XMMRegister reg = XMMRegister::from_code(i);
840      movsd(reg, Operand(rsp, i * kDoubleSize));
841    }
842    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
843  }
844  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
845    Register reg = saved_regs[i];
846    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
847      pop(reg);
848    }
849  }
850}
851
852
853void MacroAssembler::Set(Register dst, int64_t x) {
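  // Pick the shortest encoding: xor for zero, a zero-extending 32-bit move
  // for unsigned 32-bit values, a sign-extending movq with a 32-bit
  // immediate for signed 32-bit values, and a full 64-bit immediate
  // otherwise.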
854  if (x == 0) {
855    xorl(dst, dst);
856  } else if (is_uint32(x)) {
857    movl(dst, Immediate(static_cast<uint32_t>(x)));
858  } else if (is_int32(x)) {
859    movq(dst, Immediate(static_cast<int32_t>(x)));
860  } else {
861    movq(dst, x, RelocInfo::NONE);
862  }
863}
864
865void MacroAssembler::Set(const Operand& dst, int64_t x) {
866  if (is_int32(x)) {
867    movq(dst, Immediate(static_cast<int32_t>(x)));
868  } else {
869    Set(kScratchRegister, x);
870    movq(dst, kScratchRegister);
871  }
872}
873
874// ----------------------------------------------------------------------------
875// Smi tagging, untagging and tag detection.
876
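// On x64 a smi keeps its 32-bit payload in the upper half of the word:
// kSmiTag is 0 and kSmiShift is 32, so e.g. Smi::FromInt(5) has the bit
// pattern 0x0000000500000000 and the low 32 bits of any smi are zero.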
877Register MacroAssembler::GetSmiConstant(Smi* source) {
878  int value = source->value();
879  if (value == 0) {
880    xorl(kScratchRegister, kScratchRegister);
881    return kScratchRegister;
882  }
883  if (value == 1) {
884    return kSmiConstantRegister;
885  }
886  LoadSmiConstant(kScratchRegister, source);
887  return kScratchRegister;
888}
889
890void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
891  if (emit_debug_code()) {
892    movq(dst,
893         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
894         RelocInfo::NONE);
895    cmpq(dst, kSmiConstantRegister);
896    if (allow_stub_calls()) {
897      Assert(equal, "Uninitialized kSmiConstantRegister");
898    } else {
899      Label ok;
900      j(equal, &ok, Label::kNear);
901      int3();
902      bind(&ok);
903    }
904  }
905  int value = source->value();
906  if (value == 0) {
907    xorl(dst, dst);
908    return;
909  }
910  bool negative = value < 0;
911  unsigned int uvalue = negative ? -value : value;
912
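  // kSmiConstantRegister is expected to hold Smi::FromInt(1) (checked above
  // when debug code is emitted), so small magnitudes can be built with a
  // single lea using it as base and/or scaled index; anything else falls
  // back to a 64-bit immediate move.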
913  switch (uvalue) {
914    case 9:
915      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
916      break;
917    case 8:
918      xorl(dst, dst);
919      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
920      break;
921    case 4:
922      xorl(dst, dst);
923      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
924      break;
925    case 5:
926      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
927      break;
928    case 3:
929      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
930      break;
931    case 2:
932      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
933      break;
934    case 1:
935      movq(dst, kSmiConstantRegister);
936      break;
937    case 0:
938      UNREACHABLE();
939      return;
940    default:
941      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
942      return;
943  }
944  if (negative) {
945    neg(dst);
946  }
947}
948
949
950void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
951  STATIC_ASSERT(kSmiTag == 0);
952  if (!dst.is(src)) {
953    movl(dst, src);
954  }
955  shl(dst, Immediate(kSmiShift));
956}
957
958
959void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
960  if (emit_debug_code()) {
961    testb(dst, Immediate(0x01));
962    Label ok;
963    j(zero, &ok, Label::kNear);
964    if (allow_stub_calls()) {
965      Abort("Integer32ToSmiField writing to non-smi location");
966    } else {
967      int3();
968    }
969    bind(&ok);
970  }
971  ASSERT(kSmiShift % kBitsPerByte == 0);
972  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
973}
974
975
976void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
977                                                Register src,
978                                                int constant) {
979  if (dst.is(src)) {
980    addl(dst, Immediate(constant));
981  } else {
982    leal(dst, Operand(src, constant));
983  }
984  shl(dst, Immediate(kSmiShift));
985}
986
987
988void MacroAssembler::SmiToInteger32(Register dst, Register src) {
989  STATIC_ASSERT(kSmiTag == 0);
990  if (!dst.is(src)) {
991    movq(dst, src);
992  }
993  shr(dst, Immediate(kSmiShift));
994}
995
996
997void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
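  // On little-endian x64 the untagged value is simply the upper 32 bits of
  // the smi, so a 32-bit load at byte offset kSmiShift / kBitsPerByte (4)
  // reads it directly without shifting.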
998  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
999}
1000
1001
1002void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1003  STATIC_ASSERT(kSmiTag == 0);
1004  if (!dst.is(src)) {
1005    movq(dst, src);
1006  }
1007  sar(dst, Immediate(kSmiShift));
1008}
1009
1010
1011void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1012  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1013}
1014
1015
1016void MacroAssembler::SmiTest(Register src) {
1017  testq(src, src);
1018}
1019
1020
1021void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1022  if (emit_debug_code()) {
1023    AbortIfNotSmi(smi1);
1024    AbortIfNotSmi(smi2);
1025  }
1026  cmpq(smi1, smi2);
1027}
1028
1029
1030void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1031  if (emit_debug_code()) {
1032    AbortIfNotSmi(dst);
1033  }
1034  Cmp(dst, src);
1035}
1036
1037
1038void MacroAssembler::Cmp(Register dst, Smi* src) {
1039  ASSERT(!dst.is(kScratchRegister));
1040  if (src->value() == 0) {
1041    testq(dst, dst);
1042  } else {
1043    Register constant_reg = GetSmiConstant(src);
1044    cmpq(dst, constant_reg);
1045  }
1046}
1047
1048
1049void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1050  if (emit_debug_code()) {
1051    AbortIfNotSmi(dst);
1052    AbortIfNotSmi(src);
1053  }
1054  cmpq(dst, src);
1055}
1056
1057
1058void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1059  if (emit_debug_code()) {
1060    AbortIfNotSmi(dst);
1061    AbortIfNotSmi(src);
1062  }
1063  cmpq(dst, src);
1064}
1065
1066
1067void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1068  if (emit_debug_code()) {
1069    AbortIfNotSmi(dst);
1070  }
1071  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1072}
1073
1074
1075void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1076  // The Operand cannot use the smi register.
1077  Register smi_reg = GetSmiConstant(src);
1078  ASSERT(!dst.AddressUsesRegister(smi_reg));
1079  cmpq(dst, smi_reg);
1080}
1081
1082
1083void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1084  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1085}
1086
1087
1088void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1089                                                           Register src,
1090                                                           int power) {
1091  ASSERT(power >= 0);
1092  ASSERT(power < 64);
1093  if (power == 0) {
1094    SmiToInteger64(dst, src);
1095    return;
1096  }
1097  if (!dst.is(src)) {
1098    movq(dst, src);
1099  }
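  // Untagging (an arithmetic shift right by kSmiShift) and multiplying by
  // 2^power net out to a single shift by the difference of the two amounts.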
1100  if (power < kSmiShift) {
1101    sar(dst, Immediate(kSmiShift - power));
1102  } else if (power > kSmiShift) {
1103    shl(dst, Immediate(power - kSmiShift));
1104  }
1105}
1106
1107
1108void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1109                                                         Register src,
1110                                                         int power) {
1111  ASSERT((0 <= power) && (power < 32));
1112  if (dst.is(src)) {
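    // For a non-negative smi a single logical shift right both untags the
    // value and divides it by 2^power.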
1113    shr(dst, Immediate(power + kSmiShift));
1114  } else {
1115    UNIMPLEMENTED();  // Not used.
1116  }
1117}
1118
1119
1120void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1121                                 Label* on_not_smis,
1122                                 Label::Distance near_jump) {
1123  if (dst.is(src1) || dst.is(src2)) {
1124    ASSERT(!src1.is(kScratchRegister));
1125    ASSERT(!src2.is(kScratchRegister));
1126    movq(kScratchRegister, src1);
1127    or_(kScratchRegister, src2);
1128    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1129    movq(dst, kScratchRegister);
1130  } else {
1131    movq(dst, src1);
1132    or_(dst, src2);
1133    JumpIfNotSmi(dst, on_not_smis, near_jump);
1134  }
1135}
1136
1137
1138Condition MacroAssembler::CheckSmi(Register src) {
1139  STATIC_ASSERT(kSmiTag == 0);
1140  testb(src, Immediate(kSmiTagMask));
1141  return zero;
1142}
1143
1144
1145Condition MacroAssembler::CheckSmi(const Operand& src) {
1146  STATIC_ASSERT(kSmiTag == 0);
1147  testb(src, Immediate(kSmiTagMask));
1148  return zero;
1149}
1150
1151
1152Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1153  STATIC_ASSERT(kSmiTag == 0);
1154  // Test that both bits of the mask 0x8000000000000001 are zero.
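  // Rotating left by one moves the sign bit into bit 0 and the smi tag bit
  // into bit 1, so a single testb against 3 checks both at once.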
1155  movq(kScratchRegister, src);
1156  rol(kScratchRegister, Immediate(1));
1157  testb(kScratchRegister, Immediate(3));
1158  return zero;
1159}
1160
1161
1162Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1163  if (first.is(second)) {
1164    return CheckSmi(first);
1165  }
1166  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
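  // Smis have their low 32 bits clear, while tagged heap object pointers
  // end in 01 (objects are 4-byte aligned or better), so the sum of the two
  // values has both low bits clear only when both operands are smis.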
1167  leal(kScratchRegister, Operand(first, second, times_1, 0));
1168  testb(kScratchRegister, Immediate(0x03));
1169  return zero;
1170}
1171
1172
1173Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1174                                                  Register second) {
1175  if (first.is(second)) {
1176    return CheckNonNegativeSmi(first);
1177  }
1178  movq(kScratchRegister, first);
1179  or_(kScratchRegister, second);
1180  rol(kScratchRegister, Immediate(1));
1181  testl(kScratchRegister, Immediate(3));
1182  return zero;
1183}
1184
1185
1186Condition MacroAssembler::CheckEitherSmi(Register first,
1187                                         Register second,
1188                                         Register scratch) {
1189  if (first.is(second)) {
1190    return CheckSmi(first);
1191  }
1192  if (scratch.is(second)) {
1193    andl(scratch, first);
1194  } else {
1195    if (!scratch.is(first)) {
1196      movl(scratch, first);
1197    }
1198    andl(scratch, second);
1199  }
1200  testb(scratch, Immediate(kSmiTagMask));
1201  return zero;
1202}
1203
1204
1205Condition MacroAssembler::CheckIsMinSmi(Register src) {
1206  ASSERT(!src.is(kScratchRegister));
1207  // If we overflow by subtracting one, it's the minimal smi value.
1208  cmpq(src, kSmiConstantRegister);
1209  return overflow;
1210}
1211
1212
1213Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1214  // A 32-bit integer value can always be converted to a smi.
1215  return always;
1216}
1217
1218
1219Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1220  // An unsigned 32-bit integer value is valid as long as the high bit
1221  // is not set.
1222  testl(src, src);
1223  return positive;
1224}
1225
1226
1227void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1228  if (dst.is(src)) {
1229    andl(dst, Immediate(kSmiTagMask));
1230  } else {
1231    movl(dst, Immediate(kSmiTagMask));
1232    andl(dst, src);
1233  }
1234}
1235
1236
1237void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1238  if (!(src.AddressUsesRegister(dst))) {
1239    movl(dst, Immediate(kSmiTagMask));
1240    andl(dst, src);
1241  } else {
1242    movl(dst, src);
1243    andl(dst, Immediate(kSmiTagMask));
1244  }
1245}
1246
1247
1248void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1249                                            Label* on_invalid,
1250                                            Label::Distance near_jump) {
1251  Condition is_valid = CheckInteger32ValidSmiValue(src);
1252  j(NegateCondition(is_valid), on_invalid, near_jump);
1253}
1254
1255
1256void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1257                                                Label* on_invalid,
1258                                                Label::Distance near_jump) {
1259  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1260  j(NegateCondition(is_valid), on_invalid, near_jump);
1261}
1262
1263
1264void MacroAssembler::JumpIfSmi(Register src,
1265                               Label* on_smi,
1266                               Label::Distance near_jump) {
1267  Condition smi = CheckSmi(src);
1268  j(smi, on_smi, near_jump);
1269}
1270
1271
1272void MacroAssembler::JumpIfNotSmi(Register src,
1273                                  Label* on_not_smi,
1274                                  Label::Distance near_jump) {
1275  Condition smi = CheckSmi(src);
1276  j(NegateCondition(smi), on_not_smi, near_jump);
1277}
1278
1279
1280void MacroAssembler::JumpUnlessNonNegativeSmi(
1281    Register src, Label* on_not_smi_or_negative,
1282    Label::Distance near_jump) {
1283  Condition non_negative_smi = CheckNonNegativeSmi(src);
1284  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1285}
1286
1287
1288void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1289                                             Smi* constant,
1290                                             Label* on_equals,
1291                                             Label::Distance near_jump) {
1292  SmiCompare(src, constant);
1293  j(equal, on_equals, near_jump);
1294}
1295
1296
1297void MacroAssembler::JumpIfNotBothSmi(Register src1,
1298                                      Register src2,
1299                                      Label* on_not_both_smi,
1300                                      Label::Distance near_jump) {
1301  Condition both_smi = CheckBothSmi(src1, src2);
1302  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1303}
1304
1305
1306void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1307                                                  Register src2,
1308                                                  Label* on_not_both_smi,
1309                                                  Label::Distance near_jump) {
1310  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1311  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1312}
1313
1314
1315void MacroAssembler::SmiTryAddConstant(Register dst,
1316                                       Register src,
1317                                       Smi* constant,
1318                                       Label* on_not_smi_result,
1319                                       Label::Distance near_jump) {
1320  // Does not assume that src is a smi.
1321  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
1322  STATIC_ASSERT(kSmiTag == 0);
1323  ASSERT(!dst.is(kScratchRegister));
1324  ASSERT(!src.is(kScratchRegister));
1325
1326  JumpIfNotSmi(src, on_not_smi_result, near_jump);
1327  Register tmp = (dst.is(src) ? kScratchRegister : dst);
1328  LoadSmiConstant(tmp, constant);
1329  addq(tmp, src);
1330  j(overflow, on_not_smi_result, near_jump);
1331  if (dst.is(src)) {
1332    movq(dst, tmp);
1333  }
1334}
1335
1336
1337void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1338  if (constant->value() == 0) {
1339    if (!dst.is(src)) {
1340      movq(dst, src);
1341    }
1342    return;
1343  } else if (dst.is(src)) {
1344    ASSERT(!dst.is(kScratchRegister));
1345    switch (constant->value()) {
1346      case 1:
1347        addq(dst, kSmiConstantRegister);
1348        return;
1349      case 2:
1350        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1351        return;
1352      case 4:
1353        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1354        return;
1355      case 8:
1356        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1357        return;
1358      default:
1359        Register constant_reg = GetSmiConstant(constant);
1360        addq(dst, constant_reg);
1361        return;
1362    }
1363  } else {
1364    switch (constant->value()) {
1365      case 1:
1366        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1367        return;
1368      case 2:
1369        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1370        return;
1371      case 4:
1372        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1373        return;
1374      case 8:
1375        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1376        return;
1377      default:
1378        LoadSmiConstant(dst, constant);
1379        addq(dst, src);
1380        return;
1381    }
1382  }
1383}
1384
1385
1386void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1387  if (constant->value() != 0) {
1388    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
1389  }
1390}
1391
1392
1393void MacroAssembler::SmiAddConstant(Register dst,
1394                                    Register src,
1395                                    Smi* constant,
1396                                    Label* on_not_smi_result,
1397                                    Label::Distance near_jump) {
1398  if (constant->value() == 0) {
1399    if (!dst.is(src)) {
1400      movq(dst, src);
1401    }
1402  } else if (dst.is(src)) {
1403    ASSERT(!dst.is(kScratchRegister));
1404
1405    LoadSmiConstant(kScratchRegister, constant);
1406    addq(kScratchRegister, src);
1407    j(overflow, on_not_smi_result, near_jump);
1408    movq(dst, kScratchRegister);
1409  } else {
1410    LoadSmiConstant(dst, constant);
1411    addq(dst, src);
1412    j(overflow, on_not_smi_result, near_jump);
1413  }
1414}
1415
1416
1417void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1418  if (constant->value() == 0) {
1419    if (!dst.is(src)) {
1420      movq(dst, src);
1421    }
1422  } else if (dst.is(src)) {
1423    ASSERT(!dst.is(kScratchRegister));
1424    Register constant_reg = GetSmiConstant(constant);
1425    subq(dst, constant_reg);
1426  } else {
1427    if (constant->value() == Smi::kMinValue) {
1428      LoadSmiConstant(dst, constant);
1429      // Adding and subtracting the min-value gives the same result; it only
1430      // differs in the overflow flag, which we don't check here.
1431      addq(dst, src);
1432    } else {
1433      // Subtract by adding the negation.
1434      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1435      addq(dst, src);
1436    }
1437  }
1438}
1439
1440
1441void MacroAssembler::SmiSubConstant(Register dst,
1442                                    Register src,
1443                                    Smi* constant,
1444                                    Label* on_not_smi_result,
1445                                    Label::Distance near_jump) {
1446  if (constant->value() == 0) {
1447    if (!dst.is(src)) {
1448      movq(dst, src);
1449    }
1450  } else if (dst.is(src)) {
1451    ASSERT(!dst.is(kScratchRegister));
1452    if (constant->value() == Smi::kMinValue) {
1453      // Subtracting min-value from any non-negative value will overflow.
1454      // We test the non-negativeness before doing the subtraction.
1455      testq(src, src);
1456      j(not_sign, on_not_smi_result, near_jump);
1457      LoadSmiConstant(kScratchRegister, constant);
1458      subq(dst, kScratchRegister);
1459    } else {
1460      // Subtract by adding the negation.
1461      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
1462      addq(kScratchRegister, dst);
1463      j(overflow, on_not_smi_result, near_jump);
1464      movq(dst, kScratchRegister);
1465    }
1466  } else {
1467    if (constant->value() == Smi::kMinValue) {
1468      // Subtracting min-value from any non-negative value will overflow.
1469      // We test the non-negativeness before doing the subtraction.
1470      testq(src, src);
1471      j(not_sign, on_not_smi_result, near_jump);
1472      LoadSmiConstant(dst, constant);
1473      // Adding and subtracting the min-value gives the same result; it only
1474      // differs in the overflow flag, which we don't check here.
1475      addq(dst, src);
1476    } else {
1477      // Subtract by adding the negation.
1478      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1479      addq(dst, src);
1480      j(overflow, on_not_smi_result, near_jump);
1481    }
1482  }
1483}
1484
1485
1486void MacroAssembler::SmiNeg(Register dst,
1487                            Register src,
1488                            Label* on_smi_result,
1489                            Label::Distance near_jump) {
1490  if (dst.is(src)) {
1491    ASSERT(!dst.is(kScratchRegister));
1492    movq(kScratchRegister, src);
1493    neg(dst);  // Low 32 bits are retained as zero by negation.
1494    // Test if result is zero or Smi::kMinValue.
1495    cmpq(dst, kScratchRegister);
1496    j(not_equal, on_smi_result, near_jump);
1497    movq(src, kScratchRegister);
1498  } else {
1499    movq(dst, src);
1500    neg(dst);
1501    cmpq(dst, src);
1502    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1503    j(not_equal, on_smi_result, near_jump);
1504  }
1505}
1506
1507
1508void MacroAssembler::SmiAdd(Register dst,
1509                            Register src1,
1510                            Register src2,
1511                            Label* on_not_smi_result,
1512                            Label::Distance near_jump) {
1513  ASSERT_NOT_NULL(on_not_smi_result);
1514  ASSERT(!dst.is(src2));
1515  if (dst.is(src1)) {
1516    movq(kScratchRegister, src1);
1517    addq(kScratchRegister, src2);
1518    j(overflow, on_not_smi_result, near_jump);
1519    movq(dst, kScratchRegister);
1520  } else {
1521    movq(dst, src1);
1522    addq(dst, src2);
1523    j(overflow, on_not_smi_result, near_jump);
1524  }
1525}
1526
1527
1528void MacroAssembler::SmiAdd(Register dst,
1529                            Register src1,
1530                            const Operand& src2,
1531                            Label* on_not_smi_result,
1532                            Label::Distance near_jump) {
1533  ASSERT_NOT_NULL(on_not_smi_result);
1534  if (dst.is(src1)) {
1535    movq(kScratchRegister, src1);
1536    addq(kScratchRegister, src2);
1537    j(overflow, on_not_smi_result, near_jump);
1538    movq(dst, kScratchRegister);
1539  } else {
1540    ASSERT(!src2.AddressUsesRegister(dst));
1541    movq(dst, src1);
1542    addq(dst, src2);
1543    j(overflow, on_not_smi_result, near_jump);
1544  }
1545}
1546
1547
1548void MacroAssembler::SmiAdd(Register dst,
1549                            Register src1,
1550                            Register src2) {
1551  // No overflow checking. Use only when it's known that
1552  // overflowing is impossible.
1553  if (!dst.is(src1)) {
1554    if (emit_debug_code()) {
1555      movq(kScratchRegister, src1);
1556      addq(kScratchRegister, src2);
1557      Check(no_overflow, "Smi addition overflow");
1558    }
1559    lea(dst, Operand(src1, src2, times_1, 0));
1560  } else {
1561    addq(dst, src2);
1562    Assert(no_overflow, "Smi addition overflow");
1563  }
1564}
1565
1566
1567void MacroAssembler::SmiSub(Register dst,
1568                            Register src1,
1569                            Register src2,
1570                            Label* on_not_smi_result,
1571                            Label::Distance near_jump) {
1572  ASSERT_NOT_NULL(on_not_smi_result);
1573  ASSERT(!dst.is(src2));
1574  if (dst.is(src1)) {
1575    cmpq(dst, src2);
1576    j(overflow, on_not_smi_result, near_jump);
1577    subq(dst, src2);
1578  } else {
1579    movq(dst, src1);
1580    subq(dst, src2);
1581    j(overflow, on_not_smi_result, near_jump);
1582  }
1583}
1584
1585
1586void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1587  // No overflow checking. Use only when it's known that
1588  // overflowing is impossible (e.g., subtracting two positive smis).
1589  ASSERT(!dst.is(src2));
1590  if (!dst.is(src1)) {
1591    movq(dst, src1);
1592  }
1593  subq(dst, src2);
1594  Assert(no_overflow, "Smi subtraction overflow");
1595}
1596
1597
1598void MacroAssembler::SmiSub(Register dst,
1599                            Register src1,
1600                            const Operand& src2,
1601                            Label* on_not_smi_result,
1602                            Label::Distance near_jump) {
1603  ASSERT_NOT_NULL(on_not_smi_result);
1604  if (dst.is(src1)) {
1605    movq(kScratchRegister, src2);
1606    cmpq(src1, kScratchRegister);
1607    j(overflow, on_not_smi_result, near_jump);
1608    subq(src1, kScratchRegister);
1609  } else {
1610    movq(dst, src1);
1611    subq(dst, src2);
1612    j(overflow, on_not_smi_result, near_jump);
1613  }
1614}
1615
1616
1617void MacroAssembler::SmiSub(Register dst,
1618                            Register src1,
1619                            const Operand& src2) {
1620  // No overflow checking. Use only when it's known that
1621  // overflowing is impossible (e.g., subtracting two positive smis).
1622  if (!dst.is(src1)) {
1623    movq(dst, src1);
1624  }
1625  subq(dst, src2);
1626  Assert(no_overflow, "Smi subtraction overflow");
1627}
1628
1629
1630void MacroAssembler::SmiMul(Register dst,
1631                            Register src1,
1632                            Register src2,
1633                            Label* on_not_smi_result,
1634                            Label::Distance near_jump) {
1635  ASSERT(!dst.is(src2));
1636  ASSERT(!dst.is(kScratchRegister));
1637  ASSERT(!src1.is(kScratchRegister));
1638  ASSERT(!src2.is(kScratchRegister));
1639
1640  if (dst.is(src1)) {
1641    Label failure, zero_correct_result;
1642    movq(kScratchRegister, src1);  // Create backup for later testing.
1643    SmiToInteger64(dst, src1);
1644    imul(dst, src2);
1645    j(overflow, &failure, Label::kNear);
1646
1647    // Check for negative zero result.  If product is zero, and one
1648    // argument is negative, go to slow case.
1649    Label correct_result;
1650    testq(dst, dst);
1651    j(not_zero, &correct_result, Label::kNear);
1652
1653    movq(dst, kScratchRegister);
1654    xor_(dst, src2);
1655    // Result was positive zero.
1656    j(positive, &zero_correct_result, Label::kNear);
1657
1658    bind(&failure);  // Reused failure exit, restores src1.
1659    movq(src1, kScratchRegister);
1660    jmp(on_not_smi_result, near_jump);
1661
1662    bind(&zero_correct_result);
1663    Set(dst, 0);
1664
1665    bind(&correct_result);
1666  } else {
1667    SmiToInteger64(dst, src1);
1668    imul(dst, src2);
1669    j(overflow, on_not_smi_result, near_jump);
1670    // Check for negative zero result.  If product is zero, and one
1671    // argument is negative, go to slow case.
1672    Label correct_result;
1673    testq(dst, dst);
1674    j(not_zero, &correct_result, Label::kNear);
1675    // One of src1 and src2 is zero, so check whether the other is
1676    // negative.
1677    movq(kScratchRegister, src1);
1678    xor_(kScratchRegister, src2);
1679    j(negative, on_not_smi_result, near_jump);
1680    bind(&correct_result);
1681  }
1682}
1683
1684
1685void MacroAssembler::SmiDiv(Register dst,
1686                            Register src1,
1687                            Register src2,
1688                            Label* on_not_smi_result,
1689                            Label::Distance near_jump) {
1690  ASSERT(!src1.is(kScratchRegister));
1691  ASSERT(!src2.is(kScratchRegister));
1692  ASSERT(!dst.is(kScratchRegister));
1693  ASSERT(!src2.is(rax));
1694  ASSERT(!src2.is(rdx));
1695  ASSERT(!src1.is(rdx));
1696
1697  // Check for 0 divisor (result is +/-Infinity).
1698  testq(src2, src2);
1699  j(zero, on_not_smi_result, near_jump);
1700
1701  if (src1.is(rax)) {
1702    movq(kScratchRegister, src1);
1703  }
1704  SmiToInteger32(rax, src1);
1705  // We need to rule out dividing Smi::kMinValue by -1, since that would
1706  // overflow in idiv and raise an exception.
1707  // We combine this with the negative zero test (negative zero only happens
1708  // when dividing zero by a negative number).
1709
1710  // We overshoot a little and go to slow case if we divide min-value
1711  // by any negative value, not just -1.
1712  Label safe_div;
1713  testl(rax, Immediate(0x7fffffff));
1714  j(not_zero, &safe_div, Label::kNear);
1715  testq(src2, src2);
1716  if (src1.is(rax)) {
1717    j(positive, &safe_div, Label::kNear);
1718    movq(src1, kScratchRegister);
1719    jmp(on_not_smi_result, near_jump);
1720  } else {
1721    j(negative, on_not_smi_result, near_jump);
1722  }
1723  bind(&safe_div);
1724
1725  SmiToInteger32(src2, src2);
1726  // Sign extend src1 into edx:eax.
1727  cdq();
1728  idivl(src2);
1729  Integer32ToSmi(src2, src2);
1730  // Check that the remainder is zero.
1731  testl(rdx, rdx);
1732  if (src1.is(rax)) {
1733    Label smi_result;
1734    j(zero, &smi_result, Label::kNear);
1735    movq(src1, kScratchRegister);
1736    jmp(on_not_smi_result, near_jump);
1737    bind(&smi_result);
1738  } else {
1739    j(not_zero, on_not_smi_result, near_jump);
1740  }
1741  if (!dst.is(src1) && src1.is(rax)) {
1742    movq(src1, kScratchRegister);
1743  }
1744  Integer32ToSmi(dst, rax);
1745}
1746
1747
1748void MacroAssembler::SmiMod(Register dst,
1749                            Register src1,
1750                            Register src2,
1751                            Label* on_not_smi_result,
1752                            Label::Distance near_jump) {
1753  ASSERT(!dst.is(kScratchRegister));
1754  ASSERT(!src1.is(kScratchRegister));
1755  ASSERT(!src2.is(kScratchRegister));
1756  ASSERT(!src2.is(rax));
1757  ASSERT(!src2.is(rdx));
1758  ASSERT(!src1.is(rdx));
1759  ASSERT(!src1.is(src2));
1760
1761  testq(src2, src2);
1762  j(zero, on_not_smi_result, near_jump);
1763
1764  if (src1.is(rax)) {
1765    movq(kScratchRegister, src1);
1766  }
1767  SmiToInteger32(rax, src1);
1768  SmiToInteger32(src2, src2);
1769
1770  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1771  Label safe_div;
1772  cmpl(rax, Immediate(Smi::kMinValue));
1773  j(not_equal, &safe_div, Label::kNear);
1774  cmpl(src2, Immediate(-1));
1775  j(not_equal, &safe_div, Label::kNear);
1776  // Retag inputs and go slow case.
1777  Integer32ToSmi(src2, src2);
1778  if (src1.is(rax)) {
1779    movq(src1, kScratchRegister);
1780  }
1781  jmp(on_not_smi_result, near_jump);
1782  bind(&safe_div);
1783
1784  // Sign extend eax into edx:eax.
1785  cdq();
1786  idivl(src2);
1787  // Restore smi tags on inputs.
1788  Integer32ToSmi(src2, src2);
1789  if (src1.is(rax)) {
1790    movq(src1, kScratchRegister);
1791  }
1792  // Check for a negative zero result.  If the result is zero, and the
1793  // dividend is negative, go slow to return a floating point negative zero.
1794  Label smi_result;
1795  testl(rdx, rdx);
1796  j(not_zero, &smi_result, Label::kNear);
1797  testq(src1, src1);
1798  j(negative, on_not_smi_result, near_jump);
1799  bind(&smi_result);
1800  Integer32ToSmi(dst, rdx);
1801}
1802
1803
1804void MacroAssembler::SmiNot(Register dst, Register src) {
1805  ASSERT(!dst.is(kScratchRegister));
1806  ASSERT(!src.is(kScratchRegister));
1807  // Set tag and padding bits before negating, so that they are zero afterwards.
1808  movl(kScratchRegister, Immediate(~0));
1809  if (dst.is(src)) {
1810    xor_(dst, kScratchRegister);
1811  } else {
1812    lea(dst, Operand(src, kScratchRegister, times_1, 0));
1813  }
1814  not_(dst);
1815}
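// Worked example (informal), assuming the value-in-upper-32-bits smi layout
// used throughout this file (kSmiShift == 32): for src = Smi(5), that is
// 0x0000000500000000, filling the low 32 bits gives 0x00000005FFFFFFFF, and
// not_() then yields 0xFFFFFFFA00000000, which is Smi(-6) == Smi(~5), with
// the tag and padding bits back at zero.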
1816
1817
1818void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1819  ASSERT(!dst.is(src2));
1820  if (!dst.is(src1)) {
1821    movq(dst, src1);
1822  }
1823  and_(dst, src2);
1824}
1825
1826
1827void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1828  if (constant->value() == 0) {
1829    Set(dst, 0);
1830  } else if (dst.is(src)) {
1831    ASSERT(!dst.is(kScratchRegister));
1832    Register constant_reg = GetSmiConstant(constant);
1833    and_(dst, constant_reg);
1834  } else {
1835    LoadSmiConstant(dst, constant);
1836    and_(dst, src);
1837  }
1838}
1839
1840
1841void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1842  if (!dst.is(src1)) {
1843    ASSERT(!src1.is(src2));
1844    movq(dst, src1);
1845  }
1846  or_(dst, src2);
1847}
1848
1849
1850void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1851  if (dst.is(src)) {
1852    ASSERT(!dst.is(kScratchRegister));
1853    Register constant_reg = GetSmiConstant(constant);
1854    or_(dst, constant_reg);
1855  } else {
1856    LoadSmiConstant(dst, constant);
1857    or_(dst, src);
1858  }
1859}
1860
1861
1862void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1863  if (!dst.is(src1)) {
1864    ASSERT(!src1.is(src2));
1865    movq(dst, src1);
1866  }
1867  xor_(dst, src2);
1868}
1869
1870
1871void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1872  if (dst.is(src)) {
1873    ASSERT(!dst.is(kScratchRegister));
1874    Register constant_reg = GetSmiConstant(constant);
1875    xor_(dst, constant_reg);
1876  } else {
1877    LoadSmiConstant(dst, constant);
1878    xor_(dst, src);
1879  }
1880}
1881
1882
1883void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1884                                                     Register src,
1885                                                     int shift_value) {
1886  ASSERT(is_uint5(shift_value));
1887  if (shift_value > 0) {
1888    if (dst.is(src)) {
1889      sar(dst, Immediate(shift_value + kSmiShift));
1890      shl(dst, Immediate(kSmiShift));
1891    } else {
1892      UNIMPLEMENTED();  // Not used.
1893    }
1894  }
1895}
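// Informal sketch of the constant arithmetic shift above: with the smi value
// in the upper 32 bits, sar by (shift_value + kSmiShift) produces the shifted
// value as a plain 64-bit integer, and the shl by kSmiShift re-tags it. An
// arithmetic right shift can never leave the smi range, so no bailout label
// is needed.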
1896
1897
1898void MacroAssembler::SmiShiftLeftConstant(Register dst,
1899                                          Register src,
1900                                          int shift_value) {
1901  if (!dst.is(src)) {
1902    movq(dst, src);
1903  }
1904  if (shift_value > 0) {
1905    shl(dst, Immediate(shift_value));
1906  }
1907}
1908
1909
1910void MacroAssembler::SmiShiftLogicalRightConstant(
1911    Register dst, Register src, int shift_value,
1912    Label* on_not_smi_result, Label::Distance near_jump) {
1913  // Logical right shift interprets its result as an *unsigned* number.
1914  if (dst.is(src)) {
1915    UNIMPLEMENTED();  // Not used.
1916  } else {
1917    movq(dst, src);
1918    if (shift_value == 0) {
1919      testq(dst, dst);
1920      j(negative, on_not_smi_result, near_jump);
1921    }
1922    shr(dst, Immediate(shift_value + kSmiShift));
1923    shl(dst, Immediate(kSmiShift));
1924  }
1925}
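// Note on the shift_value == 0 case above: a logical (unsigned) shift by zero
// of a negative smi would produce a value of 2^31 or more, which does not fit
// in a 32-bit smi value, hence the bailout. For shift_value >= 1 the unsigned
// result is at most 2^31 - 1 and always fits, so no check is emitted.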
1926
1927
1928void MacroAssembler::SmiShiftLeft(Register dst,
1929                                  Register src1,
1930                                  Register src2) {
1931  ASSERT(!dst.is(rcx));
1932  // Untag shift amount.
1933  if (!dst.is(src1)) {
1934    movq(dst, src1);
1935  }
1936  SmiToInteger32(rcx, src2);
1937  // Shift amount is given by the lower 5 bits, not six as in the shl opcode.
1938  and_(rcx, Immediate(0x1f));
1939  shl_cl(dst);
1940}
1941
1942
1943void MacroAssembler::SmiShiftLogicalRight(Register dst,
1944                                          Register src1,
1945                                          Register src2,
1946                                          Label* on_not_smi_result,
1947                                          Label::Distance near_jump) {
1948  ASSERT(!dst.is(kScratchRegister));
1949  ASSERT(!src1.is(kScratchRegister));
1950  ASSERT(!src2.is(kScratchRegister));
1951  ASSERT(!dst.is(rcx));
1952  // dst and src1 can be the same, because the one case that bails out
1953  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
1954  if (src1.is(rcx) || src2.is(rcx)) {
1955    movq(kScratchRegister, rcx);
1956  }
1957  if (!dst.is(src1)) {
1958    movq(dst, src1);
1959  }
1960  SmiToInteger32(rcx, src2);
1961  orl(rcx, Immediate(kSmiShift));
1962  shr_cl(dst);  // Effective shift is (original rcx & 0x1f) + 32.
1963  shl(dst, Immediate(kSmiShift));
1964  testq(dst, dst);
1965  if (src1.is(rcx) || src2.is(rcx)) {
1966    Label positive_result;
1967    j(positive, &positive_result, Label::kNear);
1968    if (src1.is(rcx)) {
1969      movq(src1, kScratchRegister);
1970    } else {
1971      movq(src2, kScratchRegister);
1972    }
1973    jmp(on_not_smi_result, near_jump);
1974    bind(&positive_result);
1975  } else {
1976    // src2 was zero and src1 negative.
1977    j(negative, on_not_smi_result, near_jump);
1978  }
1979}
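// Informal sketch of the variable logical shift above: or'ing the untagged
// count with kSmiShift (32) lets a single shr_cl both strip the 32 tag and
// padding bits and apply the requested shift (its low 5 bits); the shl by
// kSmiShift re-tags the result. The sign test afterwards catches the only
// non-smi case, an unsigned result of 2^31 or more, which can only arise from
// an effective shift count of zero.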
1980
1981
1982void MacroAssembler::SmiShiftArithmeticRight(Register dst,
1983                                             Register src1,
1984                                             Register src2) {
1985  ASSERT(!dst.is(kScratchRegister));
1986  ASSERT(!src1.is(kScratchRegister));
1987  ASSERT(!src2.is(kScratchRegister));
1988  ASSERT(!dst.is(rcx));
1989  if (src1.is(rcx)) {
1990    movq(kScratchRegister, src1);
1991  } else if (src2.is(rcx)) {
1992    movq(kScratchRegister, src2);
1993  }
1994  if (!dst.is(src1)) {
1995    movq(dst, src1);
1996  }
1997  SmiToInteger32(rcx, src2);
1998  orl(rcx, Immediate(kSmiShift));
1999  sar_cl(dst);  // Effective shift is (original rcx & 0x1f) + 32.
2000  shl(dst, Immediate(kSmiShift));
2001  if (src1.is(rcx)) {
2002    movq(src1, kScratchRegister);
2003  } else if (src2.is(rcx)) {
2004    movq(src2, kScratchRegister);
2005  }
2006}
2007
2008
2009void MacroAssembler::SelectNonSmi(Register dst,
2010                                  Register src1,
2011                                  Register src2,
2012                                  Label* on_not_smis,
2013                                  Label::Distance near_jump) {
2014  ASSERT(!dst.is(kScratchRegister));
2015  ASSERT(!src1.is(kScratchRegister));
2016  ASSERT(!src2.is(kScratchRegister));
2017  ASSERT(!dst.is(src1));
2018  ASSERT(!dst.is(src2));
2019  // The operands must not both be smis.
2020#ifdef DEBUG
2021  if (allow_stub_calls()) {  // Check contains a stub call.
2022    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2023    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
2024  }
2025#endif
2026  STATIC_ASSERT(kSmiTag == 0);
2027  ASSERT_EQ(0, Smi::FromInt(0));
2028  movl(kScratchRegister, Immediate(kSmiTagMask));
2029  and_(kScratchRegister, src1);
2030  testl(kScratchRegister, src2);
2031  // If non-zero, then neither operand is a smi.
2032  j(not_zero, on_not_smis, near_jump);
2033
2034  // Exactly one operand is a smi.
2035  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
2036  // kScratchRegister still holds src1 & kSmiTagMask, i.e. either zero or one.
2037  subq(kScratchRegister, Immediate(1));
2038  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
2039  movq(dst, src1);
2040  xor_(dst, src2);
2041  and_(dst, kScratchRegister);
2042  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2043  xor_(dst, src1);
2044  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2045}
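// Worked example (informal): if src1 is the smi, kScratchRegister becomes
// 0 - 1 = all 1s, so dst = ((src1 ^ src2) & ~0) ^ src1 = src2, the non-smi.
// If src2 is the smi, kScratchRegister becomes 1 - 1 = 0, so
// dst = (0 & (src1 ^ src2)) ^ src1 = src1, again the non-smi operand.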
2046
2047
2048SmiIndex MacroAssembler::SmiToIndex(Register dst,
2049                                    Register src,
2050                                    int shift) {
2051  ASSERT(is_uint6(shift));
2052  // There is a possible optimization if shift is in the range 60-63, but that
2053  // will (and must) never happen.
2054  if (!dst.is(src)) {
2055    movq(dst, src);
2056  }
2057  if (shift < kSmiShift) {
2058    sar(dst, Immediate(kSmiShift - shift));
2059  } else {
2060    shl(dst, Immediate(shift - kSmiShift));
2061  }
2062  return SmiIndex(dst, times_1);
2063}
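// Informal sketch for SmiToIndex and SmiToNegativeIndex below: a smi holds
// value << 32, and the desired index is value << shift, so the register is
// shifted right arithmetically by (kSmiShift - shift) when shift < 32, or
// left by (shift - kSmiShift) otherwise. The scaling is folded into the
// register itself, which is why the returned SmiIndex always uses times_1.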
2064
2065SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2066                                            Register src,
2067                                            int shift) {
2068  // Register src holds a positive smi.
2069  ASSERT(is_uint6(shift));
2070  if (!dst.is(src)) {
2071    movq(dst, src);
2072  }
2073  neg(dst);
2074  if (shift < kSmiShift) {
2075    sar(dst, Immediate(kSmiShift - shift));
2076  } else {
2077    shl(dst, Immediate(shift - kSmiShift));
2078  }
2079  return SmiIndex(dst, times_1);
2080}
2081
2082
2083void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2084  ASSERT_EQ(0, kSmiShift % kBitsPerByte);
2085  addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2086}
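// Informal note: because the smi value occupies the upper 32 bits of the
// 8-byte field (kSmiShift == 32), reading the 32-bit word at byte offset
// kSmiShift / kBitsPerByte (i.e. +4) yields the untagged integer directly,
// so the single addl adds the field's value to the 32-bit value in dst.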
2087
2088
2089void MacroAssembler::JumpIfNotString(Register object,
2090                                     Register object_map,
2091                                     Label* not_string,
2092                                     Label::Distance near_jump) {
2093  Condition is_smi = CheckSmi(object);
2094  j(is_smi, not_string, near_jump);
2095  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2096  j(above_equal, not_string, near_jump);
2097}
2098
2099
2100void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2101    Register first_object,
2102    Register second_object,
2103    Register scratch1,
2104    Register scratch2,
2105    Label* on_fail,
2106    Label::Distance near_jump) {
2107  // Check that neither object is a smi.
2108  Condition either_smi = CheckEitherSmi(first_object, second_object);
2109  j(either_smi, on_fail, near_jump);
2110
2111  // Load instance type for both strings.
2112  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2113  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2114  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2115  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2116
2117  // Check that both are flat ASCII strings.
2118  ASSERT(kNotStringTag != 0);
2119  const int kFlatAsciiStringMask =
2120      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2121  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2122
2123  andl(scratch1, Immediate(kFlatAsciiStringMask));
2124  andl(scratch2, Immediate(kFlatAsciiStringMask));
2125  // Interleave the bits to check both scratch1 and scratch2 in one test.
2126  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2127  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2128  cmpl(scratch1,
2129       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2130  j(not_equal, on_fail, near_jump);
2131}
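// Informal sketch of the interleaving trick above: the ASSERT_EQ guarantees
// that the mask and the mask shifted left by three share no bits, so the
// lea(scratch1, Operand(scratch1, scratch2, times_8, 0)) packs both masked
// instance types into disjoint bit positions of a single register, and one
// cmpl against kFlatAsciiStringTag + (kFlatAsciiStringTag << 3) checks both
// strings at once.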
2132
2133
2134void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2135    Register instance_type,
2136    Register scratch,
2137    Label* failure,
2138    Label::Distance near_jump) {
2139  if (!scratch.is(instance_type)) {
2140    movl(scratch, instance_type);
2141  }
2142
2143  const int kFlatAsciiStringMask =
2144      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2145
2146  andl(scratch, Immediate(kFlatAsciiStringMask));
2147  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
2148  j(not_equal, failure, near_jump);
2149}
2150
2151
2152void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2153    Register first_object_instance_type,
2154    Register second_object_instance_type,
2155    Register scratch1,
2156    Register scratch2,
2157    Label* on_fail,
2158    Label::Distance near_jump) {
2159  // Load instance type for both strings.
2160  movq(scratch1, first_object_instance_type);
2161  movq(scratch2, second_object_instance_type);
2162
2163  // Check that both are flat ASCII strings.
2164  ASSERT(kNotStringTag != 0);
2165  const int kFlatAsciiStringMask =
2166      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2167  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2168
2169  andl(scratch1, Immediate(kFlatAsciiStringMask));
2170  andl(scratch2, Immediate(kFlatAsciiStringMask));
2171  // Interleave the bits to check both scratch1 and scratch2 in one test.
2172  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2173  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2174  cmpl(scratch1,
2175       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2176  j(not_equal, on_fail, near_jump);
2177}
2178
2179
2180
2181void MacroAssembler::Move(Register dst, Register src) {
2182  if (!dst.is(src)) {
2183    movq(dst, src);
2184  }
2185}
2186
2187
2188void MacroAssembler::Move(Register dst, Handle<Object> source) {
2189  ASSERT(!source->IsFailure());
2190  if (source->IsSmi()) {
2191    Move(dst, Smi::cast(*source));
2192  } else {
2193    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
2194  }
2195}
2196
2197
2198void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2199  ASSERT(!source->IsFailure());
2200  if (source->IsSmi()) {
2201    Move(dst, Smi::cast(*source));
2202  } else {
2203    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2204    movq(dst, kScratchRegister);
2205  }
2206}
2207
2208
2209void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2210  if (source->IsSmi()) {
2211    Cmp(dst, Smi::cast(*source));
2212  } else {
2213    Move(kScratchRegister, source);
2214    cmpq(dst, kScratchRegister);
2215  }
2216}
2217
2218
2219void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2220  if (source->IsSmi()) {
2221    Cmp(dst, Smi::cast(*source));
2222  } else {
2223    ASSERT(source->IsHeapObject());
2224    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2225    cmpq(dst, kScratchRegister);
2226  }
2227}
2228
2229
2230void MacroAssembler::Push(Handle<Object> source) {
2231  if (source->IsSmi()) {
2232    Push(Smi::cast(*source));
2233  } else {
2234    ASSERT(source->IsHeapObject());
2235    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2236    push(kScratchRegister);
2237  }
2238}
2239
2240
2241void MacroAssembler::LoadHeapObject(Register result,
2242                                    Handle<HeapObject> object) {
2243  if (isolate()->heap()->InNewSpace(*object)) {
2244    Handle<JSGlobalPropertyCell> cell =
2245        isolate()->factory()->NewJSGlobalPropertyCell(object);
2246    movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2247    movq(result, Operand(result, 0));
2248  } else {
2249    Move(result, object);
2250  }
2251}
2252
2253
2254void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
2255  if (isolate()->heap()->InNewSpace(*object)) {
2256    Handle<JSGlobalPropertyCell> cell =
2257        isolate()->factory()->NewJSGlobalPropertyCell(object);
2258    movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2259    movq(kScratchRegister, Operand(kScratchRegister, 0));
2260    push(kScratchRegister);
2261  } else {
2262    Push(object);
2263  }
2264}
2265
2266
2267void MacroAssembler::LoadGlobalCell(Register dst,
2268                                    Handle<JSGlobalPropertyCell> cell) {
2269  if (dst.is(rax)) {
2270    load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
2271  } else {
2272    movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2273    movq(dst, Operand(dst, 0));
2274  }
2275}
2276
2277
2278void MacroAssembler::Push(Smi* source) {
2279  intptr_t smi = reinterpret_cast<intptr_t>(source);
2280  if (is_int32(smi)) {
2281    push(Immediate(static_cast<int32_t>(smi)));
2282  } else {
2283    Register constant = GetSmiConstant(source);
2284    push(constant);
2285  }
2286}
2287
2288
2289void MacroAssembler::Drop(int stack_elements) {
2290  if (stack_elements > 0) {
2291    addq(rsp, Immediate(stack_elements * kPointerSize));
2292  }
2293}
2294
2295
2296void MacroAssembler::Test(const Operand& src, Smi* source) {
2297  testl(Operand(src, kIntSize), Immediate(source->value()));
2298}
2299
2300
2301void MacroAssembler::TestBit(const Operand& src, int bits) {
2302  int byte_offset = bits / kBitsPerByte;
2303  int bit_in_byte = bits & (kBitsPerByte - 1);
2304  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
2305}
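// Worked example (informal): for bits == 10, byte_offset is 1 and bit_in_byte
// is 2, so the testb reads the byte at offset 1 of the operand and tests it
// against the mask 1 << 2; the access always stays within a single byte, no
// matter how large the bit index is.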
2306
2307
2308void MacroAssembler::Jump(ExternalReference ext) {
2309  LoadAddress(kScratchRegister, ext);
2310  jmp(kScratchRegister);
2311}
2312
2313
2314void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2315  movq(kScratchRegister, destination, rmode);
2316  jmp(kScratchRegister);
2317}
2318
2319
2320void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2321  // TODO(X64): Inline this
2322  jmp(code_object, rmode);
2323}
2324
2325
2326int MacroAssembler::CallSize(ExternalReference ext) {
2327  // Opcode for call kScratchRegister (r10) is: REX.B FF D2 (three bytes).
2328  const int kCallInstructionSize = 3;
2329  return LoadAddressSize(ext) + kCallInstructionSize;
2330}
2331
2332
2333void MacroAssembler::Call(ExternalReference ext) {
2334#ifdef DEBUG
2335  int end_position = pc_offset() + CallSize(ext);
2336#endif
2337  LoadAddress(kScratchRegister, ext);
2338  call(kScratchRegister);
2339#ifdef DEBUG
2340  CHECK_EQ(end_position, pc_offset());
2341#endif
2342}
2343
2344
2345void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2346#ifdef DEBUG
2347  int end_position = pc_offset() + CallSize(destination, rmode);
2348#endif
2349  movq(kScratchRegister, destination, rmode);
2350  call(kScratchRegister);
2351#ifdef DEBUG
2352  CHECK_EQ(pc_offset(), end_position);
2353#endif
2354}
2355
2356
2357void MacroAssembler::Call(Handle<Code> code_object,
2358                          RelocInfo::Mode rmode,
2359                          unsigned ast_id) {
2360#ifdef DEBUG
2361  int end_position = pc_offset() + CallSize(code_object);
2362#endif
2363  ASSERT(RelocInfo::IsCodeTarget(rmode));
2364  call(code_object, rmode, ast_id);
2365#ifdef DEBUG
2366  CHECK_EQ(end_position, pc_offset());
2367#endif
2368}
2369
2370
2371void MacroAssembler::Pushad() {
2372  push(rax);
2373  push(rcx);
2374  push(rdx);
2375  push(rbx);
2376  // Not pushing rsp or rbp.
2377  push(rsi);
2378  push(rdi);
2379  push(r8);
2380  push(r9);
2381  // r10 is kScratchRegister.
2382  push(r11);
2383  // r12 is kSmiConstantRegister.
2384  // r13 is kRootRegister.
2385  push(r14);
2386  push(r15);
2387  STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
2388  // Use lea for symmetry with Popad.
2389  int sp_delta =
2390      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2391  lea(rsp, Operand(rsp, -sp_delta));
2392}
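// Informal note: only kNumSafepointSavedRegisters (11) registers are pushed,
// but the trailing lea also reserves the remaining five slots, so the
// register area always occupies kNumSafepointRegisters (16) slots. Popad and
// Dropad below assume the same 16-slot layout when unwinding it.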
2393
2394
2395void MacroAssembler::Popad() {
2396  // Popad must not change the flags, so use lea instead of addq.
2397  int sp_delta =
2398      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2399  lea(rsp, Operand(rsp, sp_delta));
2400  pop(r15);
2401  pop(r14);
2402  pop(r11);
2403  pop(r9);
2404  pop(r8);
2405  pop(rdi);
2406  pop(rsi);
2407  pop(rbx);
2408  pop(rdx);
2409  pop(rcx);
2410  pop(rax);
2411}
2412
2413
2414void MacroAssembler::Dropad() {
2415  addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2416}
2417
2418
2419// Order in which general registers are pushed by Pushad:
2420// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
2421int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2422    0,
2423    1,
2424    2,
2425    3,
2426    -1,
2427    -1,
2428    4,
2429    5,
2430    6,
2431    7,
2432    -1,
2433    8,
2434    -1,
2435    -1,
2436    9,
2437    10
2438};
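// Informal reading of the table above: it is indexed by register code
// (rax = 0 ... r15 = 15) and yields the register's position in the Pushad
// push order, with -1 for the registers that are never pushed: rsp, rbp,
// r10 (kScratchRegister), r12 (kSmiConstantRegister) and r13 (kRootRegister).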
2439
2440
2441void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2442  movq(SafepointRegisterSlot(dst), src);
2443}
2444
2445
2446void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2447  movq(dst, SafepointRegisterSlot(src));
2448}
2449
2450
2451Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2452  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2453}
2454
2455
2456void MacroAssembler::PushTryHandler(CodeLocation try_location,
2457                                    HandlerType type,
2458                                    int handler_index) {
2459  // Adjust this code if not the case.
2460  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2461  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2462  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2463  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2464  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2465  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2466
2467  // We will build up the handler from the bottom by pushing on the stack.
2468  // First compute the state and push the frame pointer and context.
2469  unsigned state = StackHandler::OffsetField::encode(handler_index);
2470  if (try_location == IN_JAVASCRIPT) {
2471    push(rbp);
2472    push(rsi);
2473    state |= (type == TRY_CATCH_HANDLER)
2474        ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
2475        : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
2476  } else {
2477    ASSERT(try_location == IN_JS_ENTRY);
2478    // The frame pointer does not point to a JS frame so we save NULL for
2479    // rbp. We expect the code throwing an exception to check rbp before
2480    // dereferencing it to restore the context.
2481    push(Immediate(0));  // NULL frame pointer.
2482    Push(Smi::FromInt(0));  // No context.
2483    state |= StackHandler::KindField::encode(StackHandler::ENTRY);
2484  }
2485
2486  // Push the state and the code object.
2487  push(Immediate(state));
2488  Push(CodeObject());
2489
2490  // Link the current handler as the next handler.
2491  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2492  push(ExternalOperand(handler_address));
2493  // Set this new handler as the current one.
2494  movq(ExternalOperand(handler_address), rsp);
2495}
2496
2497
2498void MacroAssembler::PopTryHandler() {
2499  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2500  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2501  pop(ExternalOperand(handler_address));
2502  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
2503}
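// Informal note: the pop above both removes the next-handler word from the
// stack and stores it back into the isolate's handler address, re-linking
// the previous handler; the addq then discards the remaining
// StackHandlerConstants::kSize - kPointerSize bytes of the handler frame.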
2504
2505
2506void MacroAssembler::JumpToHandlerEntry() {
2507  // Compute the handler entry address and jump to it.  The handler table is
2508  // a fixed array of (smi-tagged) code offsets.
2509  // rax = exception, rdi = code object, rdx = state.
2510  movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
2511  shr(rdx, Immediate(StackHandler::kKindWidth));
2512  movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
2513  SmiToInteger64(rdx, rdx);
2514  lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
2515  jmp(rdi);
2516}
2517
2518
2519void MacroAssembler::Throw(Register value) {
2520  // Adjust this code if not the case.
2521  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2522  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2523  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2524  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2525  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2526  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2527
2528  // The exception is expected in rax.
2529  if (!value.is(rax)) {
2530    movq(rax, value);
2531  }
2532  // Drop the stack pointer to the top of the top handler.
2533  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2534  movq(rsp, ExternalOperand(handler_address));
2535  // Restore the next handler.
2536  pop(ExternalOperand(handler_address));
2537
2538  // Remove the code object and state, compute the handler address in rdi.
2539  pop(rdi);  // Code object.
2540  pop(rdx);  // Offset and state.
2541
2542  // Restore the context and frame pointer.
2543  pop(rsi);  // Context.
2544  pop(rbp);  // Frame pointer.
2545
2546  // If the handler is a JS frame, restore the context to the frame.
2547  // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
2548  // rbp or rsi.
2549  Label skip;
2550  testq(rsi, rsi);
2551  j(zero, &skip, Label::kNear);
2552  movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
2553  bind(&skip);
2554
2555  JumpToHandlerEntry();
2556}
2557
2558
2559void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
2560                                      Register value) {
2561  // Adjust this code if not the case.
2562  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2563  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2564  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2565  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2566  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2567  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2568
2569  // The exception is expected in rax.
2570  if (type == OUT_OF_MEMORY) {
2571    // Set external caught exception to false.
2572    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
2573                                      isolate());
2574    Set(rax, static_cast<int64_t>(false));
2575    Store(external_caught, rax);
2576
2577    // Set pending exception and rax to out of memory exception.
2578    ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
2579                                        isolate());
2580    movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
2581    Store(pending_exception, rax);
2582  } else if (!value.is(rax)) {
2583    movq(rax, value);
2584  }
2585
2586  // Drop the stack pointer to the top of the top stack handler.
2587  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2588  Load(rsp, handler_address);
2589
2590  // Unwind the handlers until the top ENTRY handler is found.
2591  Label fetch_next, check_kind;
2592  jmp(&check_kind, Label::kNear);
2593  bind(&fetch_next);
2594  movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
2595
2596  bind(&check_kind);
2597  STATIC_ASSERT(StackHandler::ENTRY == 0);
2598  testl(Operand(rsp, StackHandlerConstants::kStateOffset),
2599        Immediate(StackHandler::KindField::kMask));
2600  j(not_zero, &fetch_next);
2601
2602  // Set the top handler address to next handler past the top ENTRY handler.
2603  pop(ExternalOperand(handler_address));
2604
2605  // Remove the code object and state, compute the handler address in rdi.
2606  pop(rdi);  // Code object.
2607  pop(rdx);  // Offset and state.
2608
2609  // Clear the context pointer and frame pointer (0 was saved in the handler).
2610  pop(rsi);
2611  pop(rbp);
2612
2613  JumpToHandlerEntry();
2614}
2615
2616
2617void MacroAssembler::Ret() {
2618  ret(0);
2619}
2620
2621
2622void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2623  if (is_uint16(bytes_dropped)) {
2624    ret(bytes_dropped);
2625  } else {
2626    pop(scratch);
2627    addq(rsp, Immediate(bytes_dropped));
2628    push(scratch);
2629    ret(0);
2630  }
2631}
2632
2633
2634void MacroAssembler::FCmp() {
2635  fucomip();
2636  fstp(0);
2637}
2638
2639
2640void MacroAssembler::CmpObjectType(Register heap_object,
2641                                   InstanceType type,
2642                                   Register map) {
2643  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
2644  CmpInstanceType(map, type);
2645}
2646
2647
2648void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2649  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
2650       Immediate(static_cast<int8_t>(type)));
2651}
2652
2653
2654void MacroAssembler::CheckFastElements(Register map,
2655                                       Label* fail,
2656                                       Label::Distance distance) {
2657  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
2658  STATIC_ASSERT(FAST_ELEMENTS == 1);
2659  cmpb(FieldOperand(map, Map::kBitField2Offset),
2660       Immediate(Map::kMaximumBitField2FastElementValue));
2661  j(above, fail, distance);
2662}
2663
2664
2665void MacroAssembler::CheckFastObjectElements(Register map,
2666                                             Label* fail,
2667                                             Label::Distance distance) {
2668  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
2669  STATIC_ASSERT(FAST_ELEMENTS == 1);
2670  cmpb(FieldOperand(map, Map::kBitField2Offset),
2671       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
2672  j(below_equal, fail, distance);
2673  cmpb(FieldOperand(map, Map::kBitField2Offset),
2674       Immediate(Map::kMaximumBitField2FastElementValue));
2675  j(above, fail, distance);
2676}
2677
2678
2679void MacroAssembler::CheckFastSmiOnlyElements(Register map,
2680                                              Label* fail,
2681                                              Label::Distance distance) {
2682  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
2683  cmpb(FieldOperand(map, Map::kBitField2Offset),
2684       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
2685  j(above, fail, distance);
2686}
2687
2688
2689void MacroAssembler::StoreNumberToDoubleElements(
2690    Register maybe_number,
2691    Register elements,
2692    Register index,
2693    XMMRegister xmm_scratch,
2694    Label* fail) {
2695  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
2696
2697  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
2698
2699  CheckMap(maybe_number,
2700           isolate()->factory()->heap_number_map(),
2701           fail,
2702           DONT_DO_SMI_CHECK);
2703
2704  // Double value, canonicalize NaN.
2705  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
2706  cmpl(FieldOperand(maybe_number, offset),
2707       Immediate(kNaNOrInfinityLowerBoundUpper32));
2708  j(greater_equal, &maybe_nan, Label::kNear);
2709
2710  bind(&not_nan);
2711  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
2712  bind(&have_double_value);
2713  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
2714        xmm_scratch);
2715  jmp(&done);
2716
2717  bind(&maybe_nan);
2718  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
2719  // it's an Infinity, and the non-NaN code path applies.
2720  j(greater, &is_nan, Label::kNear);
2721  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
2722  j(zero, &not_nan);
2723  bind(&is_nan);
2724  // Convert all NaNs to the same canonical NaN value when they are stored in
2725  // the double array.
2726  Set(kScratchRegister, BitCast<uint64_t>(
2727      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
2728  movq(xmm_scratch, kScratchRegister);
2729  jmp(&have_double_value, Label::kNear);
2730
2731  bind(&smi_value);
2732  // Value is a smi. Convert to a double and store.
2733  // Preserve original value.
2734  SmiToInteger32(kScratchRegister, maybe_number);
2735  cvtlsi2sd(xmm_scratch, kScratchRegister);
2736  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
2737        xmm_scratch);
2738  bind(&done);
2739}
2740
2741
2742void MacroAssembler::CompareMap(Register obj,
2743                                Handle<Map> map,
2744                                Label* early_success,
2745                                CompareMapMode mode) {
2746  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
2747  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
2748    Map* transitioned_fast_element_map(
2749        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
2750    ASSERT(transitioned_fast_element_map == NULL ||
2751           map->elements_kind() != FAST_ELEMENTS);
2752    if (transitioned_fast_element_map != NULL) {
2753      j(equal, early_success, Label::kNear);
2754      Cmp(FieldOperand(obj, HeapObject::kMapOffset),
2755          Handle<Map>(transitioned_fast_element_map));
2756    }
2757
2758    Map* transitioned_double_map(
2759        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
2760    ASSERT(transitioned_double_map == NULL ||
2761           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
2762    if (transitioned_double_map != NULL) {
2763      j(equal, early_success, Label::kNear);
2764      Cmp(FieldOperand(obj, HeapObject::kMapOffset),
2765          Handle<Map>(transitioned_double_map));
2766    }
2767  }
2768}
2769
2770
2771void MacroAssembler::CheckMap(Register obj,
2772                              Handle<Map> map,
2773                              Label* fail,
2774                              SmiCheckType smi_check_type,
2775                              CompareMapMode mode) {
2776  if (smi_check_type == DO_SMI_CHECK) {
2777    JumpIfSmi(obj, fail);
2778  }
2779
2780  Label success;
2781  CompareMap(obj, map, &success, mode);
2782  j(not_equal, fail);
2783  bind(&success);
2784}
2785
2786
2787void MacroAssembler::ClampUint8(Register reg) {
2788  Label done;
2789  testl(reg, Immediate(0xFFFFFF00));
2790  j(zero, &done, Label::kNear);
2791  setcc(negative, reg);  // 1 if negative, 0 if positive.
2792  decb(reg);  // 0 if negative, 255 if positive.
2793  bind(&done);
2794}
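// Informal sketch of the clamping trick: the testl only falls through when
// some bit above the low byte is set, i.e. the value is negative or greater
// than 255. setcc(negative) then yields 1 for negative inputs and 0 for
// too-large positive ones, and decb turns that into 0x00 (clamp to 0) or
// 0xFF (clamp to 255) respectively.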
2795
2796
2797void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
2798                                        XMMRegister temp_xmm_reg,
2799                                        Register result_reg,
2800                                        Register temp_reg) {
2801  Label done;
2802  Set(result_reg, 0);
2803  xorps(temp_xmm_reg, temp_xmm_reg);
2804  ucomisd(input_reg, temp_xmm_reg);
2805  j(below, &done, Label::kNear);
2806  uint64_t one_half = BitCast<uint64_t, double>(0.5);
2807  Set(temp_reg, one_half);
2808  movq(temp_xmm_reg, temp_reg);
2809  addsd(temp_xmm_reg, input_reg);
2810  cvttsd2si(result_reg, temp_xmm_reg);
2811  testl(result_reg, Immediate(0xFFFFFF00));
2812  j(zero, &done, Label::kNear);
2813  Set(result_reg, 255);
2814  bind(&done);
2815}
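// Informal note: inputs below zero (and NaN, for which ucomisd reports
// "below") keep the initial result of 0. Otherwise 0.5 is added and the sum
// truncated with cvttsd2si, i.e. the value is rounded half-up; if the rounded
// result has bits outside the low byte, it is clamped to 255.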
2816
2817
2818void MacroAssembler::LoadInstanceDescriptors(Register map,
2819                                             Register descriptors) {
2820  movq(descriptors, FieldOperand(map,
2821                                 Map::kInstanceDescriptorsOrBitField3Offset));
2822  Label not_smi;
2823  JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
2824  Move(descriptors, isolate()->factory()->empty_descriptor_array());
2825  bind(&not_smi);
2826}
2827
2828
2829void MacroAssembler::DispatchMap(Register obj,
2830                                 Handle<Map> map,
2831                                 Handle<Code> success,
2832                                 SmiCheckType smi_check_type) {
2833  Label fail;
2834  if (smi_check_type == DO_SMI_CHECK) {
2835    JumpIfSmi(obj, &fail);
2836  }
2837  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
2838  j(equal, success, RelocInfo::CODE_TARGET);
2839
2840  bind(&fail);
2841}
2842
2843
2844void MacroAssembler::AbortIfNotNumber(Register object) {
2845  Label ok;
2846  Condition is_smi = CheckSmi(object);
2847  j(is_smi, &ok, Label::kNear);
2848  Cmp(FieldOperand(object, HeapObject::kMapOffset),
2849      isolate()->factory()->heap_number_map());
2850  Assert(equal, "Operand not a number");
2851  bind(&ok);
2852}
2853
2854
2855void MacroAssembler::AbortIfSmi(Register object) {
2856  Condition is_smi = CheckSmi(object);
2857  Assert(NegateCondition(is_smi), "Operand is a smi");
2858}
2859
2860
2861void MacroAssembler::AbortIfNotSmi(Register object) {
2862  Condition is_smi = CheckSmi(object);
2863  Assert(is_smi, "Operand is not a smi");
2864}
2865
2866
2867void MacroAssembler::AbortIfNotSmi(const Operand& object) {
2868  Condition is_smi = CheckSmi(object);
2869  Assert(is_smi, "Operand is not a smi");
2870}
2871
2872
2873void MacroAssembler::AbortIfNotString(Register object) {
2874  testb(object, Immediate(kSmiTagMask));
2875  Assert(not_equal, "Operand is not a string");
2876  push(object);
2877  movq(object, FieldOperand(object, HeapObject::kMapOffset));
2878  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
2879  pop(object);
2880  Assert(below, "Operand is not a string");
2881}
2882
2883
2884void MacroAssembler::AbortIfNotRootValue(Register src,
2885                                         Heap::RootListIndex root_value_index,
2886                                         const char* message) {
2887  ASSERT(!src.is(kScratchRegister));
2888  LoadRoot(kScratchRegister, root_value_index);
2889  cmpq(src, kScratchRegister);
2890  Check(equal, message);
2891}
2892
2893
2894
2895Condition MacroAssembler::IsObjectStringType(Register heap_object,
2896                                             Register map,
2897                                             Register instance_type) {
2898  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
2899  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
2900  STATIC_ASSERT(kNotStringTag != 0);
2901  testb(instance_type, Immediate(kIsNotStringMask));
2902  return zero;
2903}
2904
2905
2906void MacroAssembler::TryGetFunctionPrototype(Register function,
2907                                             Register result,
2908                                             Label* miss,
2909                                             bool miss_on_bound_function) {
2910  // Check that the receiver isn't a smi.
2911  testl(function, Immediate(kSmiTagMask));
2912  j(zero, miss);
2913
2914  // Check that the function really is a function.
2915  CmpObjectType(function, JS_FUNCTION_TYPE, result);
2916  j(not_equal, miss);
2917
2918  if (miss_on_bound_function) {
2919    movq(kScratchRegister,
2920         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2921    // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
2922    // field).
2923    TestBit(FieldOperand(kScratchRegister,
2924                         SharedFunctionInfo::kCompilerHintsOffset),
2925            SharedFunctionInfo::kBoundFunction);
2926    j(not_zero, miss);
2927  }
2928
2929  // Make sure that the function has an instance prototype.
2930  Label non_instance;
2931  testb(FieldOperand(result, Map::kBitFieldOffset),
2932        Immediate(1 << Map::kHasNonInstancePrototype));
2933  j(not_zero, &non_instance, Label::kNear);
2934
2935  // Get the prototype or initial map from the function.
2936  movq(result,
2937       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2938
2939  // If the prototype or initial map is the hole, don't return it and
2940  // simply miss the cache instead. This will allow us to allocate a
2941  // prototype object on-demand in the runtime system.
2942  CompareRoot(result, Heap::kTheHoleValueRootIndex);
2943  j(equal, miss);
2944
2945  // If the function does not have an initial map, we're done.
2946  Label done;
2947  CmpObjectType(result, MAP_TYPE, kScratchRegister);
2948  j(not_equal, &done, Label::kNear);
2949
2950  // Get the prototype from the initial map.
2951  movq(result, FieldOperand(result, Map::kPrototypeOffset));
2952  jmp(&done, Label::kNear);
2953
2954  // Non-instance prototype: Fetch prototype from constructor field
2955  // in initial map.
2956  bind(&non_instance);
2957  movq(result, FieldOperand(result, Map::kConstructorOffset));
2958
2959  // All done.
2960  bind(&done);
2961}
2962
2963
2964void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2965  if (FLAG_native_code_counters && counter->Enabled()) {
2966    Operand counter_operand = ExternalOperand(ExternalReference(counter));
2967    movl(counter_operand, Immediate(value));
2968  }
2969}
2970
2971
2972void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2973  ASSERT(value > 0);
2974  if (FLAG_native_code_counters && counter->Enabled()) {
2975    Operand counter_operand = ExternalOperand(ExternalReference(counter));
2976    if (value == 1) {
2977      incl(counter_operand);
2978    } else {
2979      addl(counter_operand, Immediate(value));
2980    }
2981  }
2982}
2983
2984
2985void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2986  ASSERT(value > 0);
2987  if (FLAG_native_code_counters && counter->Enabled()) {
2988    Operand counter_operand = ExternalOperand(ExternalReference(counter));
2989    if (value == 1) {
2990      decl(counter_operand);
2991    } else {
2992      subl(counter_operand, Immediate(value));
2993    }
2994  }
2995}
2996
2997
2998#ifdef ENABLE_DEBUGGER_SUPPORT
2999void MacroAssembler::DebugBreak() {
3000  Set(rax, 0);  // No arguments.
3001  LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3002  CEntryStub ces(1);
3003  ASSERT(AllowThisStubCall(&ces));
3004  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3005}
3006#endif  // ENABLE_DEBUGGER_SUPPORT
3007
3008
3009void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3010  // This macro takes the dst register to make the code more readable
3011  // at the call sites. However, the dst register has to be rcx to
3012  // follow the calling convention which requires the call type to be
3013  // in rcx.
3014  ASSERT(dst.is(rcx));
3015  if (call_kind == CALL_AS_FUNCTION) {
3016    LoadSmiConstant(dst, Smi::FromInt(1));
3017  } else {
3018    LoadSmiConstant(dst, Smi::FromInt(0));
3019  }
3020}
3021
3022
3023void MacroAssembler::InvokeCode(Register code,
3024                                const ParameterCount& expected,
3025                                const ParameterCount& actual,
3026                                InvokeFlag flag,
3027                                const CallWrapper& call_wrapper,
3028                                CallKind call_kind) {
3029  // You can't call a function without a valid frame.
3030  ASSERT(flag == JUMP_FUNCTION || has_frame());
3031
3032  Label done;
3033  bool definitely_mismatches = false;
3034  InvokePrologue(expected,
3035                 actual,
3036                 Handle<Code>::null(),
3037                 code,
3038                 &done,
3039                 &definitely_mismatches,
3040                 flag,
3041                 Label::kNear,
3042                 call_wrapper,
3043                 call_kind);
3044  if (!definitely_mismatches) {
3045    if (flag == CALL_FUNCTION) {
3046      call_wrapper.BeforeCall(CallSize(code));
3047      SetCallKind(rcx, call_kind);
3048      call(code);
3049      call_wrapper.AfterCall();
3050    } else {
3051      ASSERT(flag == JUMP_FUNCTION);
3052      SetCallKind(rcx, call_kind);
3053      jmp(code);
3054    }
3055    bind(&done);
3056  }
3057}
3058
3059
3060void MacroAssembler::InvokeCode(Handle<Code> code,
3061                                const ParameterCount& expected,
3062                                const ParameterCount& actual,
3063                                RelocInfo::Mode rmode,
3064                                InvokeFlag flag,
3065                                const CallWrapper& call_wrapper,
3066                                CallKind call_kind) {
3067  // You can't call a function without a valid frame.
3068  ASSERT(flag == JUMP_FUNCTION || has_frame());
3069
3070  Label done;
3071  bool definitely_mismatches = false;
3072  Register dummy = rax;
3073  InvokePrologue(expected,
3074                 actual,
3075                 code,
3076                 dummy,
3077                 &done,
3078                 &definitely_mismatches,
3079                 flag,
3080                 Label::kNear,
3081                 call_wrapper,
3082                 call_kind);
3083  if (!definitely_mismatches) {
3084    if (flag == CALL_FUNCTION) {
3085      call_wrapper.BeforeCall(CallSize(code));
3086      SetCallKind(rcx, call_kind);
3087      Call(code, rmode);
3088      call_wrapper.AfterCall();
3089    } else {
3090      ASSERT(flag == JUMP_FUNCTION);
3091      SetCallKind(rcx, call_kind);
3092      Jump(code, rmode);
3093    }
3094    bind(&done);
3095  }
3096}
3097
3098
3099void MacroAssembler::InvokeFunction(Register function,
3100                                    const ParameterCount& actual,
3101                                    InvokeFlag flag,
3102                                    const CallWrapper& call_wrapper,
3103                                    CallKind call_kind) {
3104  // You can't call a function without a valid frame.
3105  ASSERT(flag == JUMP_FUNCTION || has_frame());
3106
3107  ASSERT(function.is(rdi));
3108  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3109  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
3110  movsxlq(rbx,
3111          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
3112  // Advances rdx to the end of the Code object header, to the start of
3113  // the executable code.
3114  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3115
3116  ParameterCount expected(rbx);
3117  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3118}
3119
3120
3121void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3122                                    const ParameterCount& actual,
3123                                    InvokeFlag flag,
3124                                    const CallWrapper& call_wrapper,
3125                                    CallKind call_kind) {
3126  // You can't call a function without a valid frame.
3127  ASSERT(flag == JUMP_FUNCTION || has_frame());
3128
3129  // Get the function and setup the context.
3130  LoadHeapObject(rdi, function);
3131  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3132
3133  // We call indirectly through the code field in the function to
3134  // allow recompilation to take effect without changing any of the
3135  // call sites.
3136  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3137  ParameterCount expected(function->shared()->formal_parameter_count());
3138  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3139}
3140
3141
3142void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3143                                    const ParameterCount& actual,
3144                                    Handle<Code> code_constant,
3145                                    Register code_register,
3146                                    Label* done,
3147                                    bool* definitely_mismatches,
3148                                    InvokeFlag flag,
3149                                    Label::Distance near_jump,
3150                                    const CallWrapper& call_wrapper,
3151                                    CallKind call_kind) {
3152  bool definitely_matches = false;
3153  *definitely_mismatches = false;
3154  Label invoke;
3155  if (expected.is_immediate()) {
3156    ASSERT(actual.is_immediate());
3157    if (expected.immediate() == actual.immediate()) {
3158      definitely_matches = true;
3159    } else {
3160      Set(rax, actual.immediate());
3161      if (expected.immediate() ==
3162              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3163        // Don't worry about adapting arguments for built-ins that
3164        // don't want that done. Skip adaptation code by making it look
3165        // like we have a match between expected and actual number of
3166        // arguments.
3167        definitely_matches = true;
3168      } else {
3169        *definitely_mismatches = true;
3170        Set(rbx, expected.immediate());
3171      }
3172    }
3173  } else {
3174    if (actual.is_immediate()) {
3175      // Expected is in register, actual is immediate. This is the
3176      // case when we invoke function values without going through the
3177      // IC mechanism.
3178      cmpq(expected.reg(), Immediate(actual.immediate()));
3179      j(equal, &invoke, Label::kNear);
3180      ASSERT(expected.reg().is(rbx));
3181      Set(rax, actual.immediate());
3182    } else if (!expected.reg().is(actual.reg())) {
3183      // Both expected and actual are in (different) registers. This
3184      // is the case when we invoke functions using call and apply.
3185      cmpq(expected.reg(), actual.reg());
3186      j(equal, &invoke, Label::kNear);
3187      ASSERT(actual.reg().is(rax));
3188      ASSERT(expected.reg().is(rbx));
3189    }
3190  }
3191
3192  if (!definitely_matches) {
3193    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3194    if (!code_constant.is_null()) {
3195      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3196      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3197    } else if (!code_register.is(rdx)) {
3198      movq(rdx, code_register);
3199    }
3200
3201    if (flag == CALL_FUNCTION) {
3202      call_wrapper.BeforeCall(CallSize(adaptor));
3203      SetCallKind(rcx, call_kind);
3204      Call(adaptor, RelocInfo::CODE_TARGET);
3205      call_wrapper.AfterCall();
3206      if (!*definitely_mismatches) {
3207        jmp(done, near_jump);
3208      }
3209    } else {
3210      SetCallKind(rcx, call_kind);
3211      Jump(adaptor, RelocInfo::CODE_TARGET);
3212    }
3213    bind(&invoke);
3214  }
3215}
3216
3217
3218void MacroAssembler::EnterFrame(StackFrame::Type type) {
3219  push(rbp);
3220  movq(rbp, rsp);
3221  push(rsi);  // Context.
3222  Push(Smi::FromInt(type));
3223  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3224  push(kScratchRegister);
3225  if (emit_debug_code()) {
3226    movq(kScratchRegister,
3227         isolate()->factory()->undefined_value(),
3228         RelocInfo::EMBEDDED_OBJECT);
3229    cmpq(Operand(rsp, 0), kScratchRegister);
3230    Check(not_equal, "code object not properly patched");
3231  }
3232}
3233
3234
3235void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3236  if (emit_debug_code()) {
3237    Move(kScratchRegister, Smi::FromInt(type));
3238    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3239    Check(equal, "stack frame types must match");
3240  }
3241  movq(rsp, rbp);
3242  pop(rbp);
3243}
3244
3245
3246void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3247  // Set up the frame structure on the stack.
3248  // All constants are relative to the frame pointer of the exit frame.
3249  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
3250  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
3251  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
3252  push(rbp);
3253  movq(rbp, rsp);
3254
3255  // Reserve room for entry stack pointer and push the code object.
3256  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3257  push(Immediate(0));  // Saved entry sp, patched before call.
3258  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3259  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
3260
3261  // Save the frame pointer and the context in top.
3262  if (save_rax) {
3263    movq(r14, rax);  // Backup rax in callee-save register.
3264  }
3265
3266  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3267  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3268}
3269
3270
3271void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3272                                            bool save_doubles) {
3273#ifdef _WIN64
3274  const int kShadowSpace = 4;
3275  arg_stack_space += kShadowSpace;
3276#endif
3277  // Optionally save all XMM registers.
3278  if (save_doubles) {
3279    int space = XMMRegister::kNumRegisters * kDoubleSize +
3280        arg_stack_space * kPointerSize;
3281    subq(rsp, Immediate(space));
3282    int offset = -2 * kPointerSize;
3283    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
3284      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3285      movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3286    }
3287  } else if (arg_stack_space > 0) {
3288    subq(rsp, Immediate(arg_stack_space * kPointerSize));
3289  }
3290
3291  // Get the required frame alignment for the OS.
3292  const int kFrameAlignment = OS::ActivationFrameAlignment();
3293  if (kFrameAlignment > 0) {
3294    ASSERT(IsPowerOf2(kFrameAlignment));
3295    ASSERT(is_int8(kFrameAlignment));
3296    and_(rsp, Immediate(-kFrameAlignment));
3297  }
3298
3299  // Patch the saved entry sp.
3300  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3301}
3302
3303
3304void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3305  EnterExitFramePrologue(true);
3306
3307  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3308  // so it must be retained across the C-call.
3309  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3310  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
3311
3312  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3313}
3314
3315
3316void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3317  EnterExitFramePrologue(false);
3318  EnterExitFrameEpilogue(arg_stack_space, false);
3319}
3320
3321
3322void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3323  // Registers:
3324  // r15 : argv
3325  if (save_doubles) {
3326    int offset = -2 * kPointerSize;
3327    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
3328      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3329      movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3330    }
3331  }
3332  // Get the return address from the stack and restore the frame pointer.
3333  movq(rcx, Operand(rbp, 1 * kPointerSize));
3334  movq(rbp, Operand(rbp, 0 * kPointerSize));
3335
3336  // Drop everything up to and including the arguments and the receiver
3337  // from the caller stack.
3338  lea(rsp, Operand(r15, 1 * kPointerSize));
3339
3340  // Push the return address to get ready to return.
3341  push(rcx);
3342
3343  LeaveExitFrameEpilogue();
3344}
3345
3346
3347void MacroAssembler::LeaveApiExitFrame() {
3348  movq(rsp, rbp);
3349  pop(rbp);
3350
3351  LeaveExitFrameEpilogue();
3352}
3353
3354
3355void MacroAssembler::LeaveExitFrameEpilogue() {
3356  // Restore current context from top and clear it in debug mode.
3357  ExternalReference context_address(Isolate::kContextAddress, isolate());
3358  Operand context_operand = ExternalOperand(context_address);
3359  movq(rsi, context_operand);
3360#ifdef DEBUG
3361  movq(context_operand, Immediate(0));
3362#endif
3363
3364  // Clear the top frame.
3365  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
3366                                       isolate());
3367  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
3368  movq(c_entry_fp_operand, Immediate(0));
3369}
3370
3371
3372void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3373                                            Register scratch,
3374                                            Label* miss) {
3375  Label same_contexts;
3376
3377  ASSERT(!holder_reg.is(scratch));
3378  ASSERT(!scratch.is(kScratchRegister));
3379  // Load current lexical context from the stack frame.
3380  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
3381
3382  // When generating debug code, make sure the lexical context is set.
3383  if (emit_debug_code()) {
3384    cmpq(scratch, Immediate(0));
3385    Check(not_equal, "we should not have an empty lexical context");
3386  }
3387  // Load the global context of the current context.
3388  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
3389  movq(scratch, FieldOperand(scratch, offset));
3390  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
3391
3392  // Check the context is a global context.
3393  if (emit_debug_code()) {
3394    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
3395        isolate()->factory()->global_context_map());
3396    Check(equal, "JSGlobalObject::global_context should be a global context.");
3397  }
3398
3399  // Check if both contexts are the same.
3400  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
3401  j(equal, &same_contexts);
3402
3403  // Compare security tokens.
3404  // Check that the security token in the calling global object is
3405  // compatible with the security token in the receiving global
3406  // object.
3407
3408  // Check the context is a global context.
3409  if (emit_debug_code()) {
3410    // Preserve original value of holder_reg.
3411    push(holder_reg);
3412    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
3413    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
3414    Check(not_equal, "JSGlobalProxy::context() should not be null.");
3415
3416    // Read the first word and compare to global_context_map().
3417    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
3418    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
3419    Check(equal, "JSGlobalObject::global_context should be a global context.");
3420    pop(holder_reg);
3421  }
3422
3423  movq(kScratchRegister,
3424       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
3425  int token_offset =
3426      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
3427  movq(scratch, FieldOperand(scratch, token_offset));
3428  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
3429  j(not_equal, miss);
3430
3431  bind(&same_contexts);
3432}
3433
3434
3435void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
3436  // First of all we assign the hash seed to scratch.
3437  LoadRoot(scratch, Heap::kHashSeedRootIndex);
3438  SmiToInteger32(scratch, scratch);
3439
3440  // Xor original key with a seed.
3441  xorl(r0, scratch);
3442
3443  // Compute the hash code from the untagged key.  This must be kept in sync
3444  // with ComputeIntegerHash in utils.h.
3445  //
3446  // hash = ~hash + (hash << 15);
3447  movl(scratch, r0);
3448  notl(r0);
3449  shll(scratch, Immediate(15));
3450  addl(r0, scratch);
3451  // hash = hash ^ (hash >> 12);
3452  movl(scratch, r0);
3453  shrl(scratch, Immediate(12));
3454  xorl(r0, scratch);
3455  // hash = hash + (hash << 2);
3456  leal(r0, Operand(r0, r0, times_4, 0));
3457  // hash = hash ^ (hash >> 4);
3458  movl(scratch, r0);
3459  shrl(scratch, Immediate(4));
3460  xorl(r0, scratch);
3461  // hash = hash * 2057;
3462  imull(r0, r0, Immediate(2057));
3463  // hash = hash ^ (hash >> 16);
3464  movl(scratch, r0);
3465  shrl(scratch, Immediate(16));
3466  xorl(r0, scratch);
3467}
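
// For reference, the same computation in C++ (a non-compiled sketch that
// mirrors the step comments above; the authoritative version is
// ComputeIntegerHash in utils.h):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;
//   hash = hash ^ (hash >> 16);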
3468
3469
3470
3471void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3472                                              Register elements,
3473                                              Register key,
3474                                              Register r0,
3475                                              Register r1,
3476                                              Register r2,
3477                                              Register result) {
3478  // Register use:
3479  //
3480  // elements - holds the slow-case elements of the receiver on entry.
3481  //            Unchanged unless 'result' is the same register.
3482  //
3483  // key      - holds the smi key on entry.
3484  //            Unchanged unless 'result' is the same register.
3485  //
3486  // Scratch registers:
3487  //
3488  // r0 - holds the untagged key on entry and holds the hash once computed.
3489  //
3490  // r1 - used to hold the capacity mask of the dictionary
3491  //
3492  // r2 - used for the index into the dictionary.
3493  //
3494  // result - holds the result on exit if the load succeeded.
3495  //          Allowed to be the same as 'key' or 'elements'.
3496  //          Unchanged on bailout so 'key' or 'elements' can be used
3497  //          in further computation.
3498
3499  Label done;
3500
3501  GetNumberHash(r0, r1);
3502
3503  // Compute capacity mask.
3504  SmiToInteger32(r1, FieldOperand(elements,
3505                                  SeededNumberDictionary::kCapacityOffset));
3506  decl(r1);
3507
3508  // Generate an unrolled loop that performs a few probes before giving up.
3509  const int kProbes = 4;
3510  for (int i = 0; i < kProbes; i++) {
3511    // Use r2 for index calculations and keep the hash intact in r0.
3512    movq(r2, r0);
3513    // Compute the masked index: (hash + i + i * i) & mask.
3514    if (i > 0) {
3515      addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
3516    }
3517    and_(r2, r1);
3518
3519    // Scale the index by multiplying by the entry size.
3520    ASSERT(SeededNumberDictionary::kEntrySize == 3);
3521    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
3522
3523    // Check if the key matches.
3524    cmpq(key, FieldOperand(elements,
3525                           r2,
3526                           times_pointer_size,
3527                           SeededNumberDictionary::kElementsStartOffset));
3528    if (i != (kProbes - 1)) {
3529      j(equal, &done);
3530    } else {
3531      j(not_equal, miss);
3532    }
3533  }
3534
3535  bind(&done);
3536  // Check that the value is a normal property.
3537  const int kDetailsOffset =
3538      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3539  ASSERT_EQ(NORMAL, 0);
3540  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
3541       Smi::FromInt(PropertyDetails::TypeField::kMask));
3542  j(not_zero, miss);
3543
3544  // Get the value at the masked, scaled index.
3545  const int kValueOffset =
3546      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
3547  movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
3548}
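
// For reference, the unrolled probing above corresponds roughly to this logic
// (a sketch; the probe offsets and entry layout are defined by
// SeededNumberDictionary):
//
//   for (int i = 0; i < 4; i++) {
//     index = (hash + probe_offset(i)) & capacity_mask;  // offset 0 for i == 0
//     entry = elements start + index * 3;                // kEntrySize == 3
//     if (key matches the entry's key) break;            // hit on any probe
//     if (i == 3) goto miss;                             // give up after 4 probes
//   }
//   if (the entry's details has a non-NORMAL type) goto miss;
//   result = the entry's value;                          // key slot + kPointerSize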
3549
3550
3551void MacroAssembler::LoadAllocationTopHelper(Register result,
3552                                             Register scratch,
3553                                             AllocationFlags flags) {
3554  ExternalReference new_space_allocation_top =
3555      ExternalReference::new_space_allocation_top_address(isolate());
3556
3557  // Just return if allocation top is already known.
3558  if ((flags & RESULT_CONTAINS_TOP) != 0) {
3559    // No use of scratch if allocation top is provided.
3560    ASSERT(!scratch.is_valid());
3561#ifdef DEBUG
3562    // Assert that result actually contains top on entry.
3563    Operand top_operand = ExternalOperand(new_space_allocation_top);
3564    cmpq(result, top_operand);
3565    Check(equal, "Unexpected allocation top");
3566#endif
3567    return;
3568  }
3569
3570  // Move address of new object to result. Use scratch register if available,
3571  // and keep address in scratch until call to UpdateAllocationTopHelper.
3572  if (scratch.is_valid()) {
3573    LoadAddress(scratch, new_space_allocation_top);
3574    movq(result, Operand(scratch, 0));
3575  } else {
3576    Load(result, new_space_allocation_top);
3577  }
3578}
3579
3580
3581void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
3582                                               Register scratch) {
3583  if (emit_debug_code()) {
3584    testq(result_end, Immediate(kObjectAlignmentMask));
3585    Check(zero, "Unaligned allocation in new space");
3586  }
3587
3588  ExternalReference new_space_allocation_top =
3589      ExternalReference::new_space_allocation_top_address(isolate());
3590
3591  // Update new top.
3592  if (scratch.is_valid()) {
3593    // Scratch already contains address of allocation top.
3594    movq(Operand(scratch, 0), result_end);
3595  } else {
3596    Store(new_space_allocation_top, result_end);
3597  }
3598}
3599
3600
3601void MacroAssembler::AllocateInNewSpace(int object_size,
3602                                        Register result,
3603                                        Register result_end,
3604                                        Register scratch,
3605                                        Label* gc_required,
3606                                        AllocationFlags flags) {
3607  if (!FLAG_inline_new) {
3608    if (emit_debug_code()) {
3609      // Trash the registers to simulate an allocation failure.
3610      movl(result, Immediate(0x7091));
3611      if (result_end.is_valid()) {
3612        movl(result_end, Immediate(0x7191));
3613      }
3614      if (scratch.is_valid()) {
3615        movl(scratch, Immediate(0x7291));
3616      }
3617    }
3618    jmp(gc_required);
3619    return;
3620  }
3621  ASSERT(!result.is(result_end));
3622
3623  // Load address of new object into result.
3624  LoadAllocationTopHelper(result, scratch, flags);
3625
3626  // Calculate new top and bail out if new space is exhausted.
3627  ExternalReference new_space_allocation_limit =
3628      ExternalReference::new_space_allocation_limit_address(isolate());
3629
3630  Register top_reg = result_end.is_valid() ? result_end : result;
3631
3632  if (!top_reg.is(result)) {
3633    movq(top_reg, result);
3634  }
3635  addq(top_reg, Immediate(object_size));
3636  j(carry, gc_required);
3637  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3638  cmpq(top_reg, limit_operand);
3639  j(above, gc_required);
3640
3641  // Update allocation top.
3642  UpdateAllocationTopHelper(top_reg, scratch);
3643
3644  if (top_reg.is(result)) {
3645    if ((flags & TAG_OBJECT) != 0) {
3646      subq(result, Immediate(object_size - kHeapObjectTag));
3647    } else {
3648      subq(result, Immediate(object_size));
3649    }
3650  } else if ((flags & TAG_OBJECT) != 0) {
3651    // Tag the result if requested.
3652    addq(result, Immediate(kHeapObjectTag));
3653  }
3654}
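
// For reference, the fast path above is a bump-pointer allocation, roughly
// (a sketch):
//
//   top     = *new_space_allocation_top;
//   new_top = top + object_size;
//   if (new_top overflows or new_top > *new_space_allocation_limit)
//     goto gc_required;
//   *new_space_allocation_top = new_top;
//   result = top;                                  // start of the new object
//   if ((flags & TAG_OBJECT) != 0) result += kHeapObjectTag;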
3655
3656
3657void MacroAssembler::AllocateInNewSpace(int header_size,
3658                                        ScaleFactor element_size,
3659                                        Register element_count,
3660                                        Register result,
3661                                        Register result_end,
3662                                        Register scratch,
3663                                        Label* gc_required,
3664                                        AllocationFlags flags) {
3665  if (!FLAG_inline_new) {
3666    if (emit_debug_code()) {
3667      // Trash the registers to simulate an allocation failure.
3668      movl(result, Immediate(0x7091));
3669      movl(result_end, Immediate(0x7191));
3670      if (scratch.is_valid()) {
3671        movl(scratch, Immediate(0x7291));
3672      }
3673      // Register element_count is not modified by the function.
3674    }
3675    jmp(gc_required);
3676    return;
3677  }
3678  ASSERT(!result.is(result_end));
3679
3680  // Load address of new object into result.
3681  LoadAllocationTopHelper(result, scratch, flags);
3682
3683  // Calculate new top and bail out if new space is exhausted.
3684  ExternalReference new_space_allocation_limit =
3685      ExternalReference::new_space_allocation_limit_address(isolate());
3686
3687  // We assume that element_count*element_size + header_size does not
3688  // overflow.
3689  lea(result_end, Operand(element_count, element_size, header_size));
3690  addq(result_end, result);
3691  j(carry, gc_required);
3692  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3693  cmpq(result_end, limit_operand);
3694  j(above, gc_required);
3695
3696  // Update allocation top.
3697  UpdateAllocationTopHelper(result_end, scratch);
3698
3699  // Tag the result if requested.
3700  if ((flags & TAG_OBJECT) != 0) {
3701    addq(result, Immediate(kHeapObjectTag));
3702  }
3703}
3704
3705
3706void MacroAssembler::AllocateInNewSpace(Register object_size,
3707                                        Register result,
3708                                        Register result_end,
3709                                        Register scratch,
3710                                        Label* gc_required,
3711                                        AllocationFlags flags) {
3712  if (!FLAG_inline_new) {
3713    if (emit_debug_code()) {
3714      // Trash the registers to simulate an allocation failure.
3715      movl(result, Immediate(0x7091));
3716      movl(result_end, Immediate(0x7191));
3717      if (scratch.is_valid()) {
3718        movl(scratch, Immediate(0x7291));
3719      }
3720      // object_size is left unchanged by this function.
3721    }
3722    jmp(gc_required);
3723    return;
3724  }
3725  ASSERT(!result.is(result_end));
3726
3727  // Load address of new object into result.
3728  LoadAllocationTopHelper(result, scratch, flags);
3729
3730  // Calculate new top and bail out if new space is exhausted.
3731  ExternalReference new_space_allocation_limit =
3732      ExternalReference::new_space_allocation_limit_address(isolate());
3733  if (!object_size.is(result_end)) {
3734    movq(result_end, object_size);
3735  }
3736  addq(result_end, result);
3737  j(carry, gc_required);
3738  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3739  cmpq(result_end, limit_operand);
3740  j(above, gc_required);
3741
3742  // Update allocation top.
3743  UpdateAllocationTopHelper(result_end, scratch);
3744
3745  // Tag the result if requested.
3746  if ((flags & TAG_OBJECT) != 0) {
3747    addq(result, Immediate(kHeapObjectTag));
3748  }
3749}
3750
3751
3752void MacroAssembler::UndoAllocationInNewSpace(Register object) {
3753  ExternalReference new_space_allocation_top =
3754      ExternalReference::new_space_allocation_top_address(isolate());
3755
3756  // Make sure the object has no tag before resetting top.
3757  and_(object, Immediate(~kHeapObjectTagMask));
3758  Operand top_operand = ExternalOperand(new_space_allocation_top);
3759#ifdef DEBUG
3760  cmpq(object, top_operand);
3761  Check(below, "Undo allocation of non allocated memory");
3762#endif
3763  movq(top_operand, object);
3764}
3765
3766
3767void MacroAssembler::AllocateHeapNumber(Register result,
3768                                        Register scratch,
3769                                        Label* gc_required) {
3770  // Allocate heap number in new space.
3771  AllocateInNewSpace(HeapNumber::kSize,
3772                     result,
3773                     scratch,
3774                     no_reg,
3775                     gc_required,
3776                     TAG_OBJECT);
3777
3778  // Set the map.
3779  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
3780  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3781}
3782
3783
3784void MacroAssembler::AllocateTwoByteString(Register result,
3785                                           Register length,
3786                                           Register scratch1,
3787                                           Register scratch2,
3788                                           Register scratch3,
3789                                           Label* gc_required) {
3790  // Calculate the number of bytes needed for the characters in the string
3791  // while observing object alignment.
3792  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
3793                               kObjectAlignmentMask;
3794  ASSERT(kShortSize == 2);
3795  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
3796  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
3797                kHeaderAlignment));
3798  and_(scratch1, Immediate(~kObjectAlignmentMask));
3799  if (kHeaderAlignment > 0) {
3800    subq(scratch1, Immediate(kHeaderAlignment));
3801  }
3802
3803  // Allocate two byte string in new space.
3804  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
3805                     times_1,
3806                     scratch1,
3807                     result,
3808                     scratch2,
3809                     scratch3,
3810                     gc_required,
3811                     TAG_OBJECT);
3812
3813  // Set the map, length and hash field.
3814  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
3815  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3816  Integer32ToSmi(scratch1, length);
3817  movq(FieldOperand(result, String::kLengthOffset), scratch1);
3818  movq(FieldOperand(result, String::kHashFieldOffset),
3819       Immediate(String::kEmptyHashField));
3820}
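
// For reference, the size handed to AllocateInNewSpace above works out to
// (a sketch of the arithmetic):
//
//   total    = RoundUp(SeqTwoByteString::kHeaderSize + 2 * length,
//                      kObjectAlignment);
//   scratch1 = total - SeqTwoByteString::kHeaderSize;   // aligned body size
//
// The kHeaderAlignment correction only matters if the header size itself is
// not a multiple of kObjectAlignment.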
3821
3822
3823void MacroAssembler::AllocateAsciiString(Register result,
3824                                         Register length,
3825                                         Register scratch1,
3826                                         Register scratch2,
3827                                         Register scratch3,
3828                                         Label* gc_required) {
3829  // Calculate the number of bytes needed for the characters in the string
3830  // while observing object alignment.
3831  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
3832                               kObjectAlignmentMask;
3833  movl(scratch1, length);
3834  ASSERT(kCharSize == 1);
3835  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
3836  and_(scratch1, Immediate(~kObjectAlignmentMask));
3837  if (kHeaderAlignment > 0) {
3838    subq(scratch1, Immediate(kHeaderAlignment));
3839  }
3840
3841  // Allocate ASCII string in new space.
3842  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
3843                     times_1,
3844                     scratch1,
3845                     result,
3846                     scratch2,
3847                     scratch3,
3848                     gc_required,
3849                     TAG_OBJECT);
3850
3851  // Set the map, length and hash field.
3852  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
3853  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3854  Integer32ToSmi(scratch1, length);
3855  movq(FieldOperand(result, String::kLengthOffset), scratch1);
3856  movq(FieldOperand(result, String::kHashFieldOffset),
3857       Immediate(String::kEmptyHashField));
3858}
3859
3860
3861void MacroAssembler::AllocateTwoByteConsString(Register result,
3862                                        Register scratch1,
3863                                        Register scratch2,
3864                                        Label* gc_required) {
3865  // Allocate two byte cons string in new space.
3866  AllocateInNewSpace(ConsString::kSize,
3867                     result,
3868                     scratch1,
3869                     scratch2,
3870                     gc_required,
3871                     TAG_OBJECT);
3872
3873  // Set the map. The other fields are left uninitialized.
3874  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
3875  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3876}
3877
3878
3879void MacroAssembler::AllocateAsciiConsString(Register result,
3880                                             Register scratch1,
3881                                             Register scratch2,
3882                                             Label* gc_required) {
3883  // Allocate ASCII cons string in new space.
3884  AllocateInNewSpace(ConsString::kSize,
3885                     result,
3886                     scratch1,
3887                     scratch2,
3888                     gc_required,
3889                     TAG_OBJECT);
3890
3891  // Set the map. The other fields are left uninitialized.
3892  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
3893  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3894}
3895
3896
3897void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3898                                          Register scratch1,
3899                                          Register scratch2,
3900                                          Label* gc_required) {
3901  // Allocate two byte sliced string in new space.
3902  AllocateInNewSpace(SlicedString::kSize,
3903                     result,
3904                     scratch1,
3905                     scratch2,
3906                     gc_required,
3907                     TAG_OBJECT);
3908
3909  // Set the map. The other fields are left uninitialized.
3910  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
3911  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3912}
3913
3914
3915void MacroAssembler::AllocateAsciiSlicedString(Register result,
3916                                               Register scratch1,
3917                                               Register scratch2,
3918                                               Label* gc_required) {
3919  // Allocate ASCII sliced string in new space.
3920  AllocateInNewSpace(SlicedString::kSize,
3921                     result,
3922                     scratch1,
3923                     scratch2,
3924                     gc_required,
3925                     TAG_OBJECT);
3926
3927  // Set the map. The other fields are left uninitialized.
3928  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
3929  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3930}
3931
3932
3933// Copy memory, byte-by-byte, from source to destination.  Not optimized for
3934// long or aligned copies.  The contents of scratch and length are destroyed.
3935// Destination is incremented by length; source, length and scratch are
3936// clobbered.
3937// A simpler loop is faster on small copies, but slower on large ones.
3938// The cld() instruction must have been emitted, to clear the direction flag,
3939// before calling this function.
3940void MacroAssembler::CopyBytes(Register destination,
3941                               Register source,
3942                               Register length,
3943                               int min_length,
3944                               Register scratch) {
3945  ASSERT(min_length >= 0);
3946  if (FLAG_debug_code) {
3947    cmpl(length, Immediate(min_length));
3948    Assert(greater_equal, "Invalid min_length");
3949  }
3950  Label loop, done, short_string, short_loop;
3951
3952  const int kLongStringLimit = 20;
3953  if (min_length <= kLongStringLimit) {
3954    cmpl(length, Immediate(kLongStringLimit));
3955    j(less_equal, &short_string);
3956  }
3957
3958  ASSERT(source.is(rsi));
3959  ASSERT(destination.is(rdi));
3960  ASSERT(length.is(rcx));
3961
3962  // Because source is 8-byte aligned in our uses of this function,
3963  // we keep source aligned for the rep movs operation by copying the odd bytes
3964  // at the end of the ranges.
3965  movq(scratch, length);
3966  shrl(length, Immediate(3));
3967  repmovsq();
3968  // Copy the remaining length % 8 bytes.
3969  andl(scratch, Immediate(0x7));
3970  movq(length, Operand(source, scratch, times_1, -8));
3971  movq(Operand(destination, scratch, times_1, -8), length);
3972  addq(destination, scratch);
3973
3974  if (min_length <= kLongStringLimit) {
3975    jmp(&done);
3976
3977    bind(&short_string);
3978    if (min_length == 0) {
3979      testl(length, length);
3980      j(zero, &done);
3981    }
3982    lea(scratch, Operand(destination, length, times_1, 0));
3983
3984    bind(&short_loop);
3985    movb(length, Operand(source, 0));
3986    movb(Operand(destination, 0), length);
3987    incq(source);
3988    incq(destination);
3989    cmpq(destination, scratch);
3990    j(not_equal, &short_loop);
3991
3992    bind(&done);
3993  }
3994}
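
// For reference, the copy above is roughly (a sketch):
//
//   if (length > 20) {                         // kLongStringLimit
//     copy length / 8 quadwords with rep movsq;
//     copy the last 8 bytes of the source range with a single, possibly
//     overlapping, 8-byte load/store to pick up the length % 8 tail;
//   } else {
//     copy byte by byte;
//   }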
3995
3996
3997void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3998                                                Register end_offset,
3999                                                Register filler) {
4000  Label loop, entry;
4001  jmp(&entry);
4002  bind(&loop);
4003  movq(Operand(start_offset, 0), filler);
4004  addq(start_offset, Immediate(kPointerSize));
4005  bind(&entry);
4006  cmpq(start_offset, end_offset);
4007  j(less, &loop);
4008}
4009
4010
4011void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4012  if (context_chain_length > 0) {
4013    // Move up the chain of contexts to the context containing the slot.
4014    movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4015    for (int i = 1; i < context_chain_length; i++) {
4016      movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4017    }
4018  } else {
4019    // Slot is in the current function context.  Move it into the
4020    // destination register in case we store into it (the write barrier
4021    // cannot be allowed to destroy the context in rsi).
4022    movq(dst, rsi);
4023  }
4024
4025  // We should not have found a with context by walking the context
4026  // chain (i.e., the static scope chain and runtime context chain do
4027  // not agree).  A variable occurring in such a scope should have
4028  // slot type LOOKUP and not CONTEXT.
4029  if (emit_debug_code()) {
4030    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4031                Heap::kWithContextMapRootIndex);
4032    Check(not_equal, "Variable resolved to with context.");
4033  }
4034}
4035
4036#ifdef _WIN64
4037static const int kRegisterPassedArguments = 4;
4038#else
4039static const int kRegisterPassedArguments = 6;
4040#endif
4041
4042void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4043  // Load the global or builtins object from the current context.
4044  movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
4045  // Load the global context from the global or builtins object.
4046  movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
4047  // Load the function from the global context.
4048  movq(function, Operand(function, Context::SlotOffset(index)));
4049}
4050
4051
4052void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4053                                                  Register map) {
4054  // Load the initial map.  The global functions all have initial maps.
4055  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4056  if (emit_debug_code()) {
4057    Label ok, fail;
4058    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4059    jmp(&ok);
4060    bind(&fail);
4061    Abort("Global functions must have initial map");
4062    bind(&ok);
4063  }
4064}
4065
4066
4067int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4068  // On Windows 64 stack slots are reserved by the caller for all arguments
4069  // including the ones passed in registers, and space is always allocated for
4070  // the four register arguments even if the function takes fewer than four
4071  // arguments.
4072  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4073  // and the caller does not reserve stack slots for them.
4074  ASSERT(num_arguments >= 0);
4075#ifdef _WIN64
4076  const int kMinimumStackSlots = kRegisterPassedArguments;
4077  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4078  return num_arguments;
4079#else
4080  if (num_arguments < kRegisterPassedArguments) return 0;
4081  return num_arguments - kRegisterPassedArguments;
4082#endif
4083}
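
// For example (stack slots reserved by the caller):
//
//   num_arguments == 3:  Windows x64 -> 4 (minimum home space for the
//                        register arguments), AMD64 ABI -> 0.
//   num_arguments == 7:  Windows x64 -> 7, AMD64 ABI -> 1 (only the seventh
//                        argument is passed on the stack).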
4084
4085
4086void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4087  int frame_alignment = OS::ActivationFrameAlignment();
4088  ASSERT(frame_alignment != 0);
4089  ASSERT(num_arguments >= 0);
4090
4091  // Make stack end at alignment and allocate space for arguments and old rsp.
4092  movq(kScratchRegister, rsp);
4093  ASSERT(IsPowerOf2(frame_alignment));
4094  int argument_slots_on_stack =
4095      ArgumentStackSlotsForCFunctionCall(num_arguments);
4096  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
4097  and_(rsp, Immediate(-frame_alignment));
4098  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
4099}
4100
4101
4102void MacroAssembler::CallCFunction(ExternalReference function,
4103                                   int num_arguments) {
4104  LoadAddress(rax, function);
4105  CallCFunction(rax, num_arguments);
4106}
4107
4108
4109void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4110  ASSERT(has_frame());
4111  // Check stack alignment.
4112  if (emit_debug_code()) {
4113    CheckStackAlignment();
4114  }
4115
4116  call(function);
4117  ASSERT(OS::ActivationFrameAlignment() != 0);
4118  ASSERT(num_arguments >= 0);
4119  int argument_slots_on_stack =
4120      ArgumentStackSlotsForCFunctionCall(num_arguments);
4121  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
4122}
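
// For reference, PrepareCallCFunction and CallCFunction cooperate roughly
// like this (a sketch):
//
//   PrepareCallCFunction:  kScratchRegister = rsp                // old rsp
//                          rsp -= (slots + 1) * kPointerSize
//                          rsp &= -frame_alignment
//                          [rsp + slots * kPointerSize] = old rsp
//   CallCFunction:         call function
//                          rsp = [rsp + slots * kPointerSize]    // restore
//
// where 'slots' is ArgumentStackSlotsForCFunctionCall(num_arguments).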
4123
4124
4125bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
4126  if (r1.is(r2)) return true;
4127  if (r1.is(r3)) return true;
4128  if (r1.is(r4)) return true;
4129  if (r2.is(r3)) return true;
4130  if (r2.is(r4)) return true;
4131  if (r3.is(r4)) return true;
4132  return false;
4133}
4134
4135
4136CodePatcher::CodePatcher(byte* address, int size)
4137    : address_(address),
4138      size_(size),
4139      masm_(Isolate::Current(), address, size + Assembler::kGap) {
4140  // Create a new macro assembler pointing to the address of the code to patch.
4141// The size is adjusted with kGap in order for the assembler to generate size
4142  // bytes of instructions without failing with buffer size constraints.
4143  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4144}
4145
4146
4147CodePatcher::~CodePatcher() {
4148  // Indicate that code has changed.
4149  CPU::FlushICache(address_, size_);
4150
4151  // Check that the code was patched as expected.
4152  ASSERT(masm_.pc_ == address_ + size_);
4153  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4154}
4155
4156
4157void MacroAssembler::CheckPageFlag(
4158    Register object,
4159    Register scratch,
4160    int mask,
4161    Condition cc,
4162    Label* condition_met,
4163    Label::Distance condition_met_distance) {
4164  ASSERT(cc == zero || cc == not_zero);
4165  if (scratch.is(object)) {
4166    and_(scratch, Immediate(~Page::kPageAlignmentMask));
4167  } else {
4168    movq(scratch, Immediate(~Page::kPageAlignmentMask));
4169    and_(scratch, object);
4170  }
4171  if (mask < (1 << kBitsPerByte)) {
4172    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
4173          Immediate(static_cast<uint8_t>(mask)));
4174  } else {
4175    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
4176  }
4177  j(cc, condition_met, condition_met_distance);
4178}
4179
4180
4181void MacroAssembler::JumpIfBlack(Register object,
4182                                 Register bitmap_scratch,
4183                                 Register mask_scratch,
4184                                 Label* on_black,
4185                                 Label::Distance on_black_distance) {
4186  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
4187  GetMarkBits(object, bitmap_scratch, mask_scratch);
4188
4189  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4190  // The mask_scratch register contains a 1 at the position of the first bit
4191  // and a 0 at all other positions, including the position of the second bit.
4192  movq(rcx, mask_scratch);
4193  // Make rcx into a mask that covers both marking bits using the operation
4194  // rcx = mask | (mask << 1).
4195  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
4196  // Note that we are using a 4-byte aligned 8-byte load.
4197  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
4198  cmpq(mask_scratch, rcx);
4199  j(equal, on_black, on_black_distance);
4200}
4201
4202
4203// Detect some, but not all, common pointer-free objects.  This is used by the
4204// incremental write barrier which doesn't care about oddballs (they are always
4205// marked black immediately so this code is not hit).
4206void MacroAssembler::JumpIfDataObject(
4207    Register value,
4208    Register scratch,
4209    Label* not_data_object,
4210    Label::Distance not_data_object_distance) {
4211  Label is_data_object;
4212  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
4213  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
4214  j(equal, &is_data_object, Label::kNear);
4215  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4216  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4217  // If it's a string and it's not an indirect string (cons or sliced) then
4218  // it's an object containing no GC pointers.
4219  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
4220        Immediate(kIsIndirectStringMask | kIsNotStringMask));
4221  j(not_zero, not_data_object, not_data_object_distance);
4222  bind(&is_data_object);
4223}
4224
4225
4226void MacroAssembler::GetMarkBits(Register addr_reg,
4227                                 Register bitmap_reg,
4228                                 Register mask_reg) {
4229  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
4230  movq(bitmap_reg, addr_reg);
4231  // Sign extended 32 bit immediate.
4232  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
4233  movq(rcx, addr_reg);
4234  int shift =
4235      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
4236  shrl(rcx, Immediate(shift));
4237  and_(rcx,
4238       Immediate((Page::kPageAlignmentMask >> shift) &
4239                 ~(Bitmap::kBytesPerCell - 1)));
4240
4241  addq(bitmap_reg, rcx);
4242  movq(rcx, addr_reg);
4243  shrl(rcx, Immediate(kPointerSizeLog2));
4244  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
4245  movl(mask_reg, Immediate(1));
4246  shl_cl(mask_reg);
4247}
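
// For reference, the computation above locates the mark bit for addr_reg
// roughly as follows (a sketch in terms of the Bitmap constants; the bitmap
// itself is read at MemoryChunk::kHeaderSize within the page):
//
//   page       = addr & ~Page::kPageAlignmentMask;
//   cell_index = (addr & Page::kPageAlignmentMask)
//                    >> (kPointerSizeLog2 + Bitmap::kBitsPerCellLog2);
//   bitmap_reg = page + cell_index * Bitmap::kBytesPerCell;
//   bit        = (addr >> kPointerSizeLog2)
//                    & ((1 << Bitmap::kBitsPerCellLog2) - 1);
//   mask_reg   = 1 << bit;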
4248
4249
4250void MacroAssembler::EnsureNotWhite(
4251    Register value,
4252    Register bitmap_scratch,
4253    Register mask_scratch,
4254    Label* value_is_white_and_not_data,
4255    Label::Distance distance) {
4256  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
4257  GetMarkBits(value, bitmap_scratch, mask_scratch);
4258
4259  // If the value is black or grey we don't need to do anything.
4260  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4261  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4262  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4263  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4264
4265  Label done;
4266
4267  // Since both black and grey have a 1 in the first position and white does
4268  // not have a 1 there we only need to check one bit.
4269  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4270  j(not_zero, &done, Label::kNear);
4271
4272  if (FLAG_debug_code) {
4273    // Check for impossible bit pattern.
4274    Label ok;
4275    push(mask_scratch);
4276    // shl.  May overflow making the check conservative.
4277    addq(mask_scratch, mask_scratch);
4278    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4279    j(zero, &ok, Label::kNear);
4280    int3();
4281    bind(&ok);
4282    pop(mask_scratch);
4283  }
4284
4285  // Value is white.  We check whether it is data that doesn't need scanning.
4286  // Currently only checks for HeapNumber and non-cons strings.
4287  Register map = rcx;  // Holds map while checking type.
4288  Register length = rcx;  // Holds length of object after checking type.
4289  Label not_heap_number;
4290  Label is_data_object;
4291
4292  // Check for heap-number
4293  movq(map, FieldOperand(value, HeapObject::kMapOffset));
4294  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
4295  j(not_equal, &not_heap_number, Label::kNear);
4296  movq(length, Immediate(HeapNumber::kSize));
4297  jmp(&is_data_object, Label::kNear);
4298
4299  bind(&not_heap_number);
4300  // Check for strings.
4301  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4302  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4303  // If it's a string and it's not an indirect string (cons or sliced) then
4304  // it's an object containing no GC pointers.
4305  Register instance_type = rcx;
4306  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
4307  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
4308  j(not_zero, value_is_white_and_not_data);
4309  // It's a non-indirect (non-cons and non-slice) string.
4310  // If it's external, the length is just ExternalString::kSize.
4311  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4312  Label not_external;
4313  // External strings are the only ones with the kExternalStringTag bit
4314  // set.
4315  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
4316  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
4317  testb(instance_type, Immediate(kExternalStringTag));
4318  j(zero, &not_external, Label::kNear);
4319  movq(length, Immediate(ExternalString::kSize));
4320  jmp(&is_data_object, Label::kNear);
4321
4322  bind(&not_external);
4323  // Sequential string, either ASCII or UC16.
4324  ASSERT(kAsciiStringTag == 0x04);
4325  and_(length, Immediate(kStringEncodingMask));
4326  xor_(length, Immediate(kStringEncodingMask));
4327  addq(length, Immediate(0x04));
4328  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
4329  imul(length, FieldOperand(value, String::kLengthOffset));
4330  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
4331  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
4332  and_(length, Immediate(~kObjectAlignmentMask));
4333
4334  bind(&is_data_object);
4335  // Value is a data object, and it is white.  Mark it black.  Since we know
4336  // that the object is white we can make it black by flipping one bit.
4337  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4338
4339  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
4340  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
4341
4342  bind(&done);
4343}
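
// For reference, the size computed above for a sequential string is, in
// effect (a sketch):
//
//   char_size = ASCII ? 1 : 2;              // derived from the encoding bit
//   length    = RoundUp(SeqString::kHeaderSize + char_size * string length,
//                       kObjectAlignment);
//
// and that size is then added to the page's live-bytes counter once the mark
// bit has been flipped to black.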
4344
4345} }  // namespace v8::internal
4346
4347#endif  // V8_TARGET_ARCH_X64
4348