macro-assembler-x64.cc revision 888f6729be6a6f6fbe246cb5a9f122e2dbe455b7
1// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "bootstrapper.h"
31#include "codegen-inl.h"
32#include "assembler-x64.h"
33#include "macro-assembler-x64.h"
34#include "serialize.h"
35#include "debug.h"
36
37namespace v8 {
38namespace internal {
39
40MacroAssembler::MacroAssembler(void* buffer, int size)
41    : Assembler(buffer, size),
42      unresolved_(0),
43      generating_stub_(false),
44      allow_stub_calls_(true),
45      code_object_(Heap::undefined_value()) {
46}
47
48
49void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
50  movq(destination, Operand(r13, index << kPointerSizeLog2));
51}
52
53
54void MacroAssembler::PushRoot(Heap::RootListIndex index) {
55  push(Operand(r13, index << kPointerSizeLog2));
56}
57
58
59void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
60  cmpq(with, Operand(r13, index << kPointerSizeLog2));
61}
62
63
64void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
65  LoadRoot(kScratchRegister, index);
66  cmpq(with, kScratchRegister);
67}
68
69
70void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
71  CompareRoot(rsp, Heap::kStackLimitRootIndex);
72  j(below, on_stack_overflow);
73}
74
75
76static void RecordWriteHelper(MacroAssembler* masm,
77                              Register object,
78                              Register addr,
79                              Register scratch) {
80  Label fast;
81
82  // Compute the page start address from the heap object pointer, and reuse
83  // the 'object' register for it.
84  ASSERT(is_int32(~Page::kPageAlignmentMask));
85  masm->and_(object,
86             Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
87  Register page_start = object;
88
89  // Compute the bit address in the remembered set, i.e. the index of the
90  // pointer within the page. Reuse 'addr' as pointer_offset.
91  masm->subq(addr, page_start);
92  masm->shr(addr, Immediate(kPointerSizeLog2));
93  Register pointer_offset = addr;
94
95  // If the bit offset lies beyond the normal remembered set range, it is in
96  // the extra remembered set area of a large object.
97  masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
98  masm->j(less, &fast);
99
100  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
101  // extra remembered set after the large object.
102
103  // Load the array length into 'scratch'.
104  masm->movl(scratch,
105             Operand(page_start,
106                     Page::kObjectStartOffset + FixedArray::kLengthOffset));
107  Register array_length = scratch;
108
109  // Extra remembered set starts right after the large object (a FixedArray), at
110  //   page_start + kObjectStartOffset + objectSize
111  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
112  // Add the delta between the end of the normal RSet and the start of the
113  // extra RSet to 'page_start', so that addressing the bit using
114  // 'pointer_offset' hits the extra RSet words.
115  masm->lea(page_start,
116            Operand(page_start, array_length, times_pointer_size,
117                    Page::kObjectStartOffset + FixedArray::kHeaderSize
118                        - Page::kRSetEndOffset));
119
120  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
121  // to limit code size. We should probably evaluate this decision by
122  // measuring the performance of an equivalent implementation using
123  // "simpler" instructions
124  masm->bind(&fast);
125  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
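  // Illustrative example (assumed values): for a slot at byte offset 0x1f8
  // from the page start, pointer_offset is 0x1f8 >> kPointerSizeLog2 == 63,
  // so the bts above sets bit 63 of the remembered set bitmap that starts at
  // page_start + Page::kRSetOffset.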
126}
127
128
129class RecordWriteStub : public CodeStub {
130 public:
131  RecordWriteStub(Register object, Register addr, Register scratch)
132      : object_(object), addr_(addr), scratch_(scratch) { }
133
134  void Generate(MacroAssembler* masm);
135
136 private:
137  Register object_;
138  Register addr_;
139  Register scratch_;
140
141#ifdef DEBUG
142  void Print() {
143    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
144           object_.code(), addr_.code(), scratch_.code());
145  }
146#endif
147
148  // Minor key encoding in 12 bits of three registers (object, address and
149  // scratch) OOOOAAAASSSS.
150  class ScratchBits : public BitField<uint32_t, 0, 4> {};
151  class AddressBits : public BitField<uint32_t, 4, 4> {};
152  class ObjectBits : public BitField<uint32_t, 8, 4> {};
153
154  Major MajorKey() { return RecordWrite; }
155
156  int MinorKey() {
157    // Encode the registers.
158    return ObjectBits::encode(object_.code()) |
159           AddressBits::encode(addr_.code()) |
160           ScratchBits::encode(scratch_.code());
161  }
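  // Example (illustrative, standard x64 register codes): with object == rdx
  // (code 2), addr == rcx (code 1) and scratch == rbx (code 3), MinorKey()
  // returns (2 << 8) | (1 << 4) | 3 == 0x213.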
162};
163
164
165void RecordWriteStub::Generate(MacroAssembler* masm) {
166  RecordWriteHelper(masm, object_, addr_, scratch_);
167  masm->ret(0);
168}
169
170
171// Set the remembered set bit for [object+offset].
172// object is the object being stored into, value is the object being stored.
173// If offset is zero, then the smi_index register contains the array index into
174// the elements array represented as a smi. Otherwise it can be used as a
175// scratch register.
176// All registers are clobbered by the operation.
177void MacroAssembler::RecordWrite(Register object,
178                                 int offset,
179                                 Register value,
180                                 Register smi_index) {
181  // First, check if a remembered set write is even needed. The tests below
182  // catch stores of Smis and stores into the young generation (which does
183  // not have space for the remembered set bits).
184  Label done;
185  JumpIfSmi(value, &done);
186
187  RecordWriteNonSmi(object, offset, value, smi_index);
188  bind(&done);
189}
190
191
192void MacroAssembler::RecordWriteNonSmi(Register object,
193                                       int offset,
194                                       Register scratch,
195                                       Register smi_index) {
196  Label done;
197  // Test that the object address is not in the new space.  We cannot
198  // set remembered set bits in the new space.
199  movq(scratch, object);
200  ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
201  and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
202  movq(kScratchRegister, ExternalReference::new_space_start());
203  cmpq(scratch, kScratchRegister);
204  j(equal, &done);
205
206  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
207    // Compute the bit offset in the remembered set, leave it in 'scratch'.
208    lea(scratch, Operand(object, offset));
209    ASSERT(is_int32(Page::kPageAlignmentMask));
210    and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
211    shr(scratch, Immediate(kObjectAlignmentBits));
212
213    // Compute the page address from the heap object pointer, leave it in
214    // 'object' (immediate value is sign extended).
215    and_(object, Immediate(~Page::kPageAlignmentMask));
216
217    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
218    // to limit code size. We should probably evaluate this decision by
219    // measuring the performance of an equivalent implementation using
220    // "simpler" instructions
221    bts(Operand(object, Page::kRSetOffset), scratch);
222  } else {
223    Register dst = smi_index;
224    if (offset != 0) {
225      lea(dst, Operand(object, offset));
226    } else {
227      // array access: calculate the destination address in the same manner as
228      // KeyedStoreIC::GenerateGeneric.
229      SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
230      lea(dst, Operand(object,
231                       index.reg,
232                       index.scale,
233                       FixedArray::kHeaderSize - kHeapObjectTag));
234    }
235    // If we are already generating a shared stub, not inlining the
236    // record write code isn't going to save us any memory.
237    if (generating_stub()) {
238      RecordWriteHelper(this, object, dst, scratch);
239    } else {
240      RecordWriteStub stub(object, dst, scratch);
241      CallStub(&stub);
242    }
243  }
244
245  bind(&done);
246}
247
248
249void MacroAssembler::Assert(Condition cc, const char* msg) {
250  if (FLAG_debug_code) Check(cc, msg);
251}
252
253
254void MacroAssembler::Check(Condition cc, const char* msg) {
255  Label L;
256  j(cc, &L);
257  Abort(msg);
258  // will not return here
259  bind(&L);
260}
261
262
263void MacroAssembler::NegativeZeroTest(Register result,
264                                      Register op,
265                                      Label* then_label) {
266  Label ok;
267  testl(result, result);
268  j(not_zero, &ok);
269  testl(op, op);
270  j(sign, then_label);
271  bind(&ok);
272}
273
274
275void MacroAssembler::Abort(const char* msg) {
276  // We want to pass the msg string like a smi to avoid GC
277  // problems, however msg is not guaranteed to be aligned
278  // properly. Instead, we pass an aligned pointer that is
279  // a proper v8 smi, but also pass the alignment difference
280  // from the real pointer as a smi.
281  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
282  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
283  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
284  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
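  // Worked example with an illustrative address: if msg == 0x4b3a71 then
  // p0 == 0x4b3a70 (low tag bit cleared) and p1 - p0 == 1, so the runtime
  // receives the aligned pointer plus Smi::FromInt(1) and can recover the
  // original string address as p0 + 1.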
285#ifdef DEBUG
286  if (msg != NULL) {
287    RecordComment("Abort message: ");
288    RecordComment(msg);
289  }
290#endif
291  // Disable stub call restrictions to always allow calls to abort.
292  set_allow_stub_calls(true);
293
294  push(rax);
295  movq(kScratchRegister, p0, RelocInfo::NONE);
296  push(kScratchRegister);
297  movq(kScratchRegister,
298       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
299       RelocInfo::NONE);
300  push(kScratchRegister);
301  CallRuntime(Runtime::kAbort, 2);
302  // will not return here
303  int3();
304}
305
306
307void MacroAssembler::CallStub(CodeStub* stub) {
308  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
309  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
310}
311
312
313void MacroAssembler::TailCallStub(CodeStub* stub) {
314  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
315  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
316}
317
318
319void MacroAssembler::StubReturn(int argc) {
320  ASSERT(argc >= 1 && generating_stub());
321  ret((argc - 1) * kPointerSize);
322}
323
324
325void MacroAssembler::IllegalOperation(int num_arguments) {
326  if (num_arguments > 0) {
327    addq(rsp, Immediate(num_arguments * kPointerSize));
328  }
329  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
330}
331
332
333void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
334  CallRuntime(Runtime::FunctionForId(id), num_arguments);
335}
336
337
338void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
339  // If the expected number of arguments of the runtime function is
340  // constant, we check that the actual number of arguments match the
341  // expectation.
342  if (f->nargs >= 0 && f->nargs != num_arguments) {
343    IllegalOperation(num_arguments);
344    return;
345  }
346
347  Runtime::FunctionId function_id =
348      static_cast<Runtime::FunctionId>(f->stub_id);
349  RuntimeStub stub(function_id, num_arguments);
350  CallStub(&stub);
351}
352
353
354void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
355                                     int num_arguments,
356                                     int result_size) {
357  // ----------- S t a t e -------------
358  //  -- rsp[0] : return address
359  //  -- rsp[8] : argument num_arguments - 1
360  //  ...
361  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
362  // -----------------------------------
363
364  // TODO(1236192): Most runtime routines don't need the number of
365  // arguments passed in because it is constant. At some point we
366  // should remove this need and make the runtime routine entry code
367  // smarter.
368  movq(rax, Immediate(num_arguments));
369  JumpToRuntime(ext, result_size);
370}
371
372
373void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
374                                   int result_size) {
375  // Set the entry point and jump to the C entry runtime stub.
376  movq(rbx, ext);
377  CEntryStub ces(result_size);
378  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
379}
380
381
382void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
383  bool resolved;
384  Handle<Code> code = ResolveBuiltin(id, &resolved);
385
386  const char* name = Builtins::GetName(id);
387  int argc = Builtins::GetArgumentsCount(id);
388
389  movq(target, code, RelocInfo::EMBEDDED_OBJECT);
390  if (!resolved) {
391    uint32_t flags =
392        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
393        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
394    Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
395    unresolved_.Add(entry);
396  }
397  addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
398}
399
400Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
401                                            bool* resolved) {
402  // Move the builtin function into the temporary function slot by
403  // reading it from the builtins object. NOTE: We should be able to
404  // reduce this to two instructions by putting the function table in
405  // the global object instead of the "builtins" object and by using a
406  // real register for the function.
407  movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
408  movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset));
409  int builtins_offset =
410      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
411  movq(rdi, FieldOperand(rdx, builtins_offset));
412
413  return Builtins::GetCode(id, resolved);
414}
415
416
417void MacroAssembler::Set(Register dst, int64_t x) {
418  if (x == 0) {
419    xor_(dst, dst);
420  } else if (is_int32(x)) {
421    movq(dst, Immediate(static_cast<int32_t>(x)));
422  } else if (is_uint32(x)) {
423    movl(dst, Immediate(static_cast<uint32_t>(x)));
424  } else {
425    movq(dst, x, RelocInfo::NONE);
426  }
427}
428
429
430void MacroAssembler::Set(const Operand& dst, int64_t x) {
431  if (x == 0) {
432    xor_(kScratchRegister, kScratchRegister);
433    movq(dst, kScratchRegister);
434  } else if (is_int32(x)) {
435    movq(dst, Immediate(static_cast<int32_t>(x)));
436  } else if (is_uint32(x)) {
437    movl(dst, Immediate(static_cast<uint32_t>(x)));
438  } else {
439    movq(kScratchRegister, x, RelocInfo::NONE);
440    movq(dst, kScratchRegister);
441  }
442}
443
444// ----------------------------------------------------------------------------
445// Smi tagging, untagging and tag detection.
446
447static int kSmiShift = kSmiTagSize + kSmiShiftSize;
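// Sketch of the representation this file assumes: with kSmiTagSize == 1 and
// kSmiShiftSize == 31 on x64, kSmiShift is 32, so a smi keeps its 32-bit value
// in the upper half of the word and the tag bit (bit 0) is always zero.  For
// example, the integer 5 is represented as 0x0000000500000000.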
448
449void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
450  ASSERT_EQ(0, kSmiTag);
451  if (!dst.is(src)) {
452    movl(dst, src);
453  }
454  shl(dst, Immediate(kSmiShift));
455}
456
457
458void MacroAssembler::Integer32ToSmi(Register dst,
459                                    Register src,
460                                    Label* on_overflow) {
461  ASSERT_EQ(0, kSmiTag);
462  // 32-bit integer always fits in a long smi.
463  if (!dst.is(src)) {
464    movl(dst, src);
465  }
466  shl(dst, Immediate(kSmiShift));
467}
468
469
470void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
471                                                Register src,
472                                                int constant) {
473  if (dst.is(src)) {
474    addq(dst, Immediate(constant));
475  } else {
476    lea(dst, Operand(src, constant));
477  }
478  shl(dst, Immediate(kSmiShift));
479}
480
481
482void MacroAssembler::SmiToInteger32(Register dst, Register src) {
483  ASSERT_EQ(0, kSmiTag);
484  if (!dst.is(src)) {
485    movq(dst, src);
486  }
487  shr(dst, Immediate(kSmiShift));
488}
489
490
491void MacroAssembler::SmiToInteger64(Register dst, Register src) {
492  ASSERT_EQ(0, kSmiTag);
493  if (!dst.is(src)) {
494    movq(dst, src);
495  }
496  sar(dst, Immediate(kSmiShift));
497}
498
499
500void MacroAssembler::SmiTest(Register src) {
501  testq(src, src);
502}
503
504
505void MacroAssembler::SmiCompare(Register dst, Register src) {
506  cmpq(dst, src);
507}
508
509
510void MacroAssembler::SmiCompare(Register dst, Smi* src) {
511  ASSERT(!dst.is(kScratchRegister));
512  if (src->value() == 0) {
513    testq(dst, dst);
514  } else {
515    Move(kScratchRegister, src);
516    cmpq(dst, kScratchRegister);
517  }
518}
519
520
521void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
522  cmpq(dst, src);
523}
524
525
526void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
527  if (src->value() == 0) {
528    // Zero is the only smi whose tagged value fits in a 32-bit immediate.
529    cmpq(dst, Immediate(0));
530  } else {
531    Move(kScratchRegister, src);
532    cmpq(dst, kScratchRegister);
533  }
534}
535
536
537void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
538                                                           Register src,
539                                                           int power) {
540  ASSERT(power >= 0);
541  ASSERT(power < 64);
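  // For example, with kSmiShift == 32 and power == 3, a smi holding v (stored
  // as v << 32) is arithmetic-shifted right by 29 below, leaving v << 3,
  // i.e. v * 8.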
542  if (power == 0) {
543    SmiToInteger64(dst, src);
544    return;
545  }
546  if (!dst.is(src)) {
547    movq(dst, src);
548  }
549  if (power < kSmiShift) {
550    sar(dst, Immediate(kSmiShift - power));
551  } else if (power > kSmiShift) {
552    shl(dst, Immediate(power - kSmiShift));
553  }
554}
555
556
557Condition MacroAssembler::CheckSmi(Register src) {
558  ASSERT_EQ(0, kSmiTag);
559  testb(src, Immediate(kSmiTagMask));
560  return zero;
561}
562
563
564Condition MacroAssembler::CheckPositiveSmi(Register src) {
565  ASSERT_EQ(0, kSmiTag);
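  // Note on the rotate trick below: rol by one moves the sign bit (bit 63)
  // into bit 0 and the smi tag bit into bit 1, so a single test against 0x03
  // checks "non-negative" and "properly tagged" at the same time.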
566  movq(kScratchRegister, src);
567  rol(kScratchRegister, Immediate(1));
568  testl(kScratchRegister, Immediate(0x03));
569  return zero;
570}
571
572
573Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
574  if (first.is(second)) {
575    return CheckSmi(first);
576  }
577  movl(kScratchRegister, first);
578  orl(kScratchRegister, second);
579  testb(kScratchRegister, Immediate(kSmiTagMask));
580  return zero;
581}
582
583
584Condition MacroAssembler::CheckBothPositiveSmi(Register first,
585                                               Register second) {
586  if (first.is(second)) {
587    return CheckPositiveSmi(first);
588  }
589  movl(kScratchRegister, first);
590  orl(kScratchRegister, second);
591  rol(kScratchRegister, Immediate(1));
592  testl(kScratchRegister, Immediate(0x03));
593  return zero;
594}
595
596
597
598Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
599  if (first.is(second)) {
600    return CheckSmi(first);
601  }
602  movl(kScratchRegister, first);
603  andl(kScratchRegister, second);
604  testb(kScratchRegister, Immediate(kSmiTagMask));
605  return zero;
606}
607
608
609Condition MacroAssembler::CheckIsMinSmi(Register src) {
610  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
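  // Smi::kMinValue is the only smi whose representation has just the sign bit
  // set (0x8000000000000000); rotating it left by one yields exactly 1, which
  // is what the comparison below detects.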
611  movq(kScratchRegister, src);
612  rol(kScratchRegister, Immediate(1));
613  cmpq(kScratchRegister, Immediate(1));
614  return equal;
615}
616
617
618Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
619  // A 32-bit integer value can always be converted to a smi.
620  return always;
621}
622
623
624Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
625  // An unsigned 32-bit integer value is valid as long as the high bit
626  // is not set.
627  testq(src, Immediate(0x80000000));
628  return zero;
629}
630
631
632void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
633  if (dst.is(src)) {
634    ASSERT(!dst.is(kScratchRegister));
635    movq(kScratchRegister, src);
636    neg(dst);  // Low 32 bits are retained as zero by negation.
637    // Test if result is zero or Smi::kMinValue.
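    // (Negating zero gives zero and negating Smi::kMinValue overflows back to
    // itself, so "result == original" identifies exactly the two failure
    // cases.)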
638    cmpq(dst, kScratchRegister);
639    j(not_equal, on_smi_result);
640    movq(src, kScratchRegister);
641  } else {
642    movq(dst, src);
643    neg(dst);
644    cmpq(dst, src);
645    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
646    j(not_equal, on_smi_result);
647  }
648}
649
650
651void MacroAssembler::SmiAdd(Register dst,
652                            Register src1,
653                            Register src2,
654                            Label* on_not_smi_result) {
655  ASSERT(!dst.is(src2));
656  if (dst.is(src1)) {
657    addq(dst, src2);
658    Label smi_result;
659    j(no_overflow, &smi_result);
660    // Restore src1.
661    subq(src1, src2);
662    jmp(on_not_smi_result);
663    bind(&smi_result);
664  } else {
665    movq(dst, src1);
666    addq(dst, src2);
667    j(overflow, on_not_smi_result);
668  }
669}
670
671
672void MacroAssembler::SmiSub(Register dst,
673                            Register src1,
674                            Register src2,
675                            Label* on_not_smi_result) {
676  ASSERT(!dst.is(src2));
677  if (on_not_smi_result == NULL) {
678    // No overflow checking. Use only when it's known that
679    // overflowing is impossible (e.g., subtracting two positive smis).
680    if (dst.is(src1)) {
681      subq(dst, src2);
682    } else {
683      movq(dst, src1);
684      subq(dst, src2);
685    }
686    Assert(no_overflow, "Smi subtraction overflow");
687  } else if (dst.is(src1)) {
688    subq(dst, src2);
689    Label smi_result;
690    j(no_overflow, &smi_result);
691    // Restore src1.
692    addq(src1, src2);
693    jmp(on_not_smi_result);
694    bind(&smi_result);
695  } else {
696    movq(dst, src1);
697    subq(dst, src2);
698    j(overflow, on_not_smi_result);
699  }
700}
701
702
703void MacroAssembler::SmiMul(Register dst,
704                            Register src1,
705                            Register src2,
706                            Label* on_not_smi_result) {
707  ASSERT(!dst.is(src2));
708  ASSERT(!dst.is(kScratchRegister));
709  ASSERT(!src1.is(kScratchRegister));
710  ASSERT(!src2.is(kScratchRegister));
711
712  if (dst.is(src1)) {
713    Label failure, zero_correct_result;
714    movq(kScratchRegister, src1);  // Create backup for later testing.
715    SmiToInteger64(dst, src1);
716    imul(dst, src2);
717    j(overflow, &failure);
718
719    // Check for negative zero result.  If product is zero, and one
720    // argument is negative, go to slow case.
721    Label correct_result;
722    testq(dst, dst);
723    j(not_zero, &correct_result);
724
725    movq(dst, kScratchRegister);
726    xor_(dst, src2);
727    j(positive, &zero_correct_result);  // Result was positive zero.
728
729    bind(&failure);  // Reused failure exit, restores src1.
730    movq(src1, kScratchRegister);
731    jmp(on_not_smi_result);
732
733    bind(&zero_correct_result);
734    xor_(dst, dst);
735
736    bind(&correct_result);
737  } else {
738    SmiToInteger64(dst, src1);
739    imul(dst, src2);
740    j(overflow, on_not_smi_result);
741    // Check for negative zero result.  If product is zero, and one
742    // argument is negative, go to slow case.
743    Label correct_result;
744    testq(dst, dst);
745    j(not_zero, &correct_result);
746    // One of src1 and src2 is zero, so check whether the other is
747    // negative.
748    movq(kScratchRegister, src1);
749    xor_(kScratchRegister, src2);
750    j(negative, on_not_smi_result);
751    bind(&correct_result);
752  }
753}
754
755
756void MacroAssembler::SmiTryAddConstant(Register dst,
757                                       Register src,
758                                       Smi* constant,
759                                       Label* on_not_smi_result) {
760  // Does not assume that src is a smi.
761  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
762  ASSERT_EQ(0, kSmiTag);
763  ASSERT(!dst.is(kScratchRegister));
764  ASSERT(!src.is(kScratchRegister));
765
766  JumpIfNotSmi(src, on_not_smi_result);
767  Register tmp = (dst.is(src) ? kScratchRegister : dst);
768  Move(tmp, constant);
769  addq(tmp, src);
770  j(overflow, on_not_smi_result);
771  if (dst.is(src)) {
772    movq(dst, tmp);
773  }
774}
775
776
777void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
778  if (constant->value() == 0) {
779    if (!dst.is(src)) {
780      movq(dst, src);
781    }
782  } else if (dst.is(src)) {
783    ASSERT(!dst.is(kScratchRegister));
784
785    Move(kScratchRegister, constant);
786    addq(dst, kScratchRegister);
787  } else {
788    Move(dst, constant);
789    addq(dst, src);
790  }
791}
792
793
794void MacroAssembler::SmiAddConstant(Register dst,
795                                    Register src,
796                                    Smi* constant,
797                                    Label* on_not_smi_result) {
798  if (constant->value() == 0) {
799    if (!dst.is(src)) {
800      movq(dst, src);
801    }
802  } else if (dst.is(src)) {
803    ASSERT(!dst.is(kScratchRegister));
804
805    Move(kScratchRegister, constant);
806    addq(dst, kScratchRegister);
807    Label result_ok;
808    j(no_overflow, &result_ok);
809    subq(dst, kScratchRegister);
810    jmp(on_not_smi_result);
811    bind(&result_ok);
812  } else {
813    Move(dst, constant);
814    addq(dst, src);
815    j(overflow, on_not_smi_result);
816  }
817}
818
819
820void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
821  if (constant->value() == 0) {
822    if (!dst.is(src)) {
823      movq(dst, src);
824    }
825  } else if (dst.is(src)) {
826    ASSERT(!dst.is(kScratchRegister));
827
828    Move(kScratchRegister, constant);
829    subq(dst, kScratchRegister);
830  } else {
831    // Subtract by adding the negative, to do it in two operations.
832    if (constant->value() == Smi::kMinValue) {
833      Move(kScratchRegister, constant);
834      movq(dst, src);
835      subq(dst, kScratchRegister);
836    } else {
837      Move(dst, Smi::FromInt(-constant->value()));
838      addq(dst, src);
839    }
840  }
841}
842
843
844void MacroAssembler::SmiSubConstant(Register dst,
845                                    Register src,
846                                    Smi* constant,
847                                    Label* on_not_smi_result) {
848  if (constant->value() == 0) {
849    if (!dst.is(src)) {
850      movq(dst, src);
851    }
852  } else if (dst.is(src)) {
853    ASSERT(!dst.is(kScratchRegister));
854
855    Move(kScratchRegister, constant);
856    subq(dst, kScratchRegister);
857    Label sub_success;
858    j(no_overflow, &sub_success);
859    addq(src, kScratchRegister);
860    jmp(on_not_smi_result);
861    bind(&sub_success);
862  } else {
863    if (constant->value() == Smi::kMinValue) {
864      Move(kScratchRegister, constant);
865      movq(dst, src);
866      subq(dst, kScratchRegister);
867      j(overflow, on_not_smi_result);
868    } else {
869      Move(dst, Smi::FromInt(-(constant->value())));
870      addq(dst, src);
871      j(overflow, on_not_smi_result);
872    }
873  }
874}
875
876
877void MacroAssembler::SmiDiv(Register dst,
878                            Register src1,
879                            Register src2,
880                            Label* on_not_smi_result) {
881  ASSERT(!src1.is(kScratchRegister));
882  ASSERT(!src2.is(kScratchRegister));
883  ASSERT(!dst.is(kScratchRegister));
884  ASSERT(!src2.is(rax));
885  ASSERT(!src2.is(rdx));
886  ASSERT(!src1.is(rdx));
887
888  // Check for 0 divisor (result is +/-Infinity).
889  Label positive_divisor;
890  testq(src2, src2);
891  j(zero, on_not_smi_result);
892
893  if (src1.is(rax)) {
894    movq(kScratchRegister, src1);
895  }
896  SmiToInteger32(rax, src1);
897  // We need to rule out dividing Smi::kMinValue by -1, since that would
898  // overflow in idiv and raise an exception.
899  // We combine this with negative zero test (negative zero only happens
900  // when dividing zero by a negative number).
901
902  // We overshoot a little and go to slow case if we divide min-value
903  // by any negative value, not just -1.
904  Label safe_div;
905  testl(rax, Immediate(0x7fffffff));
906  j(not_zero, &safe_div);
907  testq(src2, src2);
908  if (src1.is(rax)) {
909    j(positive, &safe_div);
910    movq(src1, kScratchRegister);
911    jmp(on_not_smi_result);
912  } else {
913    j(negative, on_not_smi_result);
914  }
915  bind(&safe_div);
916
917  SmiToInteger32(src2, src2);
918  // Sign extend src1 into edx:eax.
919  cdq();
920  idivl(src2);
921  Integer32ToSmi(src2, src2);
922  // Check that the remainder is zero.
923  testl(rdx, rdx);
924  if (src1.is(rax)) {
925    Label smi_result;
926    j(zero, &smi_result);
927    movq(src1, kScratchRegister);
928    jmp(on_not_smi_result);
929    bind(&smi_result);
930  } else {
931    j(not_zero, on_not_smi_result);
932  }
933  if (!dst.is(src1) && src1.is(rax)) {
934    movq(src1, kScratchRegister);
935  }
936  Integer32ToSmi(dst, rax);
937}
938
939
940void MacroAssembler::SmiMod(Register dst,
941                            Register src1,
942                            Register src2,
943                            Label* on_not_smi_result) {
944  ASSERT(!dst.is(kScratchRegister));
945  ASSERT(!src1.is(kScratchRegister));
946  ASSERT(!src2.is(kScratchRegister));
947  ASSERT(!src2.is(rax));
948  ASSERT(!src2.is(rdx));
949  ASSERT(!src1.is(rdx));
950  ASSERT(!src1.is(src2));
951
952  testq(src2, src2);
953  j(zero, on_not_smi_result);
954
955  if (src1.is(rax)) {
956    movq(kScratchRegister, src1);
957  }
958  SmiToInteger32(rax, src1);
959  SmiToInteger32(src2, src2);
960
961  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
962  Label safe_div;
963  cmpl(rax, Immediate(Smi::kMinValue));
964  j(not_equal, &safe_div);
965  cmpl(src2, Immediate(-1));
966  j(not_equal, &safe_div);
967  // Retag inputs and go slow case.
968  Integer32ToSmi(src2, src2);
969  if (src1.is(rax)) {
970    movq(src1, kScratchRegister);
971  }
972  jmp(on_not_smi_result);
973  bind(&safe_div);
974
975  // Sign extend eax into edx:eax.
976  cdq();
977  idivl(src2);
978  // Restore smi tags on inputs.
979  Integer32ToSmi(src2, src2);
980  if (src1.is(rax)) {
981    movq(src1, kScratchRegister);
982  }
983  // Check for a negative zero result.  If the result is zero, and the
984  // dividend is negative, go slow to return a floating point negative zero.
985  Label smi_result;
986  testl(rdx, rdx);
987  j(not_zero, &smi_result);
988  testq(src1, src1);
989  j(negative, on_not_smi_result);
990  bind(&smi_result);
991  Integer32ToSmi(dst, rdx);
992}
993
994
995void MacroAssembler::SmiNot(Register dst, Register src) {
996  ASSERT(!dst.is(kScratchRegister));
997  ASSERT(!src.is(kScratchRegister));
998  // Set tag and padding bits before negating, so that they are zero afterwards.
999  movl(kScratchRegister, Immediate(~0));
1000  if (dst.is(src)) {
1001    xor_(dst, kScratchRegister);
1002  } else {
1003    lea(dst, Operand(src, kScratchRegister, times_1, 0));
1004  }
1005  not_(dst);
1006}
1007
1008
1009void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1010  ASSERT(!dst.is(src2));
1011  if (!dst.is(src1)) {
1012    movq(dst, src1);
1013  }
1014  and_(dst, src2);
1015}
1016
1017
1018void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1019  if (constant->value() == 0) {
1020    xor_(dst, dst);
1021  } else if (dst.is(src)) {
1022    ASSERT(!dst.is(kScratchRegister));
1023    Move(kScratchRegister, constant);
1024    and_(dst, kScratchRegister);
1025  } else {
1026    Move(dst, constant);
1027    and_(dst, src);
1028  }
1029}
1030
1031
1032void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1033  if (!dst.is(src1)) {
1034    movq(dst, src1);
1035  }
1036  or_(dst, src2);
1037}
1038
1039
1040void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1041  if (dst.is(src)) {
1042    ASSERT(!dst.is(kScratchRegister));
1043    Move(kScratchRegister, constant);
1044    or_(dst, kScratchRegister);
1045  } else {
1046    Move(dst, constant);
1047    or_(dst, src);
1048  }
1049}
1050
1051
1052void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1053  if (!dst.is(src1)) {
1054    movq(dst, src1);
1055  }
1056  xor_(dst, src2);
1057}
1058
1059
1060void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1061  if (dst.is(src)) {
1062    ASSERT(!dst.is(kScratchRegister));
1063    Move(kScratchRegister, constant);
1064    xor_(dst, kScratchRegister);
1065  } else {
1066    Move(dst, constant);
1067    xor_(dst, src);
1068  }
1069}
1070
1071
1072void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1073                                                     Register src,
1074                                                     int shift_value) {
1075  ASSERT(is_uint5(shift_value));
1076  if (shift_value > 0) {
1077    if (dst.is(src)) {
1078      sar(dst, Immediate(shift_value + kSmiShift));
1079      shl(dst, Immediate(kSmiShift));
1080    } else {
1081      UNIMPLEMENTED();  // Not used.
1082    }
1083  }
1084}
1085
1086
1087void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
1088                                                  Register src,
1089                                                  int shift_value,
1090                                                  Label* on_not_smi_result) {
1091  // Logical right shift interprets its result as an *unsigned* number.
1092  if (dst.is(src)) {
1093    UNIMPLEMENTED();  // Not used.
1094  } else {
1095    movq(dst, src);
1096    if (shift_value == 0) {
1097      testq(dst, dst);
1098      j(negative, on_not_smi_result);
1099    }
1100    shr(dst, Immediate(shift_value + kSmiShift));
1101    shl(dst, Immediate(kSmiShift));
1102  }
1103}
1104
1105
1106void MacroAssembler::SmiShiftLeftConstant(Register dst,
1107                                          Register src,
1108                                          int shift_value,
1109                                          Label* on_not_smi_result) {
1110  if (!dst.is(src)) {
1111    movq(dst, src);
1112  }
1113  if (shift_value > 0) {
1114    shl(dst, Immediate(shift_value));
1115  }
1116}
1117
1118
1119void MacroAssembler::SmiShiftLeft(Register dst,
1120                                  Register src1,
1121                                  Register src2,
1122                                  Label* on_not_smi_result) {
1123  ASSERT(!dst.is(rcx));
1124  Label result_ok;
1125  // Untag shift amount.
1126  if (!dst.is(src1)) {
1127    movq(dst, src1);
1128  }
1129  SmiToInteger32(rcx, src2);
1130  // The shift amount uses only the lower five bits, not six as in shl.
1131  and_(rcx, Immediate(0x1f));
1132  shl_cl(dst);
1133}
1134
1135
1136void MacroAssembler::SmiShiftLogicalRight(Register dst,
1137                                          Register src1,
1138                                          Register src2,
1139                                          Label* on_not_smi_result) {
1140  ASSERT(!dst.is(kScratchRegister));
1141  ASSERT(!src1.is(kScratchRegister));
1142  ASSERT(!src2.is(kScratchRegister));
1143  ASSERT(!dst.is(rcx));
1144  Label result_ok;
1145  if (src1.is(rcx) || src2.is(rcx)) {
1146    movq(kScratchRegister, rcx);
1147  }
1148  if (!dst.is(src1)) {
1149    movq(dst, src1);
1150  }
1151  SmiToInteger32(rcx, src2);
1152  orl(rcx, Immediate(kSmiShift));
1153  shr_cl(dst);  // Shift amount is 32 + (original rcx & 0x1f).
1154  shl(dst, Immediate(kSmiShift));
1155  testq(dst, dst);
1156  if (src1.is(rcx) || src2.is(rcx)) {
1157    Label positive_result;
1158    j(positive, &positive_result);
1159    if (src1.is(rcx)) {
1160      movq(src1, kScratchRegister);
1161    } else {
1162      movq(src2, kScratchRegister);
1163    }
1164    jmp(on_not_smi_result);
1165    bind(&positive_result);
1166  } else {
1167    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
1168  }
1169}
1170
1171
1172void MacroAssembler::SmiShiftArithmeticRight(Register dst,
1173                                             Register src1,
1174                                             Register src2) {
1175  ASSERT(!dst.is(kScratchRegister));
1176  ASSERT(!src1.is(kScratchRegister));
1177  ASSERT(!src2.is(kScratchRegister));
1178  ASSERT(!dst.is(rcx));
1179  if (src1.is(rcx)) {
1180    movq(kScratchRegister, src1);
1181  } else if (src2.is(rcx)) {
1182    movq(kScratchRegister, src2);
1183  }
1184  if (!dst.is(src1)) {
1185    movq(dst, src1);
1186  }
1187  SmiToInteger32(rcx, src2);
1188  orl(rcx, Immediate(kSmiShift));
1189  sar_cl(dst);  // Shift amount is 32 + (original rcx & 0x1f).
1190  shl(dst, Immediate(kSmiShift));
1191  if (src1.is(rcx)) {
1192    movq(src1, kScratchRegister);
1193  } else if (src2.is(rcx)) {
1194    movq(src2, kScratchRegister);
1195  }
1196}
1197
1198
1199void MacroAssembler::SelectNonSmi(Register dst,
1200                                  Register src1,
1201                                  Register src2,
1202                                  Label* on_not_smis) {
1203  ASSERT(!dst.is(kScratchRegister));
1204  ASSERT(!src1.is(kScratchRegister));
1205  ASSERT(!src2.is(kScratchRegister));
1206  ASSERT(!dst.is(src1));
1207  ASSERT(!dst.is(src2));
1208  // Both operands must not be smis.
1209#ifdef DEBUG
1210  if (allow_stub_calls()) {  // Check contains a stub call.
1211    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
1212    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
1213  }
1214#endif
1215  ASSERT_EQ(0, kSmiTag);
1216  ASSERT_EQ(0, Smi::FromInt(0));
1217  movl(kScratchRegister, Immediate(kSmiTagMask));
1218  and_(kScratchRegister, src1);
1219  testl(kScratchRegister, src2);
1220  // If non-zero then both are smis.
1221  j(not_zero, on_not_smis);
1222
1223  // Exactly one operand is a smi.
1224  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
1225  // kScratchRegister still holds src1 & kSmiTagMask, which is zero or one.
1226  subq(kScratchRegister, Immediate(1));
1227  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
1228  movq(dst, src1);
1229  xor_(dst, src2);
1230  and_(dst, kScratchRegister);
1231  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
1232  xor_(dst, src1);
1233  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
1234}
1235
1236SmiIndex MacroAssembler::SmiToIndex(Register dst,
1237                                    Register src,
1238                                    int shift) {
1239  ASSERT(is_uint6(shift));
1240  // There is a possible optimization if shift is in the range 60-63, but that
1241  // will (and must) never happen.
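  // Sketch of a typical use (mirroring RecordWriteNonSmi above): with
  // shift == kPointerSizeLog2, a smi array index i (stored as i << 32) ends up
  // as i * kPointerSize in dst, returned with scale times_1 for direct use in
  // an Operand.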
1242  if (!dst.is(src)) {
1243    movq(dst, src);
1244  }
1245  if (shift < kSmiShift) {
1246    sar(dst, Immediate(kSmiShift - shift));
1247  } else {
1248    shl(dst, Immediate(shift - kSmiShift));
1249  }
1250  return SmiIndex(dst, times_1);
1251}
1252
1253SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
1254                                            Register src,
1255                                            int shift) {
1256  // Register src holds a positive smi.
1257  ASSERT(is_uint6(shift));
1258  if (!dst.is(src)) {
1259    movq(dst, src);
1260  }
1261  neg(dst);
1262  if (shift < kSmiShift) {
1263    sar(dst, Immediate(kSmiShift - shift));
1264  } else {
1265    shl(dst, Immediate(shift - kSmiShift));
1266  }
1267  return SmiIndex(dst, times_1);
1268}
1269
1270
1271void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
1272  ASSERT_EQ(0, kSmiTag);
1273  Condition smi = CheckSmi(src);
1274  j(smi, on_smi);
1275}
1276
1277
1278void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
1279  Condition smi = CheckSmi(src);
1280  j(NegateCondition(smi), on_not_smi);
1281}
1282
1283
1284void MacroAssembler::JumpIfNotPositiveSmi(Register src,
1285                                          Label* on_not_positive_smi) {
1286  Condition positive_smi = CheckPositiveSmi(src);
1287  j(NegateCondition(positive_smi), on_not_positive_smi);
1288}
1289
1290
1291void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1292                                             Smi* constant,
1293                                             Label* on_equals) {
1294  SmiCompare(src, constant);
1295  j(equal, on_equals);
1296}
1297
1298
1299void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
1300  Condition is_valid = CheckInteger32ValidSmiValue(src);
1301  j(NegateCondition(is_valid), on_invalid);
1302}
1303
1304
1305void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1306                                                Label* on_invalid) {
1307  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1308  j(NegateCondition(is_valid), on_invalid);
1309}
1310
1311
1312void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
1313                                      Label* on_not_both_smi) {
1314  Condition both_smi = CheckBothSmi(src1, src2);
1315  j(NegateCondition(both_smi), on_not_both_smi);
1316}
1317
1318
1319void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
1320                                              Label* on_not_both_smi) {
1321  Condition both_smi = CheckBothPositiveSmi(src1, src2);
1322  j(NegateCondition(both_smi), on_not_both_smi);
1323}
1324
1325
1326
1327void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
1328                                                         Register second_object,
1329                                                         Register scratch1,
1330                                                         Register scratch2,
1331                                                         Label* on_fail) {
1332  // Check that both objects are not smis.
1333  Condition either_smi = CheckEitherSmi(first_object, second_object);
1334  j(either_smi, on_fail);
1335
1336  // Load instance type for both strings.
1337  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
1338  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
1339  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
1340  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
1341
1342  // Check that both are flat ascii strings.
1343  ASSERT(kNotStringTag != 0);
1344  const int kFlatAsciiStringMask =
1345      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
1346  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
1347
1348  andl(scratch1, Immediate(kFlatAsciiStringMask));
1349  andl(scratch2, Immediate(kFlatAsciiStringMask));
1350  // Interleave the bits to check both scratch1 and scratch2 in one test.
1351  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
1352  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
1353  cmpl(scratch1,
1354       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
1355  j(not_equal, on_fail);
1356}
1357
1358
1359void MacroAssembler::Move(Register dst, Handle<Object> source) {
1360  ASSERT(!source->IsFailure());
1361  if (source->IsSmi()) {
1362    Move(dst, Smi::cast(*source));
1363  } else {
1364    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
1365  }
1366}
1367
1368
1369void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
1370  ASSERT(!source->IsFailure());
1371  if (source->IsSmi()) {
1372    Move(dst, Smi::cast(*source));
1373  } else {
1374    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1375    movq(dst, kScratchRegister);
1376  }
1377}
1378
1379
1380void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1381  if (source->IsSmi()) {
1382    SmiCompare(dst, Smi::cast(*source));
1383  } else {
1384    Move(kScratchRegister, source);
1385    cmpq(dst, kScratchRegister);
1386  }
1387}
1388
1389
1390void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
1391  if (source->IsSmi()) {
1392    SmiCompare(dst, Smi::cast(*source));
1393  } else {
1394    ASSERT(source->IsHeapObject());
1395    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1396    cmpq(dst, kScratchRegister);
1397  }
1398}
1399
1400
1401void MacroAssembler::Push(Handle<Object> source) {
1402  if (source->IsSmi()) {
1403    Push(Smi::cast(*source));
1404  } else {
1405    ASSERT(source->IsHeapObject());
1406    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1407    push(kScratchRegister);
1408  }
1409}
1410
1411
1412void MacroAssembler::Push(Smi* source) {
1413  intptr_t smi = reinterpret_cast<intptr_t>(source);
1414  if (is_int32(smi)) {
1415    push(Immediate(static_cast<int32_t>(smi)));
1416  } else {
1417    Set(kScratchRegister, smi);
1418    push(kScratchRegister);
1419  }
1420}
1421
1422
1423void MacroAssembler::Drop(int stack_elements) {
1424  if (stack_elements > 0) {
1425    addq(rsp, Immediate(stack_elements * kPointerSize));
1426  }
1427}
1428
1429
1430void MacroAssembler::Test(const Operand& src, Smi* source) {
1431  intptr_t smi = reinterpret_cast<intptr_t>(source);
1432  if (is_int32(smi)) {
1433    testl(src, Immediate(static_cast<int32_t>(smi)));
1434  } else {
1435    Move(kScratchRegister, source);
1436    testq(src, kScratchRegister);
1437  }
1438}
1439
1440
1441void MacroAssembler::Jump(ExternalReference ext) {
1442  movq(kScratchRegister, ext);
1443  jmp(kScratchRegister);
1444}
1445
1446
1447void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1448  movq(kScratchRegister, destination, rmode);
1449  jmp(kScratchRegister);
1450}
1451
1452
1453void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
1454  // TODO(X64): Inline this
1455  jmp(code_object, rmode);
1456}
1457
1458
1459void MacroAssembler::Call(ExternalReference ext) {
1460  movq(kScratchRegister, ext);
1461  call(kScratchRegister);
1462}
1463
1464
1465void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1466  movq(kScratchRegister, destination, rmode);
1467  call(kScratchRegister);
1468}
1469
1470
1471void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1472  ASSERT(RelocInfo::IsCodeTarget(rmode));
1473  WriteRecordedPositions();
1474  call(code_object, rmode);
1475}
1476
1477
1478void MacroAssembler::PushTryHandler(CodeLocation try_location,
1479                                    HandlerType type) {
1480  // Adjust this code if not the case.
1481  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1482
1483  // The pc (return address) is already on TOS.  This code pushes state,
1484  // frame pointer and current handler.  Check that they are expected
1485  // next on the stack, in that order.
1486  ASSERT_EQ(StackHandlerConstants::kStateOffset,
1487            StackHandlerConstants::kPCOffset - kPointerSize);
1488  ASSERT_EQ(StackHandlerConstants::kFPOffset,
1489            StackHandlerConstants::kStateOffset - kPointerSize);
1490  ASSERT_EQ(StackHandlerConstants::kNextOffset,
1491            StackHandlerConstants::kFPOffset - kPointerSize);
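  // After the pushes below the handler occupies four words on the stack
  // (assuming the offsets asserted above, with kNextOffset == 0): rsp[0] holds
  // the next handler, rsp[8] the frame pointer (or NULL), rsp[16] the state
  // and rsp[24] the return address.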
1492
1493  if (try_location == IN_JAVASCRIPT) {
1494    if (type == TRY_CATCH_HANDLER) {
1495      push(Immediate(StackHandler::TRY_CATCH));
1496    } else {
1497      push(Immediate(StackHandler::TRY_FINALLY));
1498    }
1499    push(rbp);
1500  } else {
1501    ASSERT(try_location == IN_JS_ENTRY);
1502    // The frame pointer does not point to a JS frame so we save NULL
1503    // for rbp. We expect the code throwing an exception to check rbp
1504    // before dereferencing it to restore the context.
1505    push(Immediate(StackHandler::ENTRY));
1506    push(Immediate(0));  // NULL frame pointer.
1507  }
1508  // Save the current handler.
1509  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1510  push(Operand(kScratchRegister, 0));
1511  // Link this handler.
1512  movq(Operand(kScratchRegister, 0), rsp);
1513}
1514
1515
1516void MacroAssembler::PopTryHandler() {
1517  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
1518  // Unlink this handler.
1519  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1520  pop(Operand(kScratchRegister, 0));
1521  // Remove the remaining fields.
1522  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1523}
1524
1525
1526void MacroAssembler::Ret() {
1527  ret(0);
1528}
1529
1530
1531void MacroAssembler::FCmp() {
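  // fucomip compares st(0) with st(1), sets the integer flags directly and
  // pops st(0); the ffree/fincstp pair then discards the remaining operand so
  // the x87 stack is left empty for the caller.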
1532  fucomip();
1533  ffree(0);
1534  fincstp();
1535}
1536
1537
1538void MacroAssembler::CmpObjectType(Register heap_object,
1539                                   InstanceType type,
1540                                   Register map) {
1541  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1542  CmpInstanceType(map, type);
1543}
1544
1545
1546void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
1547  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
1548       Immediate(static_cast<int8_t>(type)));
1549}
1550
1551
1552Condition MacroAssembler::IsObjectStringType(Register heap_object,
1553                                             Register map,
1554                                             Register instance_type) {
1555  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1556  movzxbq(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
1557  ASSERT(kNotStringTag != 0);
1558  testb(instance_type, Immediate(kIsNotStringMask));
1559  return zero;
1560}
1561
1562
1563void MacroAssembler::TryGetFunctionPrototype(Register function,
1564                                             Register result,
1565                                             Label* miss) {
1566  // Check that the receiver isn't a smi.
1567  testl(function, Immediate(kSmiTagMask));
1568  j(zero, miss);
1569
1570  // Check that the function really is a function.
1571  CmpObjectType(function, JS_FUNCTION_TYPE, result);
1572  j(not_equal, miss);
1573
1574  // Make sure that the function has an instance prototype.
1575  Label non_instance;
1576  testb(FieldOperand(result, Map::kBitFieldOffset),
1577        Immediate(1 << Map::kHasNonInstancePrototype));
1578  j(not_zero, &non_instance);
1579
1580  // Get the prototype or initial map from the function.
1581  movq(result,
1582       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1583
1584  // If the prototype or initial map is the hole, don't return it and
1585  // simply miss the cache instead. This will allow us to allocate a
1586  // prototype object on-demand in the runtime system.
1587  CompareRoot(result, Heap::kTheHoleValueRootIndex);
1588  j(equal, miss);
1589
1590  // If the function does not have an initial map, we're done.
1591  Label done;
1592  CmpObjectType(result, MAP_TYPE, kScratchRegister);
1593  j(not_equal, &done);
1594
1595  // Get the prototype from the initial map.
1596  movq(result, FieldOperand(result, Map::kPrototypeOffset));
1597  jmp(&done);
1598
1599  // Non-instance prototype: Fetch prototype from constructor field
1600  // in initial map.
1601  bind(&non_instance);
1602  movq(result, FieldOperand(result, Map::kConstructorOffset));
1603
1604  // All done.
1605  bind(&done);
1606}
1607
1608
1609void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
1610  if (FLAG_native_code_counters && counter->Enabled()) {
1611    movq(kScratchRegister, ExternalReference(counter));
1612    movl(Operand(kScratchRegister, 0), Immediate(value));
1613  }
1614}
1615
1616
1617void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
1618  ASSERT(value > 0);
1619  if (FLAG_native_code_counters && counter->Enabled()) {
1620    movq(kScratchRegister, ExternalReference(counter));
1621    Operand operand(kScratchRegister, 0);
1622    if (value == 1) {
1623      incl(operand);
1624    } else {
1625      addl(operand, Immediate(value));
1626    }
1627  }
1628}
1629
1630
1631void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
1632  ASSERT(value > 0);
1633  if (FLAG_native_code_counters && counter->Enabled()) {
1634    movq(kScratchRegister, ExternalReference(counter));
1635    Operand operand(kScratchRegister, 0);
1636    if (value == 1) {
1637      decl(operand);
1638    } else {
1639      subl(operand, Immediate(value));
1640    }
1641  }
1642}
1643
1644#ifdef ENABLE_DEBUGGER_SUPPORT
1645
1646void MacroAssembler::PushRegistersFromMemory(RegList regs) {
1647  ASSERT((regs & ~kJSCallerSaved) == 0);
1648  // Push the content of the memory location to the stack.
1649  for (int i = 0; i < kNumJSCallerSaved; i++) {
1650    int r = JSCallerSavedCode(i);
1651    if ((regs & (1 << r)) != 0) {
1652      ExternalReference reg_addr =
1653          ExternalReference(Debug_Address::Register(i));
1654      movq(kScratchRegister, reg_addr);
1655      push(Operand(kScratchRegister, 0));
1656    }
1657  }
1658}
1659
1660
1661void MacroAssembler::SaveRegistersToMemory(RegList regs) {
1662  ASSERT((regs & ~kJSCallerSaved) == 0);
1663  // Copy the content of registers to memory location.
1664  for (int i = 0; i < kNumJSCallerSaved; i++) {
1665    int r = JSCallerSavedCode(i);
1666    if ((regs & (1 << r)) != 0) {
1667      Register reg = { r };
1668      ExternalReference reg_addr =
1669          ExternalReference(Debug_Address::Register(i));
1670      movq(kScratchRegister, reg_addr);
1671      movq(Operand(kScratchRegister, 0), reg);
1672    }
1673  }
1674}
1675
1676
1677void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
1678  ASSERT((regs & ~kJSCallerSaved) == 0);
1679  // Copy the content of memory location to registers.
1680  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
1681    int r = JSCallerSavedCode(i);
1682    if ((regs & (1 << r)) != 0) {
1683      Register reg = { r };
1684      ExternalReference reg_addr =
1685          ExternalReference(Debug_Address::Register(i));
1686      movq(kScratchRegister, reg_addr);
1687      movq(reg, Operand(kScratchRegister, 0));
1688    }
1689  }
1690}
1691
1692
1693void MacroAssembler::PopRegistersToMemory(RegList regs) {
1694  ASSERT((regs & ~kJSCallerSaved) == 0);
1695  // Pop the content from the stack to the memory location.
1696  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
1697    int r = JSCallerSavedCode(i);
1698    if ((regs & (1 << r)) != 0) {
1699      ExternalReference reg_addr =
1700          ExternalReference(Debug_Address::Register(i));
1701      movq(kScratchRegister, reg_addr);
1702      pop(Operand(kScratchRegister, 0));
1703    }
1704  }
1705}
1706
1707
1708void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
1709                                                    Register scratch,
1710                                                    RegList regs) {
1711  ASSERT(!scratch.is(kScratchRegister));
1712  ASSERT(!base.is(kScratchRegister));
1713  ASSERT(!base.is(scratch));
1714  ASSERT((regs & ~kJSCallerSaved) == 0);
1715  // Copy the content of the stack to the memory location and adjust base.
1716  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
1717    int r = JSCallerSavedCode(i);
1718    if ((regs & (1 << r)) != 0) {
1719      movq(scratch, Operand(base, 0));
1720      ExternalReference reg_addr =
1721          ExternalReference(Debug_Address::Register(i));
1722      movq(kScratchRegister, reg_addr);
1723      movq(Operand(kScratchRegister, 0), scratch);
1724      lea(base, Operand(base, kPointerSize));
1725    }
1726  }
1727}
1728
1729#endif  // ENABLE_DEBUGGER_SUPPORT
1730
1731
1732void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
1733  bool resolved;
1734  Handle<Code> code = ResolveBuiltin(id, &resolved);
1735
1736  // Calls are not allowed in some stubs.
1737  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
1738
1739  // Rely on the assertion to check that the number of provided
1740  // arguments matches the expected number of arguments. Fake a
1741  // parameter count to avoid emitting code to do the check.
1742  ParameterCount expected(0);
1743  InvokeCode(Handle<Code>(code),
1744             expected,
1745             expected,
1746             RelocInfo::CODE_TARGET,
1747             flag);
1748
1749  const char* name = Builtins::GetName(id);
1750  int argc = Builtins::GetArgumentsCount(id);
1751  // The target address for the jump is stored as an immediate at offset
1752  // kCallTargetAddressOffset.
1753  if (!resolved) {
1754    uint32_t flags =
1755        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
1756        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
1757    Unresolved entry =
1758        { pc_offset() - kCallTargetAddressOffset, flags, name };
1759    unresolved_.Add(entry);
1760  }
1761}
1762
1763
1764void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1765                                    const ParameterCount& actual,
1766                                    Handle<Code> code_constant,
1767                                    Register code_register,
1768                                    Label* done,
1769                                    InvokeFlag flag) {
1770  bool definitely_matches = false;
1771  Label invoke;
1772  if (expected.is_immediate()) {
1773    ASSERT(actual.is_immediate());
1774    if (expected.immediate() == actual.immediate()) {
1775      definitely_matches = true;
1776    } else {
1777      movq(rax, Immediate(actual.immediate()));
1778      if (expected.immediate() ==
1779              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
1780        // Don't worry about adapting arguments for built-ins that
1781        // don't want that done. Skip adaptation code by making it look
1782        // like we have a match between expected and actual number of
1783        // arguments.
1784        definitely_matches = true;
1785      } else {
1786        movq(rbx, Immediate(expected.immediate()));
1787      }
1788    }
1789  } else {
1790    if (actual.is_immediate()) {
1791      // Expected is in register, actual is immediate. This is the
1792      // case when we invoke function values without going through the
1793      // IC mechanism.
1794      cmpq(expected.reg(), Immediate(actual.immediate()));
1795      j(equal, &invoke);
1796      ASSERT(expected.reg().is(rbx));
1797      movq(rax, Immediate(actual.immediate()));
1798    } else if (!expected.reg().is(actual.reg())) {
1799      // Both expected and actual are in (different) registers. This
1800      // is the case when we invoke functions using call and apply.
1801      cmpq(expected.reg(), actual.reg());
1802      j(equal, &invoke);
1803      ASSERT(actual.reg().is(rax));
1804      ASSERT(expected.reg().is(rbx));
1805    }
1806  }
1807
1808  if (!definitely_matches) {
1809    Handle<Code> adaptor =
1810        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
1811    if (!code_constant.is_null()) {
1812      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
1813      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
1814    } else if (!code_register.is(rdx)) {
1815      movq(rdx, code_register);
1816    }
1817
1818    if (flag == CALL_FUNCTION) {
1819      Call(adaptor, RelocInfo::CODE_TARGET);
1820      jmp(done);
1821    } else {
1822      Jump(adaptor, RelocInfo::CODE_TARGET);
1823    }
1824    bind(&invoke);
1825  }
1826}
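
// Minimal usage sketch (the parameter counts are made up): a caller invoking
// a code object through InvokeCode below ends up running this prologue first.
//
//   ParameterCount expected(2);  // callee declares two parameters
//   ParameterCount actual(2);    // caller passes two arguments
//   masm->InvokeCode(rdx, expected, actual, CALL_FUNCTION);
//
// If the counts differ at run time, the prologue transfers control to the
// ArgumentsAdaptorTrampoline with the actual count in rax, the expected count
// in rbx and the code entry in rdx, as the asserts above require.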
1827
1828
1829void MacroAssembler::InvokeCode(Register code,
1830                                const ParameterCount& expected,
1831                                const ParameterCount& actual,
1832                                InvokeFlag flag) {
1833  Label done;
1834  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
1835  if (flag == CALL_FUNCTION) {
1836    call(code);
1837  } else {
1838    ASSERT(flag == JUMP_FUNCTION);
1839    jmp(code);
1840  }
1841  bind(&done);
1842}
1843
1844
1845void MacroAssembler::InvokeCode(Handle<Code> code,
1846                                const ParameterCount& expected,
1847                                const ParameterCount& actual,
1848                                RelocInfo::Mode rmode,
1849                                InvokeFlag flag) {
1850  Label done;
1851  Register dummy = rax;
1852  InvokePrologue(expected, actual, code, dummy, &done, flag);
1853  if (flag == CALL_FUNCTION) {
1854    Call(code, rmode);
1855  } else {
1856    ASSERT(flag == JUMP_FUNCTION);
1857    Jump(code, rmode);
1858  }
1859  bind(&done);
1860}
1861
1862
1863void MacroAssembler::InvokeFunction(Register function,
1864                                    const ParameterCount& actual,
1865                                    InvokeFlag flag) {
1866  ASSERT(function.is(rdi));
1867  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
1868  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
1869  movsxlq(rbx,
1870          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
1871  movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
1872  // Advances rdx to the end of the Code object header, to the start of
1873  // the executable code.
1874  lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
1875
1876  ParameterCount expected(rbx);
1877  InvokeCode(rdx, expected, actual, flag);
1878}
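
// Usage sketch (the register choices are the ones this function requires):
// with the JSFunction in rdi and the actual argument count in rax, a caller
// emits
//
//   ParameterCount actual(rax);
//   masm->InvokeFunction(rdi, actual, CALL_FUNCTION);
//
// and the expected count is picked up from the SharedFunctionInfo as above.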
1879
1880
1881void MacroAssembler::EnterFrame(StackFrame::Type type) {
1882  push(rbp);
1883  movq(rbp, rsp);
1884  push(rsi);  // Context.
1885  Push(Smi::FromInt(type));
1886  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1887  push(kScratchRegister);
1888  if (FLAG_debug_code) {
1889    movq(kScratchRegister,
1890         Factory::undefined_value(),
1891         RelocInfo::EMBEDDED_OBJECT);
1892    cmpq(Operand(rsp, 0), kScratchRegister);
1893    Check(not_equal, "code object not properly patched");
1894  }
1895}
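
// For reference, the pushes above leave the frame laid out as follows
// (offsets relative to rbp, derived directly from the code):
//
//   rbp + 1 * kPointerSize : return address
//   rbp + 0                : caller's rbp
//   rbp - 1 * kPointerSize : context (rsi)
//   rbp - 2 * kPointerSize : frame type marker (Smi)
//   rbp - 3 * kPointerSize : code object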
1896
1897
1898void MacroAssembler::LeaveFrame(StackFrame::Type type) {
1899  if (FLAG_debug_code) {
1900    Move(kScratchRegister, Smi::FromInt(type));
1901    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
1902    Check(equal, "stack frame types must match");
1903  }
1904  movq(rsp, rbp);
1905  pop(rbp);
1906}
1907
1908
1909void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
1910  // Set up the frame structure on the stack.
1911  // All constants are relative to the frame pointer of the exit frame.
1912  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
1913  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
1914  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
1915  push(rbp);
1916  movq(rbp, rsp);
1917
1918  // Reserve room for the entry stack pointer and push the debug marker.
1919  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
1920  push(Immediate(0));  // saved entry sp, patched before call
1921  if (mode == ExitFrame::MODE_DEBUG) {
1922    push(Immediate(0));
1923  } else {
1924    movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1925    push(kScratchRegister);
1926  }
1927
1928  // Save the frame pointer and the context in top.
1929  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
1930  ExternalReference context_address(Top::k_context_address);
1931  movq(r14, rax);  // Backup rax before we use it.
1932
1933  movq(rax, rbp);
1934  store_rax(c_entry_fp_address);
1935  movq(rax, rsi);
1936  store_rax(context_address);
1937
1938  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
1939  // so it must be retained across the C-call.
1940  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
1941  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
1942
1943#ifdef ENABLE_DEBUGGER_SUPPORT
1944  // Save the state of all registers to the stack from the memory
1945  // location. This is needed to allow nested break points.
1946  if (mode == ExitFrame::MODE_DEBUG) {
1947    // TODO(1243899): This should be symmetric to
1948    // CopyRegistersFromStackToMemory() but it isn't! rsp is assumed
1949    // correct here, but computed for the other call. Very error
1950    // prone! FIX THIS.  Actually there are deeper problems with
1951    // register saving than this asymmetry (see the bug report
1952    // associated with this issue).
1953    PushRegistersFromMemory(kJSCallerSaved);
1954  }
1955#endif
1956
1957#ifdef _WIN64
1958  // Reserve space on stack for result and argument structures, if necessary.
1959  int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
1960  // Reserve space for the Arguments object.  The Windows 64-bit ABI
1961  // requires us to pass this structure as a pointer to its location on
1962  // the stack.  The structure contains 2 values.
1963  int argument_stack_space = 2 * kPointerSize;
1964  // We also need backing space for 4 parameters, even though
1965  // we only pass one or two parameters, and they are passed in registers.
1966  int argument_mirror_space = 4 * kPointerSize;
1967  int total_stack_space =
1968      argument_mirror_space + argument_stack_space + result_stack_space;
1969  subq(rsp, Immediate(total_stack_space));
1970#endif
1971
1972  // Get the required frame alignment for the OS.
1973  static const int kFrameAlignment = OS::ActivationFrameAlignment();
1974  if (kFrameAlignment > 0) {
1975    ASSERT(IsPowerOf2(kFrameAlignment));
1976    movq(kScratchRegister, Immediate(-kFrameAlignment));
1977    and_(rsp, kScratchRegister);
1978  }
1979
1980  // Patch the saved entry sp.
1981  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
1982}
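
// A typical caller (an assumption about usage, e.g. a C entry stub) brackets
// its call into C code with the matching pair defined in this file:
//
//   masm->EnterExitFrame(mode, result_size);
//   // ... place the C arguments and call the runtime function ...
//   masm->LeaveExitFrame(mode, result_size);
//
// where mode is ExitFrame::MODE_DEBUG when the frame is built for a debug
// break, and result_size says how many pointers the C callee returns.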
1983
1984
1985void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
1986  // Registers:
1987  // r15 : argv
1988#ifdef ENABLE_DEBUGGER_SUPPORT
1989  // Restore the memory copy of the registers by digging them out from
1990  // the stack. This is needed to allow nested break points.
1991  if (mode == ExitFrame::MODE_DEBUG) {
1992    // It's okay to clobber register rbx below because we don't need
1993    // the function pointer after this.
1994    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
1995    int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
1996    lea(rbx, Operand(rbp, kOffset));
1997    CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
1998  }
1999#endif
2000
2001  // Get the return address from the stack and restore the frame pointer.
2002  movq(rcx, Operand(rbp, 1 * kPointerSize));
2003  movq(rbp, Operand(rbp, 0 * kPointerSize));
2004
2005  // Pop everything up to and including the arguments and the receiver
2006  // from the caller stack.
2007  lea(rsp, Operand(r15, 1 * kPointerSize));
2008
2009  // Restore current context from top and clear it in debug mode.
2010  ExternalReference context_address(Top::k_context_address);
2011  movq(kScratchRegister, context_address);
2012  movq(rsi, Operand(kScratchRegister, 0));
2013#ifdef DEBUG
2014  movq(Operand(kScratchRegister, 0), Immediate(0));
2015#endif
2016
2017  // Push the return address to get ready to return.
2018  push(rcx);
2019
2020  // Clear the top frame.
2021  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
2022  movq(kScratchRegister, c_entry_fp_address);
2023  movq(Operand(kScratchRegister, 0), Immediate(0));
2024}
2025
2026
2027Register MacroAssembler::CheckMaps(JSObject* object,
2028                                   Register object_reg,
2029                                   JSObject* holder,
2030                                   Register holder_reg,
2031                                   Register scratch,
2032                                   Label* miss) {
2033  // Make sure there's no overlap between scratch and the other
2034  // registers.
2035  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
2036
2037  // Keep track of the current object in register reg.  On the first
2038  // iteration, reg is an alias for object_reg, on later iterations,
2039  // it is an alias for holder_reg.
2040  Register reg = object_reg;
2041  int depth = 1;
2042
2043  // Check the maps in the prototype chain.
2044  // Traverse the prototype chain from the object and do map checks.
2045  while (object != holder) {
2046    depth++;
2047
2048    // Only global objects and objects that do not require access
2049    // checks are allowed in stubs.
2050    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
2051
2052    JSObject* prototype = JSObject::cast(object->GetPrototype());
2053    if (Heap::InNewSpace(prototype)) {
2054      // Get the map of the current object.
2055      movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
2056      Cmp(scratch, Handle<Map>(object->map()));
2057      // Branch on the result of the map check.
2058      j(not_equal, miss);
2059      // Check access rights to the global object.  This has to happen
2060      // after the map check so that we know that the object is
2061      // actually a global object.
2062      if (object->IsJSGlobalProxy()) {
2063        CheckAccessGlobalProxy(reg, scratch, miss);
2064
2065        // Restore scratch register to be the map of the object.
2066        // We load the prototype from the map in the scratch register.
2067        movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
2068      }
2069      // The prototype is in new space; we cannot store a reference
2070      // to it in the code. Load it from the map.
2071      reg = holder_reg;  // from now on the object is in holder_reg
2072      movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));
2073
2074    } else {
2075      // Check the map of the current object.
2076      Cmp(FieldOperand(reg, HeapObject::kMapOffset),
2077          Handle<Map>(object->map()));
2078      // Branch on the result of the map check.
2079      j(not_equal, miss);
2080      // Check access rights to the global object.  This has to happen
2081      // after the map check so that we know that the object is
2082      // actually a global object.
2083      if (object->IsJSGlobalProxy()) {
2084        CheckAccessGlobalProxy(reg, scratch, miss);
2085      }
2086      // The prototype is in old space; load it directly.
2087      reg = holder_reg;  // from now on the object is in holder_reg
2088      Move(reg, Handle<JSObject>(prototype));
2089    }
2090
2091    // Go to the next object in the prototype chain.
2092    object = prototype;
2093  }
2094
2095  // Check the holder map.
2096  Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
2097  j(not_equal, miss);
2098
2099  // Log the check depth.
2100  LOG(IntEvent("check-maps-depth", depth));
2101
2102  // Perform security check for access to the global object and return
2103  // the holder register.
2104  ASSERT(object == holder);
2105  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
2106  if (object->IsJSGlobalProxy()) {
2107    CheckAccessGlobalProxy(reg, scratch, miss);
2108  }
2109  return reg;
2110}
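
// Usage sketch (registers and labels are made up; object and holder are
// JSObject pointers known at code generation time): checking a receiver in
// rdx against a holder further down the prototype chain could look like
//
//   Label miss;
//   Register holder_reg =
//       masm->CheckMaps(object, rdx, holder, rbx, rcx, &miss);
//   // ... load the property from holder_reg ...
//
// On a map mismatch or a failed access check, control continues at miss.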
2111
2112
2113void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
2114                                            Register scratch,
2115                                            Label* miss) {
2116  Label same_contexts;
2117
2118  ASSERT(!holder_reg.is(scratch));
2119  ASSERT(!scratch.is(kScratchRegister));
2120  // Load current lexical context from the stack frame.
2121  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
2122
2123  // When generating debug code, make sure the lexical context is set.
2124  if (FLAG_debug_code) {
2125    cmpq(scratch, Immediate(0));
2126    Check(not_equal, "we should not have an empty lexical context");
2127  }
2128  // Load the global context of the current context.
2129  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
2130  movq(scratch, FieldOperand(scratch, offset));
2131  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
2132
2133  // Check the context is a global context.
2134  if (FLAG_debug_code) {
2135    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
2136        Factory::global_context_map());
2137    Check(equal, "JSGlobalObject::global_context should be a global context.");
2138  }
2139
2140  // Check if both contexts are the same.
2141  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2142  j(equal, &same_contexts);
2143
2144  // Compare security tokens.
2145  // Check that the security token in the calling global object is
2146  // compatible with the security token in the receiving global
2147  // object.
2148
2149  // Check the context is a global context.
2150  if (FLAG_debug_code) {
2151    // Preserve original value of holder_reg.
2152    push(holder_reg);
2153    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2154    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
2155    Check(not_equal, "JSGlobalProxy::context() should not be null.");
2156
2157    // Read the first word and compare it to global_context_map().
2158    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
2159    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
2160    Check(equal, "JSGlobalObject::global_context should be a global context.");
2161    pop(holder_reg);
2162  }
2163
2164  movq(kScratchRegister,
2165       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2166  int token_offset =
2167      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
2168  movq(scratch, FieldOperand(scratch, token_offset));
2169  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
2170  j(not_equal, miss);
2171
2172  bind(&same_contexts);
2173}
2174
2175
2176void MacroAssembler::LoadAllocationTopHelper(Register result,
2177                                             Register result_end,
2178                                             Register scratch,
2179                                             AllocationFlags flags) {
2180  ExternalReference new_space_allocation_top =
2181      ExternalReference::new_space_allocation_top_address();
2182
2183  // Just return if allocation top is already known.
2184  if ((flags & RESULT_CONTAINS_TOP) != 0) {
2185    // No use of scratch if allocation top is provided.
2186    ASSERT(scratch.is(no_reg));
2187#ifdef DEBUG
2188    // Assert that result actually contains top on entry.
2189    movq(kScratchRegister, new_space_allocation_top);
2190    cmpq(result, Operand(kScratchRegister, 0));
2191    Check(equal, "Unexpected allocation top");
2192#endif
2193    return;
2194  }
2195
2196  // Move address of new object to result. Use scratch register if available.
2197  if (scratch.is(no_reg)) {
2198    movq(kScratchRegister, new_space_allocation_top);
2199    movq(result, Operand(kScratchRegister, 0));
2200  } else {
2201    ASSERT(!scratch.is(result_end));
2202    movq(scratch, new_space_allocation_top);
2203    movq(result, Operand(scratch, 0));
2204  }
2205}
2206
2207
2208void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
2209                                               Register scratch) {
2210  if (FLAG_debug_code) {
2211    testq(result_end, Immediate(kObjectAlignmentMask));
2212    Check(zero, "Unaligned allocation in new space");
2213  }
2214
2215  ExternalReference new_space_allocation_top =
2216      ExternalReference::new_space_allocation_top_address();
2217
2218  // Update new top.
2219  if (result_end.is(rax)) {
2220    // rax can be stored directly to a memory location.
2221    store_rax(new_space_allocation_top);
2222  } else {
2223    // Register required - use scratch provided if available.
2224    if (scratch.is(no_reg)) {
2225      movq(kScratchRegister, new_space_allocation_top);
2226      movq(Operand(kScratchRegister, 0), result_end);
2227    } else {
2228      movq(Operand(scratch, 0), result_end);
2229    }
2230  }
2231}
2232
2233
2234void MacroAssembler::AllocateInNewSpace(int object_size,
2235                                        Register result,
2236                                        Register result_end,
2237                                        Register scratch,
2238                                        Label* gc_required,
2239                                        AllocationFlags flags) {
2240  ASSERT(!result.is(result_end));
2241
2242  // Load address of new object into result.
2243  LoadAllocationTopHelper(result, result_end, scratch, flags);
2244
2245  // Calculate new top and bail out if new space is exhausted.
2246  ExternalReference new_space_allocation_limit =
2247      ExternalReference::new_space_allocation_limit_address();
2248  lea(result_end, Operand(result, object_size));
2249  movq(kScratchRegister, new_space_allocation_limit);
2250  cmpq(result_end, Operand(kScratchRegister, 0));
2251  j(above, gc_required);
2252
2253  // Update allocation top.
2254  UpdateAllocationTopHelper(result_end, scratch);
2255
2256  // Tag the result if requested.
2257  if ((flags & TAG_OBJECT) != 0) {
2258    addq(result, Immediate(kHeapObjectTag));
2259  }
2260}
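
// Minimal usage sketch (register assignment made up for illustration):
// allocating a fixed-size, tagged object and bailing out to a slow path when
// new space is exhausted.
//
//   Label gc_required;
//   masm->AllocateInNewSpace(JSObject::kHeaderSize,  // size in bytes
//                            rax,                    // result
//                            rbx,                    // result_end
//                            rcx,                    // scratch
//                            &gc_required,
//                            TAG_OBJECT);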
2261
2262
2263void MacroAssembler::AllocateInNewSpace(int header_size,
2264                                        ScaleFactor element_size,
2265                                        Register element_count,
2266                                        Register result,
2267                                        Register result_end,
2268                                        Register scratch,
2269                                        Label* gc_required,
2270                                        AllocationFlags flags) {
2271  ASSERT(!result.is(result_end));
2272
2273  // Load address of new object into result.
2274  LoadAllocationTopHelper(result, result_end, scratch, flags);
2275
2276  // Calculate new top and bail out if new space is exhausted.
2277  ExternalReference new_space_allocation_limit =
2278      ExternalReference::new_space_allocation_limit_address();
2279  lea(result_end, Operand(result, element_count, element_size, header_size));
2280  movq(kScratchRegister, new_space_allocation_limit);
2281  cmpq(result_end, Operand(kScratchRegister, 0));
2282  j(above, gc_required);
2283
2284  // Update allocation top.
2285  UpdateAllocationTopHelper(result_end, scratch);
2286
2287  // Tag the result if requested.
2288  if ((flags & TAG_OBJECT) != 0) {
2289    addq(result, Immediate(kHeapObjectTag));
2290  }
2291}
2292
2293
2294void MacroAssembler::AllocateInNewSpace(Register object_size,
2295                                        Register result,
2296                                        Register result_end,
2297                                        Register scratch,
2298                                        Label* gc_required,
2299                                        AllocationFlags flags) {
2300  // Load address of new object into result.
2301  LoadAllocationTopHelper(result, result_end, scratch, flags);
2302
2303  // Calculate new top and bail out if new space is exhausted.
2304  ExternalReference new_space_allocation_limit =
2305      ExternalReference::new_space_allocation_limit_address();
2306  if (!object_size.is(result_end)) {
2307    movq(result_end, object_size);
2308  }
2309  addq(result_end, result);
2310  movq(kScratchRegister, new_space_allocation_limit);
2311  cmpq(result_end, Operand(kScratchRegister, 0));
2312  j(above, gc_required);
2313
2314  // Update allocation top.
2315  UpdateAllocationTopHelper(result_end, scratch);
2316
2317  // Tag the result if requested.
2318  if ((flags & TAG_OBJECT) != 0) {
2319    addq(result, Immediate(kHeapObjectTag));
2320  }
2321}
2322
2323
2324void MacroAssembler::UndoAllocationInNewSpace(Register object) {
2325  ExternalReference new_space_allocation_top =
2326      ExternalReference::new_space_allocation_top_address();
2327
2328  // Make sure the object has no tag before resetting top.
2329  and_(object, Immediate(~kHeapObjectTagMask));
2330  movq(kScratchRegister, new_space_allocation_top);
2331#ifdef DEBUG
2332  cmpq(object, Operand(kScratchRegister, 0));
2333  Check(below, "Undo allocation of non allocated memory");
2334#endif
2335  movq(Operand(kScratchRegister, 0), object);
2336}
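
// Usage sketch (hypothetical flow): undoing an allocation when a follow-up
// check fails before the object is initialized. This only makes sense if
// nothing else has been allocated since, which the caller must guarantee.
//
//   masm->AllocateInNewSpace(HeapNumber::kSize, rax, rbx, no_reg, &gc,
//                            TAG_OBJECT);
//   // ... some check fails before the object is initialized ...
//   masm->UndoAllocationInNewSpace(rax);  // rax may still carry the heap tag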
2337
2338
2339void MacroAssembler::AllocateHeapNumber(Register result,
2340                                        Register scratch,
2341                                        Label* gc_required) {
2342  // Allocate heap number in new space.
2343  AllocateInNewSpace(HeapNumber::kSize,
2344                     result,
2345                     scratch,
2346                     no_reg,
2347                     gc_required,
2348                     TAG_OBJECT);
2349
2350  // Set the map.
2351  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
2352  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2353}
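
// Usage sketch (registers chosen for illustration): producing a boxed double
// typically pairs the allocation with a store into the value field, e.g.
//
//   Label gc;
//   masm->AllocateHeapNumber(rax, rcx, &gc);
//   // the untagged bits of the double are assumed to be in rbx here
//   masm->movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);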
2354
2355
2356void MacroAssembler::AllocateTwoByteString(Register result,
2357                                           Register length,
2358                                           Register scratch1,
2359                                           Register scratch2,
2360                                           Register scratch3,
2361                                           Label* gc_required) {
2362  // Calculate the number of bytes needed for the characters in the string
2363  // while observing object alignment.
2364  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2365  ASSERT(kShortSize == 2);
2366  // scratch1 = length * 2 + kObjectAlignmentMask.
2367  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
2368  and_(scratch1, Immediate(~kObjectAlignmentMask));
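  // Worked example (assuming kObjectAlignmentMask is 7, i.e. 8-byte object
  // alignment on x64): for length == 3 the lea above computes
  // 3 + 3 + 7 = 13, and the masking rounds that down to 8, padding the
  // 6 bytes of character data up to the next object-aligned size.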
2369
2370  // Allocate a two-byte string in new space.
2371  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
2372                     times_1,
2373                     scratch1,
2374                     result,
2375                     scratch2,
2376                     scratch3,
2377                     gc_required,
2378                     TAG_OBJECT);
2379
2380  // Set the map, length and hash field.
2381  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
2382  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2383  movl(FieldOperand(result, String::kLengthOffset), length);
2384  movl(FieldOperand(result, String::kHashFieldOffset),
2385       Immediate(String::kEmptyHashField));
2386}
2387
2388
2389void MacroAssembler::AllocateAsciiString(Register result,
2390                                         Register length,
2391                                         Register scratch1,
2392                                         Register scratch2,
2393                                         Register scratch3,
2394                                         Label* gc_required) {
2395  // Calculate the number of bytes needed for the characters in the string
2396  // while observing object alignment.
2397  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
2398  movl(scratch1, length);
2399  ASSERT(kCharSize == 1);
2400  addq(scratch1, Immediate(kObjectAlignmentMask));
2401  and_(scratch1, Immediate(~kObjectAlignmentMask));
2402
2403  // Allocate an ASCII string in new space.
2404  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
2405                     times_1,
2406                     scratch1,
2407                     result,
2408                     scratch2,
2409                     scratch3,
2410                     gc_required,
2411                     TAG_OBJECT);
2412
2413  // Set the map, length and hash field.
2414  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
2415  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2416  movl(FieldOperand(result, String::kLengthOffset), length);
2417  movl(FieldOperand(result, String::kHashFieldOffset),
2418       Immediate(String::kEmptyHashField));
2419}
2420
2421
2422void MacroAssembler::AllocateConsString(Register result,
2423                                        Register scratch1,
2424                                        Register scratch2,
2425                                        Label* gc_required) {
2426  // Allocate a cons string object in new space.
2427  AllocateInNewSpace(ConsString::kSize,
2428                     result,
2429                     scratch1,
2430                     scratch2,
2431                     gc_required,
2432                     TAG_OBJECT);
2433
2434  // Set the map. The other fields are left uninitialized.
2435  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
2436  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2437}
2438
2439
2440void MacroAssembler::AllocateAsciiConsString(Register result,
2441                                             Register scratch1,
2442                                             Register scratch2,
2443                                             Label* gc_required) {
2444  // Allocate an ASCII cons string object in new space.
2445  AllocateInNewSpace(ConsString::kSize,
2446                     result,
2447                     scratch1,
2448                     scratch2,
2449                     gc_required,
2450                     TAG_OBJECT);
2451
2452  // Set the map. The other fields are left uninitialized.
2453  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
2454  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2455}
2456
2457
2458void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2459  if (context_chain_length > 0) {
2460    // Move up the chain of contexts to the context containing the slot.
2461    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
2462    // Load the function context (which is the incoming, outer context).
2463    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2464    for (int i = 1; i < context_chain_length; i++) {
2465      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2466      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2467    }
2468    // The context may be an intermediate context, not a function context.
2469    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2470  } else {  // context is the current function context.
2471    // The context may be an intermediate context, not a function context.
2472    movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2473  }
2474}
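
// Usage sketch (the slot index is made up): reading a slot from a context
// two levels up the static chain.
//
//   masm->LoadContext(rax, 2);
//   masm->movq(rax, Operand(rax, Context::SlotOffset(slot_index)));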
2475
2476
2477CodePatcher::CodePatcher(byte* address, int size)
2478    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
2479  // Create a new macro assembler pointing to the address of the code to patch.
2480  // The size is adjusted with kGap in order for the assembler to generate size
2481  // bytes of instructions without failing with buffer size constraints.
2482  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2483}
2484
2485
2486CodePatcher::~CodePatcher() {
2487  // Indicate that code has changed.
2488  CPU::FlushICache(address_, size_);
2489
2490  // Check that the code was patched as expected.
2491  ASSERT(masm_.pc_ == address_ + size_);
2492  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2493}
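
// Usage sketch (the address and patch are made up, and the masm() accessor is
// assumed from the class declaration in the header): overwriting the first
// byte of generated code with a breakpoint instruction.
//
//   CodePatcher patcher(address, 1);
//   patcher.masm()->int3();
//
// The destructor then flushes the instruction cache for the patched range.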
2494
2495} }  // namespace v8::internal
2496