macro-assembler-x64.cc revision eab96aab0834f21954b5d6aa6366bcfb348ed811
1// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "bootstrapper.h"
31#include "codegen-inl.h"
32#include "assembler-x64.h"
33#include "macro-assembler-x64.h"
34#include "serialize.h"
35#include "debug.h"
36
37namespace v8 {
38namespace internal {
39
40MacroAssembler::MacroAssembler(void* buffer, int size)
41    : Assembler(buffer, size),
42      unresolved_(0),
43      generating_stub_(false),
44      allow_stub_calls_(true),
45      code_object_(Heap::undefined_value()) {
46}
47
48
49void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
50  movq(destination, Operand(r13, index << kPointerSizeLog2));
51}
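// Example of the root-list addressing above: a call such as
// LoadRoot(rax, Heap::kUndefinedValueRootIndex), as used by IllegalOperation
// below, compiles to a single movq from the root array addressed off r13.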
52
53
54void MacroAssembler::PushRoot(Heap::RootListIndex index) {
55  push(Operand(r13, index << kPointerSizeLog2));
56}
57
58
59void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
60  cmpq(with, Operand(r13, index << kPointerSizeLog2));
61}
62
63
64void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
65  LoadRoot(kScratchRegister, index);
66  cmpq(with, kScratchRegister);
67}
68
69
70void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
71  CompareRoot(rsp, Heap::kStackLimitRootIndex);
72  j(below, on_stack_overflow);
73}
74
75
76static void RecordWriteHelper(MacroAssembler* masm,
77                              Register object,
78                              Register addr,
79                              Register scratch) {
80  Label fast;
81
82  // Compute the page start address from the heap object pointer, and reuse
83  // the 'object' register for it.
84  ASSERT(is_int32(~Page::kPageAlignmentMask));
85  masm->and_(object,
86             Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
87  Register page_start = object;
88
89  // Compute the pointer's word index within the page, which is also its bit
90  // address in the remembered set. Reuse 'addr' as pointer_offset.
91  masm->subq(addr, page_start);
92  masm->shr(addr, Immediate(kPointerSizeLog2));
93  Register pointer_offset = addr;
94
95  // If the bit offset lies beyond the normal remembered set range, it is in
96  // the extra remembered set area of a large object.
97  masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
98  masm->j(less, &fast);
99
100  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
101  // extra remembered set after the large object.
102
103  // Load the array length into 'scratch'.
104  masm->movl(scratch,
105             Operand(page_start,
106                     Page::kObjectStartOffset + FixedArray::kLengthOffset));
107  Register array_length = scratch;
108
109  // Extra remembered set starts right after the large object (a FixedArray), at
110  //   page_start + kObjectStartOffset + objectSize
111  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
112  // Add the delta between the end of the normal RSet and the start of the
113  // extra RSet to 'page_start', so that addressing the bit using
114  // 'pointer_offset' hits the extra RSet words.
115  masm->lea(page_start,
116            Operand(page_start, array_length, times_pointer_size,
117                    Page::kObjectStartOffset + FixedArray::kHeaderSize
118                        - Page::kRSetEndOffset));
119
120  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
121  // to limit code size. We should probably evaluate this decision by
122  // measuring the performance of an equivalent implementation using
123  // "simpler" instructions
124  masm->bind(&fast);
125  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
126}
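// Worked example of the computation above (with 8-byte pointers, so
// kPointerSizeLog2 == 3): for a slot at page_start + 0x818, pointer_offset
// becomes 0x818 >> 3 = 0x103, and the bts sets bit 0x103 of the bitmap that
// starts at page_start + Page::kRSetOffset.  Slots whose bit index reaches
// Page::kPageSize / kPointerSize can only belong to a large object and are
// redirected to the extra remembered set placed after that object.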
127
128
129class RecordWriteStub : public CodeStub {
130 public:
131  RecordWriteStub(Register object, Register addr, Register scratch)
132      : object_(object), addr_(addr), scratch_(scratch) { }
133
134  void Generate(MacroAssembler* masm);
135
136 private:
137  Register object_;
138  Register addr_;
139  Register scratch_;
140
141#ifdef DEBUG
142  void Print() {
143    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
144           object_.code(), addr_.code(), scratch_.code());
145  }
146#endif
147
148  // Minor key encoding in 12 bits of three registers (object, address and
149  // scratch) OOOOAAAASSSS.
150  class ScratchBits : public BitField<uint32_t, 0, 4> {};
151  class AddressBits : public BitField<uint32_t, 4, 4> {};
152  class ObjectBits : public BitField<uint32_t, 8, 4> {};
153
154  Major MajorKey() { return RecordWrite; }
155
156  int MinorKey() {
157    // Encode the registers.
158    return ObjectBits::encode(object_.code()) |
159           AddressBits::encode(addr_.code()) |
160           ScratchBits::encode(scratch_.code());
161  }
162};
163
164
165void RecordWriteStub::Generate(MacroAssembler* masm) {
166  RecordWriteHelper(masm, object_, addr_, scratch_);
167  masm->ret(0);
168}
169
170
171// Set the remembered set bit for [object+offset].
172// object is the object being stored into, value is the object being stored.
173// If offset is zero, then the smi_index register contains the array index into
174// the elements array represented as a smi. Otherwise it can be used as a
175// scratch register.
176// All registers are clobbered by the operation.
177void MacroAssembler::RecordWrite(Register object,
178                                 int offset,
179                                 Register value,
180                                 Register smi_index) {
181  // First, check if a remembered set write is even needed. The tests below
182  // catch stores of Smis and stores into the young generation (which does not
183  // have space for the remembered set bits).
184  Label done;
185  JumpIfSmi(value, &done);
186
187  RecordWriteNonSmi(object, offset, value, smi_index);
188  bind(&done);
189}
190
191
192void MacroAssembler::RecordWriteNonSmi(Register object,
193                                       int offset,
194                                       Register scratch,
195                                       Register smi_index) {
196  Label done;
197  // Test that the object address is not in the new space.  We cannot
198  // set remembered set bits in the new space.
199  movq(scratch, object);
200  ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
201  and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
202  movq(kScratchRegister, ExternalReference::new_space_start());
203  cmpq(scratch, kScratchRegister);
204  j(equal, &done);
205
206  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
207    // Compute the bit offset in the remembered set, leave it in 'scratch'.
208    lea(scratch, Operand(object, offset));
209    ASSERT(is_int32(Page::kPageAlignmentMask));
210    and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
211    shr(scratch, Immediate(kObjectAlignmentBits));
212
213    // Compute the page address from the heap object pointer, leave it in
214    // 'object' (immediate value is sign extended).
215    and_(object, Immediate(~Page::kPageAlignmentMask));
216
217    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
218    // to limit code size. We should probably evaluate this decision by
219    // measuring the performance of an equivalent implementation using
220    // "simpler" instructions
221    bts(Operand(object, Page::kRSetOffset), scratch);
222  } else {
223    Register dst = smi_index;
224    if (offset != 0) {
225      lea(dst, Operand(object, offset));
226    } else {
227      // array access: calculate the destination address in the same manner as
228      // KeyedStoreIC::GenerateGeneric.
229      SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
230      lea(dst, Operand(object,
231                       index.reg,
232                       index.scale,
233                       FixedArray::kHeaderSize - kHeapObjectTag));
234    }
235    // If we are already generating a shared stub, not inlining the
236    // record write code isn't going to save us any memory.
237    if (generating_stub()) {
238      RecordWriteHelper(this, object, dst, scratch);
239    } else {
240      RecordWriteStub stub(object, dst, scratch);
241      CallStub(&stub);
242    }
243  }
244
245  bind(&done);
246}
247
248
249void MacroAssembler::Assert(Condition cc, const char* msg) {
250  if (FLAG_debug_code) Check(cc, msg);
251}
252
253
254void MacroAssembler::Check(Condition cc, const char* msg) {
255  Label L;
256  j(cc, &L);
257  Abort(msg);
258  // will not return here
259  bind(&L);
260}
261
262
263void MacroAssembler::NegativeZeroTest(Register result,
264                                      Register op,
265                                      Label* then_label) {
266  Label ok;
267  testl(result, result);
268  j(not_zero, &ok);
269  testl(op, op);
270  j(sign, then_label);
271  bind(&ok);
272}
273
274
275void MacroAssembler::Abort(const char* msg) {
276  // We want to pass the msg string like a smi to avoid GC
277  // problems, however msg is not guaranteed to be aligned
278  // properly. Instead, we pass an aligned pointer that is
279  // a proper v8 smi, but also pass the alignment difference
280  // from the real pointer as a smi.
281  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
282  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
283  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
284  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
285#ifdef DEBUG
286  if (msg != NULL) {
287    RecordComment("Abort message: ");
288    RecordComment(msg);
289  }
290#endif
291  // Disable stub call restrictions to always allow calls to abort.
292  set_allow_stub_calls(true);
293
294  push(rax);
295  movq(kScratchRegister, p0, RelocInfo::NONE);
296  push(kScratchRegister);
297  movq(kScratchRegister,
298       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
299       RelocInfo::NONE);
300  push(kScratchRegister);
301  CallRuntime(Runtime::kAbort, 2);
302  // will not return here
303  int3();
304}
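// Example of the encoding above: with kSmiTagMask == 1, a message string at
// address p1 = 0x1005 is passed as p0 = 0x1004 (which carries a valid smi
// tag) together with Smi::FromInt(1) for the alignment difference, so the
// runtime can reconstruct the original char* as p0 + 1 without the GC ever
// seeing an unaligned pointer.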
305
306
307void MacroAssembler::CallStub(CodeStub* stub) {
308  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
309  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
310}
311
312
313void MacroAssembler::TailCallStub(CodeStub* stub) {
314  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
315  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
316}
317
318
319void MacroAssembler::StubReturn(int argc) {
320  ASSERT(argc >= 1 && generating_stub());
321  ret((argc - 1) * kPointerSize);
322}
323
324
325void MacroAssembler::IllegalOperation(int num_arguments) {
326  if (num_arguments > 0) {
327    addq(rsp, Immediate(num_arguments * kPointerSize));
328  }
329  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
330}
331
332
333void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
334  CallRuntime(Runtime::FunctionForId(id), num_arguments);
335}
336
337
338void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
339  // If the expected number of arguments of the runtime function is
340  // constant, we check that the actual number of arguments matches the
341  // expectation.
342  if (f->nargs >= 0 && f->nargs != num_arguments) {
343    IllegalOperation(num_arguments);
344    return;
345  }
346
347  Runtime::FunctionId function_id =
348      static_cast<Runtime::FunctionId>(f->stub_id);
349  RuntimeStub stub(function_id, num_arguments);
350  CallStub(&stub);
351}
352
353
354void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
355                                     int num_arguments,
356                                     int result_size) {
357  // ----------- S t a t e -------------
358  //  -- rsp[0] : return address
359  //  -- rsp[8] : argument num_arguments - 1
360  //  ...
361  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
362  // -----------------------------------
363
364  // TODO(1236192): Most runtime routines don't need the number of
365  // arguments passed in because it is constant. At some point we
366  // should remove this need and make the runtime routine entry code
367  // smarter.
368  movq(rax, Immediate(num_arguments));
369  JumpToRuntime(ext, result_size);
370}
371
372
373void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
374                                   int result_size) {
375  // Set the entry point and jump to the C entry runtime stub.
376  movq(rbx, ext);
377  CEntryStub ces(result_size);
378  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
379}
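// Register protocol used by TailCallRuntime and JumpToRuntime above: rax
// carries the argument count and rbx the external entry point before control
// transfers to the CEntryStub, which is parameterized by the result size.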
380
381
382void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
383  bool resolved;
384  Handle<Code> code = ResolveBuiltin(id, &resolved);
385
386  const char* name = Builtins::GetName(id);
387  int argc = Builtins::GetArgumentsCount(id);
388
389  movq(target, code, RelocInfo::EMBEDDED_OBJECT);
390  if (!resolved) {
391    uint32_t flags =
392        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
393        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
394    Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
395    unresolved_.Add(entry);
396  }
397  addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
398}
399
400Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
401                                            bool* resolved) {
402  // Move the builtin function into the temporary function slot by
403  // reading it from the builtins object. NOTE: We should be able to
404  // reduce this to two instructions by putting the function table in
405  // the global object instead of the "builtins" object and by using a
406  // real register for the function.
407  movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
408  movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset));
409  int builtins_offset =
410      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
411  movq(rdi, FieldOperand(rdx, builtins_offset));
412
413  return Builtins::GetCode(id, resolved);
414}
415
416
417void MacroAssembler::Set(Register dst, int64_t x) {
418  if (x == 0) {
419    xor_(dst, dst);
420  } else if (is_int32(x)) {
421    movq(dst, Immediate(static_cast<int32_t>(x)));
422  } else if (is_uint32(x)) {
423    movl(dst, Immediate(static_cast<uint32_t>(x)));
424  } else {
425    movq(dst, x, RelocInfo::NONE);
426  }
427}
428
429
430void MacroAssembler::Set(const Operand& dst, int64_t x) {
431  if (x == 0) {
432    xor_(kScratchRegister, kScratchRegister);
433    movq(dst, kScratchRegister);
434  } else if (is_int32(x)) {
435    movq(dst, Immediate(static_cast<int32_t>(x)));
436  } else if (is_uint32(x)) {
437    movl(dst, Immediate(static_cast<uint32_t>(x)));
438  } else {
439    movq(kScratchRegister, x, RelocInfo::NONE);
440    movq(dst, kScratchRegister);
441  }
442}
443
444// ----------------------------------------------------------------------------
445// Smi tagging, untagging and tag detection.
446
447static int kSmiShift = kSmiTagSize + kSmiShiftSize;
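// Under the 64-bit smi layout used here (kSmiTagSize == 1 plus a 31-bit
// shift), kSmiShift works out to 32: a smi keeps its 32-bit value in the
// upper half of the word and the lower half zero.  For example,
// Smi::FromInt(5) is represented as 0x0000000500000000, so untagging is a
// right shift by 32 and tagging a 32-bit value can never overflow.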
448
449void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
450  ASSERT_EQ(0, kSmiTag);
451  if (!dst.is(src)) {
452    movl(dst, src);
453  }
454  shl(dst, Immediate(kSmiShift));
455}
456
457
458void MacroAssembler::Integer32ToSmi(Register dst,
459                                    Register src,
460                                    Label* on_overflow) {
461  ASSERT_EQ(0, kSmiTag);
462  // A 32-bit integer value always fits in a long smi, so overflow cannot occur.
463  if (!dst.is(src)) {
464    movl(dst, src);
465  }
466  shl(dst, Immediate(kSmiShift));
467}
468
469
470void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
471                                                Register src,
472                                                int constant) {
473  if (dst.is(src)) {
474    addq(dst, Immediate(constant));
475  } else {
476    lea(dst, Operand(src, constant));
477  }
478  shl(dst, Immediate(kSmiShift));
479}
480
481
482void MacroAssembler::SmiToInteger32(Register dst, Register src) {
483  ASSERT_EQ(0, kSmiTag);
484  if (!dst.is(src)) {
485    movq(dst, src);
486  }
487  shr(dst, Immediate(kSmiShift));
488}
489
490
491void MacroAssembler::SmiToInteger64(Register dst, Register src) {
492  ASSERT_EQ(0, kSmiTag);
493  if (!dst.is(src)) {
494    movq(dst, src);
495  }
496  sar(dst, Immediate(kSmiShift));
497}
498
499
500void MacroAssembler::SmiTest(Register src) {
501  testq(src, src);
502}
503
504
505void MacroAssembler::SmiCompare(Register dst, Register src) {
506  cmpq(dst, src);
507}
508
509
510void MacroAssembler::SmiCompare(Register dst, Smi* src) {
511  ASSERT(!dst.is(kScratchRegister));
512  if (src->value() == 0) {
513    testq(dst, dst);
514  } else {
515    Move(kScratchRegister, src);
516    cmpq(dst, kScratchRegister);
517  }
518}
519
520
521void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
522  cmpq(dst, src);
523}
524
525
526void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
527  if (src->value() == 0) {
528    // Zero is the only smi whose tagged representation fits in a 32-bit immediate.
529    cmpq(dst, Immediate(0));
530  } else {
531    Move(kScratchRegister, src);
532    cmpq(dst, kScratchRegister);
533  }
534}
535
536
537void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
538                                                           Register src,
539                                                           int power) {
540  ASSERT(power >= 0);
541  ASSERT(power < 64);
542  if (power == 0) {
543    SmiToInteger64(dst, src);
544    return;
545  }
546  if (!dst.is(src)) {
547    movq(dst, src);
548  }
549  if (power < kSmiShift) {
550    sar(dst, Immediate(kSmiShift - power));
551  } else if (power > kSmiShift) {
552    shl(dst, Immediate(power - kSmiShift));
553  }
554}
555
556
557Condition MacroAssembler::CheckSmi(Register src) {
558  ASSERT_EQ(0, kSmiTag);
559  testb(src, Immediate(kSmiTagMask));
560  return zero;
561}
562
563
564Condition MacroAssembler::CheckPositiveSmi(Register src) {
565  ASSERT_EQ(0, kSmiTag);
566  movq(kScratchRegister, src);
567  rol(kScratchRegister, Immediate(1));
568  testl(kScratchRegister, Immediate(0x03));
569  return zero;
570}
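// CheckPositiveSmi relies on rotating left by one: afterwards bit 0 holds the
// original sign bit and bit 1 holds the original tag bit, so a single test
// against 0x03 checks "non-negative" and "is a smi" at the same time.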
571
572
573Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
574  if (first.is(second)) {
575    return CheckSmi(first);
576  }
577  movl(kScratchRegister, first);
578  orl(kScratchRegister, second);
579  testb(kScratchRegister, Immediate(kSmiTagMask));
580  return zero;
581}
582
583
584Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
585  if (first.is(second)) {
586    return CheckSmi(first);
587  }
588  movl(kScratchRegister, first);
589  andl(kScratchRegister, second);
590  testb(kScratchRegister, Immediate(kSmiTagMask));
591  return zero;
592}
593
594
595Condition MacroAssembler::CheckIsMinSmi(Register src) {
596  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
597  movq(kScratchRegister, src);
598  rol(kScratchRegister, Immediate(1));
599  cmpq(kScratchRegister, Immediate(1));
600  return equal;
601}
602
603
604Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
605  // A 32-bit integer value can always be converted to a smi.
606  return always;
607}
608
609
610Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
611  // An unsigned 32-bit integer value is valid as long as the high bit
612  // is not set.
613  testq(src, Immediate(0x80000000));
614  return zero;
615}
616
617
618void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
619  if (dst.is(src)) {
620    ASSERT(!dst.is(kScratchRegister));
621    movq(kScratchRegister, src);
622    neg(dst);  // Low 32 bits are retained as zero by negation.
623    // Test if result is zero or Smi::kMinValue.
624    cmpq(dst, kScratchRegister);
625    j(not_equal, on_smi_result);
626    movq(src, kScratchRegister);
627  } else {
628    movq(dst, src);
629    neg(dst);
630    cmpq(dst, src);
631    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
632    j(not_equal, on_smi_result);
633  }
634}
635
636
637void MacroAssembler::SmiAdd(Register dst,
638                            Register src1,
639                            Register src2,
640                            Label* on_not_smi_result) {
641  ASSERT(!dst.is(src2));
642  if (dst.is(src1)) {
643    addq(dst, src2);
644    Label smi_result;
645    j(no_overflow, &smi_result);
646    // Restore src1.
647    subq(src1, src2);
648    jmp(on_not_smi_result);
649    bind(&smi_result);
650  } else {
651    movq(dst, src1);
652    addq(dst, src2);
653    j(overflow, on_not_smi_result);
654  }
655}
656
657
658void MacroAssembler::SmiSub(Register dst,
659                            Register src1,
660                            Register src2,
661                            Label* on_not_smi_result) {
662  ASSERT(!dst.is(src2));
663  if (dst.is(src1)) {
664    subq(dst, src2);
665    Label smi_result;
666    j(no_overflow, &smi_result);
667    // Restore src1.
668    addq(src1, src2);
669    jmp(on_not_smi_result);
670    bind(&smi_result);
671  } else {
672    movq(dst, src1);
673    subq(dst, src2);
674    j(overflow, on_not_smi_result);
675  }
676}
677
678
679void MacroAssembler::SmiMul(Register dst,
680                            Register src1,
681                            Register src2,
682                            Label* on_not_smi_result) {
683  ASSERT(!dst.is(src2));
684  ASSERT(!dst.is(kScratchRegister));
685  ASSERT(!src1.is(kScratchRegister));
686  ASSERT(!src2.is(kScratchRegister));
687
688  if (dst.is(src1)) {
689    Label failure, zero_correct_result;
690    movq(kScratchRegister, src1);  // Create backup for later testing.
691    SmiToInteger64(dst, src1);
692    imul(dst, src2);
693    j(overflow, &failure);
694
695    // Check for negative zero result.  If product is zero, and one
696    // argument is negative, go to slow case.
697    Label correct_result;
698    testq(dst, dst);
699    j(not_zero, &correct_result);
700
701    movq(dst, kScratchRegister);
702    xor_(dst, src2);
703    j(positive, &zero_correct_result);  // Result was positive zero.
704
705    bind(&failure);  // Reused failure exit, restores src1.
706    movq(src1, kScratchRegister);
707    jmp(on_not_smi_result);
708
709    bind(&zero_correct_result);
710    xor_(dst, dst);
711
712    bind(&correct_result);
713  } else {
714    SmiToInteger64(dst, src1);
715    imul(dst, src2);
716    j(overflow, on_not_smi_result);
717    // Check for negative zero result.  If product is zero, and one
718    // argument is negative, go to slow case.
719    Label correct_result;
720    testq(dst, dst);
721    j(not_zero, &correct_result);
722    // One of src1 and src2 is zero, so check whether the other one
723    // is negative.
724    movq(kScratchRegister, src1);
725    xor_(kScratchRegister, src2);
726    j(negative, on_not_smi_result);
727    bind(&correct_result);
728  }
729}
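// The negative zero checks above exist because JavaScript distinguishes +0
// from -0: for example, 0 * -3 must evaluate to -0, which cannot be
// represented as a smi, so such products fall through to the slow case
// instead of returning the smi 0.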
730
731
732void MacroAssembler::SmiTryAddConstant(Register dst,
733                                       Register src,
734                                       Smi* constant,
735                                       Label* on_not_smi_result) {
736  // Does not assume that src is a smi.
737  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
738  ASSERT_EQ(0, kSmiTag);
739  ASSERT(!dst.is(kScratchRegister));
740  ASSERT(!src.is(kScratchRegister));
741
742  JumpIfNotSmi(src, on_not_smi_result);
743  Register tmp = (dst.is(src) ? kScratchRegister : dst);
744  Move(tmp, constant);
745  addq(tmp, src);
746  j(overflow, on_not_smi_result);
747  if (dst.is(src)) {
748    movq(dst, tmp);
749  }
750}
751
752
753void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
754  if (constant->value() == 0) {
755    if (!dst.is(src)) {
756      movq(dst, src);
757    }
758  } else if (dst.is(src)) {
759    ASSERT(!dst.is(kScratchRegister));
760
761    Move(kScratchRegister, constant);
762    addq(dst, kScratchRegister);
763  } else {
764    Move(dst, constant);
765    addq(dst, src);
766  }
767}
768
769
770void MacroAssembler::SmiAddConstant(Register dst,
771                                    Register src,
772                                    Smi* constant,
773                                    Label* on_not_smi_result) {
774  if (constant->value() == 0) {
775    if (!dst.is(src)) {
776      movq(dst, src);
777    }
778  } else if (dst.is(src)) {
779    ASSERT(!dst.is(kScratchRegister));
780
781    Move(kScratchRegister, constant);
782    addq(dst, kScratchRegister);
783    Label result_ok;
784    j(no_overflow, &result_ok);
785    subq(dst, kScratchRegister);
786    jmp(on_not_smi_result);
787    bind(&result_ok);
788  } else {
789    Move(dst, constant);
790    addq(dst, src);
791    j(overflow, on_not_smi_result);
792  }
793}
794
795
796void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
797  if (constant->value() == 0) {
798    if (!dst.is(src)) {
799      movq(dst, src);
800    }
801  } else if (dst.is(src)) {
802    ASSERT(!dst.is(kScratchRegister));
803
804    Move(kScratchRegister, constant);
805    subq(dst, kScratchRegister);
806  } else {
807    // Subtract by adding the negative, to do it in two operations.
808    if (constant->value() == Smi::kMinValue) {
809      Move(kScratchRegister, constant);
810      movq(dst, src);
811      subq(dst, kScratchRegister);
812    } else {
813      Move(dst, Smi::FromInt(-constant->value()));
814      addq(dst, src);
815    }
816  }
817}
818
819
820void MacroAssembler::SmiSubConstant(Register dst,
821                                    Register src,
822                                    Smi* constant,
823                                    Label* on_not_smi_result) {
824  if (constant->value() == 0) {
825    if (!dst.is(src)) {
826      movq(dst, src);
827    }
828  } else if (dst.is(src)) {
829    ASSERT(!dst.is(kScratchRegister));
830
831    Move(kScratchRegister, constant);
832    subq(dst, kScratchRegister);
833    Label sub_success;
834    j(no_overflow, &sub_success);
835    addq(src, kScratchRegister);
836    jmp(on_not_smi_result);
837    bind(&sub_success);
838  } else {
839    if (constant->value() == Smi::kMinValue) {
840      Move(kScratchRegister, constant);
841      movq(dst, src);
842      subq(dst, kScratchRegister);
843      j(overflow, on_not_smi_result);
844    } else {
845      Move(dst, Smi::FromInt(-(constant->value())));
846      addq(dst, src);
847      j(overflow, on_not_smi_result);
848    }
849  }
850}
851
852
853void MacroAssembler::SmiDiv(Register dst,
854                            Register src1,
855                            Register src2,
856                            Label* on_not_smi_result) {
857  ASSERT(!src1.is(kScratchRegister));
858  ASSERT(!src2.is(kScratchRegister));
859  ASSERT(!dst.is(kScratchRegister));
860  ASSERT(!src2.is(rax));
861  ASSERT(!src2.is(rdx));
862  ASSERT(!src1.is(rdx));
863
864  // Check for 0 divisor (result is +/-Infinity).
865  Label positive_divisor;
866  testq(src2, src2);
867  j(zero, on_not_smi_result);
868
869  if (src1.is(rax)) {
870    movq(kScratchRegister, src1);
871  }
872  SmiToInteger32(rax, src1);
873  // We need to rule out dividing Smi::kMinValue by -1, since that would
874  // overflow in idiv and raise an exception.
875  // We combine this with the negative zero test (negative zero only happens
876  // when dividing zero by a negative number).
877
878  // We overshoot a little and go to slow case if we divide min-value
879  // by any negative value, not just -1.
880  Label safe_div;
881  testl(rax, Immediate(0x7fffffff));
882  j(not_zero, &safe_div);
883  testq(src2, src2);
884  if (src1.is(rax)) {
885    j(positive, &safe_div);
886    movq(src1, kScratchRegister);
887    jmp(on_not_smi_result);
888  } else {
889    j(negative, on_not_smi_result);
890  }
891  bind(&safe_div);
892
893  SmiToInteger32(src2, src2);
894  // Sign extend src1 into edx:eax.
895  cdq();
896  idivl(src2);
897  Integer32ToSmi(src2, src2);
898  // Check that the remainder is zero.
899  testl(rdx, rdx);
900  if (src1.is(rax)) {
901    Label smi_result;
902    j(zero, &smi_result);
903    movq(src1, kScratchRegister);
904    jmp(on_not_smi_result);
905    bind(&smi_result);
906  } else {
907    j(not_zero, on_not_smi_result);
908  }
909  if (!dst.is(src1) && src1.is(rax)) {
910    movq(src1, kScratchRegister);
911  }
912  Integer32ToSmi(dst, rax);
913}
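// Two divisions are deliberately sent to the slow case above: Smi::kMinValue
// divided by -1, whose quotient does not fit in 32 bits and would fault in
// idivl, and 0 divided by a negative number, which must produce -0.
// Divisions with a non-zero remainder (e.g. 7 / 2) are not smi results
// either and also bail out via the remainder check.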
914
915
916void MacroAssembler::SmiMod(Register dst,
917                            Register src1,
918                            Register src2,
919                            Label* on_not_smi_result) {
920  ASSERT(!dst.is(kScratchRegister));
921  ASSERT(!src1.is(kScratchRegister));
922  ASSERT(!src2.is(kScratchRegister));
923  ASSERT(!src2.is(rax));
924  ASSERT(!src2.is(rdx));
925  ASSERT(!src1.is(rdx));
926  ASSERT(!src1.is(src2));
927
928  testq(src2, src2);
929  j(zero, on_not_smi_result);
930
931  if (src1.is(rax)) {
932    movq(kScratchRegister, src1);
933  }
934  SmiToInteger32(rax, src1);
935  SmiToInteger32(src2, src2);
936
937  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
938  Label safe_div;
939  cmpl(rax, Immediate(Smi::kMinValue));
940  j(not_equal, &safe_div);
941  cmpl(src2, Immediate(-1));
942  j(not_equal, &safe_div);
943  // Retag inputs and go to the slow case.
944  Integer32ToSmi(src2, src2);
945  if (src1.is(rax)) {
946    movq(src1, kScratchRegister);
947  }
948  jmp(on_not_smi_result);
949  bind(&safe_div);
950
951  // Sign extend eax into edx:eax.
952  cdq();
953  idivl(src2);
954  // Restore smi tags on inputs.
955  Integer32ToSmi(src2, src2);
956  if (src1.is(rax)) {
957    movq(src1, kScratchRegister);
958  }
959  // Check for a negative zero result.  If the result is zero, and the
960  // dividend is negative, go slow to return a floating point negative zero.
961  Label smi_result;
962  testl(rdx, rdx);
963  j(not_zero, &smi_result);
964  testq(src1, src1);
965  j(negative, on_not_smi_result);
966  bind(&smi_result);
967  Integer32ToSmi(dst, rdx);
968}
969
970
971void MacroAssembler::SmiNot(Register dst, Register src) {
972  ASSERT(!dst.is(kScratchRegister));
973  ASSERT(!src.is(kScratchRegister));
974  // Set tag and padding bits before negating, so that they are zero afterwards.
975  movl(kScratchRegister, Immediate(~0));
976  if (dst.is(src)) {
977    xor_(dst, kScratchRegister);
978  } else {
979    lea(dst, Operand(src, kScratchRegister, times_1, 0));
980  }
981  not_(dst);
982}
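// Illustration of SmiNot: a smi keeps its payload in the upper 32 bits, so
// complementing the raw word directly would leave the lower 32 bits all set.
// Adding ~0 (0xffffffff) first means those bits become zero again after the
// not_, leaving a correctly tagged smi holding the bitwise complement of the
// original value.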
983
984
985void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
986  ASSERT(!dst.is(src2));
987  if (!dst.is(src1)) {
988    movq(dst, src1);
989  }
990  and_(dst, src2);
991}
992
993
994void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
995  if (constant->value() == 0) {
996    xor_(dst, dst);
997  } else if (dst.is(src)) {
998    ASSERT(!dst.is(kScratchRegister));
999    Move(kScratchRegister, constant);
1000    and_(dst, kScratchRegister);
1001  } else {
1002    Move(dst, constant);
1003    and_(dst, src);
1004  }
1005}
1006
1007
1008void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1009  if (!dst.is(src1)) {
1010    movq(dst, src1);
1011  }
1012  or_(dst, src2);
1013}
1014
1015
1016void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1017  if (dst.is(src)) {
1018    ASSERT(!dst.is(kScratchRegister));
1019    Move(kScratchRegister, constant);
1020    or_(dst, kScratchRegister);
1021  } else {
1022    Move(dst, constant);
1023    or_(dst, src);
1024  }
1025}
1026
1027
1028void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1029  if (!dst.is(src1)) {
1030    movq(dst, src1);
1031  }
1032  xor_(dst, src2);
1033}
1034
1035
1036void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1037  if (dst.is(src)) {
1038    ASSERT(!dst.is(kScratchRegister));
1039    Move(kScratchRegister, constant);
1040    xor_(dst, kScratchRegister);
1041  } else {
1042    Move(dst, constant);
1043    xor_(dst, src);
1044  }
1045}
1046
1047
1048void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1049                                                     Register src,
1050                                                     int shift_value) {
1051  ASSERT(is_uint5(shift_value));
1052  if (shift_value > 0) {
1053    if (dst.is(src)) {
1054      sar(dst, Immediate(shift_value + kSmiShift));
1055      shl(dst, Immediate(kSmiShift));
1056    } else {
1057      UNIMPLEMENTED();  // Not used.
1058    }
1059  }
1060}
1061
1062
1063void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
1064                                                  Register src,
1065                                                  int shift_value,
1066                                                  Label* on_not_smi_result) {
1067  // Logical right shift interprets its result as an *unsigned* number.
1068  if (dst.is(src)) {
1069    UNIMPLEMENTED();  // Not used.
1070  } else {
1071    movq(dst, src);
1072    if (shift_value == 0) {
1073      testq(dst, dst);
1074      j(negative, on_not_smi_result);
1075    }
1076    shr(dst, Immediate(shift_value + kSmiShift));
1077    shl(dst, Immediate(kSmiShift));
1078  }
1079}
1080
1081
1082void MacroAssembler::SmiShiftLeftConstant(Register dst,
1083                                          Register src,
1084                                          int shift_value,
1085                                          Label* on_not_smi_result) {
1086  if (!dst.is(src)) {
1087    movq(dst, src);
1088  }
1089  if (shift_value > 0) {
1090    shl(dst, Immediate(shift_value));
1091  }
1092}
1093
1094
1095void MacroAssembler::SmiShiftLeft(Register dst,
1096                                  Register src1,
1097                                  Register src2,
1098                                  Label* on_not_smi_result) {
1099  ASSERT(!dst.is(rcx));
1100  Label result_ok;
1101  // Untag shift amount.
1102  if (!dst.is(src1)) {
1103    movq(dst, src1);
1104  }
1105  SmiToInteger32(rcx, src2);
1106  // Shift amount is given by the lower 5 bits, not six as in the 64-bit shl.
1107  and_(rcx, Immediate(0x1f));
1108  shl_cl(dst);
1109}
1110
1111
1112void MacroAssembler::SmiShiftLogicalRight(Register dst,
1113                                          Register src1,
1114                                          Register src2,
1115                                          Label* on_not_smi_result) {
1116  ASSERT(!dst.is(kScratchRegister));
1117  ASSERT(!src1.is(kScratchRegister));
1118  ASSERT(!src2.is(kScratchRegister));
1119  ASSERT(!dst.is(rcx));
1120  Label result_ok;
1121  if (src1.is(rcx) || src2.is(rcx)) {
1122    movq(kScratchRegister, rcx);
1123  }
1124  if (!dst.is(src1)) {
1125    movq(dst, src1);
1126  }
1127  SmiToInteger32(rcx, src2);
1128  orl(rcx, Immediate(kSmiShift));
1129  shr_cl(dst);  // Shift amount is (rcx & 0x1f) + 32.
1130  shl(dst, Immediate(kSmiShift));
1131  testq(dst, dst);
1132  if (src1.is(rcx) || src2.is(rcx)) {
1133    Label positive_result;
1134    j(positive, &positive_result);
1135    if (src1.is(rcx)) {
1136      movq(src1, kScratchRegister);
1137    } else {
1138      movq(src2, kScratchRegister);
1139    }
1140    jmp(on_not_smi_result);
1141    bind(&positive_result);
1142  } else {
1143    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
1144  }
1145}
1146
1147
1148void MacroAssembler::SmiShiftArithmeticRight(Register dst,
1149                                             Register src1,
1150                                             Register src2) {
1151  ASSERT(!dst.is(kScratchRegister));
1152  ASSERT(!src1.is(kScratchRegister));
1153  ASSERT(!src2.is(kScratchRegister));
1154  ASSERT(!dst.is(rcx));
1155  if (src1.is(rcx)) {
1156    movq(kScratchRegister, src1);
1157  } else if (src2.is(rcx)) {
1158    movq(kScratchRegister, src2);
1159  }
1160  if (!dst.is(src1)) {
1161    movq(dst, src1);
1162  }
1163  SmiToInteger32(rcx, src2);
1164  orl(rcx, Immediate(kSmiShift));
1165  sar_cl(dst);  // Shift amount is (original rcx & 0x1f) + 32.
1166  shl(dst, Immediate(kSmiShift));
1167  if (src1.is(rcx)) {
1168    movq(src1, kScratchRegister);
1169  } else if (src2.is(rcx)) {
1170    movq(src2, kScratchRegister);
1171  }
1172}
1173
1174
1175void MacroAssembler::SelectNonSmi(Register dst,
1176                                  Register src1,
1177                                  Register src2,
1178                                  Label* on_not_smis) {
1179  ASSERT(!dst.is(kScratchRegister));
1180  ASSERT(!src1.is(kScratchRegister));
1181  ASSERT(!src2.is(kScratchRegister));
1182  ASSERT(!dst.is(src1));
1183  ASSERT(!dst.is(src2));
1184  // The operands must not both be smis.
1185#ifdef DEBUG
1186  if (allow_stub_calls()) {  // Check contains a stub call.
1187    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
1188    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
1189  }
1190#endif
1191  ASSERT_EQ(0, kSmiTag);
1192  ASSERT_EQ(0, Smi::FromInt(0));
1193  movl(kScratchRegister, Immediate(kSmiTagMask));
1194  and_(kScratchRegister, src1);
1195  testl(kScratchRegister, src2);
1196  // If non-zero, then neither operand is a smi.
1197  j(not_zero, on_not_smis);
1198
1199  // Exactly one operand is a smi.
1200  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
1201  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
1202  subq(kScratchRegister, Immediate(1));
1203  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
1204  movq(dst, src1);
1205  xor_(dst, src2);
1206  and_(dst, kScratchRegister);
1207  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
1208  xor_(dst, src1);
1209  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
1210}
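// Bit trick used above, e.g. with src1 a smi and src2 a heap object:
//   kScratchRegister = (src1 & kSmiTagMask) - 1   // all ones
//   dst = ((src1 ^ src2) & kScratchRegister) ^ src1
// yields src2; if instead src2 is the smi, the mask is zero and the final
// xor leaves src1, so dst always ends up holding the non-smi operand.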
1211
1212SmiIndex MacroAssembler::SmiToIndex(Register dst,
1213                                    Register src,
1214                                    int shift) {
1215  ASSERT(is_uint6(shift));
1216  // There is a possible optimization if shift is in the range 60-63, but that
1217  // will (and must) never happen.
1218  if (!dst.is(src)) {
1219    movq(dst, src);
1220  }
1221  if (shift < kSmiShift) {
1222    sar(dst, Immediate(kSmiShift - shift));
1223  } else {
1224    shl(dst, Immediate(shift - kSmiShift));
1225  }
1226  return SmiIndex(dst, times_1);
1227}
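// Example: SmiToIndex(dst, src, kPointerSizeLog2) turns the smi 7 (raw word
// 7 << 32) into 7 * 8 = 56 by shifting right by 32 - 3 = 29, which is the
// byte offset of element 7 in a FixedArray, as used by RecordWriteNonSmi
// above.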
1228
1229SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
1230                                            Register src,
1231                                            int shift) {
1232  // Register src holds a positive smi.
1233  ASSERT(is_uint6(shift));
1234  if (!dst.is(src)) {
1235    movq(dst, src);
1236  }
1237  neg(dst);
1238  if (shift < kSmiShift) {
1239    sar(dst, Immediate(kSmiShift - shift));
1240  } else {
1241    shl(dst, Immediate(shift - kSmiShift));
1242  }
1243  return SmiIndex(dst, times_1);
1244}
1245
1246
1247void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
1248  ASSERT_EQ(0, kSmiTag);
1249  Condition smi = CheckSmi(src);
1250  j(smi, on_smi);
1251}
1252
1253
1254void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
1255  Condition smi = CheckSmi(src);
1256  j(NegateCondition(smi), on_not_smi);
1257}
1258
1259
1260void MacroAssembler::JumpIfNotPositiveSmi(Register src,
1261                                          Label* on_not_positive_smi) {
1262  Condition positive_smi = CheckPositiveSmi(src);
1263  j(NegateCondition(positive_smi), on_not_positive_smi);
1264}
1265
1266
1267void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1268                                             Smi* constant,
1269                                             Label* on_equals) {
1270  SmiCompare(src, constant);
1271  j(equal, on_equals);
1272}
1273
1274
1275void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
1276  Condition is_valid = CheckInteger32ValidSmiValue(src);
1277  j(NegateCondition(is_valid), on_invalid);
1278}
1279
1280
1281void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1282                                                Label* on_invalid) {
1283  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1284  j(NegateCondition(is_valid), on_invalid);
1285}
1286
1287
1288void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
1289                                      Label* on_not_both_smi) {
1290  Condition both_smi = CheckBothSmi(src1, src2);
1291  j(NegateCondition(both_smi), on_not_both_smi);
1292}
1293
1294
1295void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
1296                                                         Register second_object,
1297                                                         Register scratch1,
1298                                                         Register scratch2,
1299                                                         Label* on_fail) {
1300  // Check that both objects are not smis.
1301  Condition either_smi = CheckEitherSmi(first_object, second_object);
1302  j(either_smi, on_fail);
1303
1304  // Load instance type for both strings.
1305  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
1306  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
1307  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
1308  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
1309
1310  // Check that both are flat ascii strings.
1311  ASSERT(kNotStringTag != 0);
1312  const int kFlatAsciiStringMask =
1313      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
1314  const int kFlatAsciiStringBits =
1315      kNotStringTag | kSeqStringTag | kAsciiStringTag;
1316
1317  andl(scratch1, Immediate(kFlatAsciiStringMask));
1318  andl(scratch2, Immediate(kFlatAsciiStringMask));
1319  // Interleave the bits to check both scratch1 and scratch2 in one test.
1320  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
1321  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
1322  cmpl(scratch1,
1323       Immediate(kFlatAsciiStringBits + (kFlatAsciiStringBits << 3)));
1324  j(not_equal, on_fail);
1325}
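// The interleaving above packs both masked instance types into one register:
// scratch1 + scratch2 * 8 keeps the two masked values in disjoint bit ranges
// (guaranteed by the ASSERT_EQ on the mask), so a single cmpl against
// kFlatAsciiStringBits + (kFlatAsciiStringBits << 3) tests both strings at
// once.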
1326
1327
1328void MacroAssembler::Move(Register dst, Handle<Object> source) {
1329  ASSERT(!source->IsFailure());
1330  if (source->IsSmi()) {
1331    Move(dst, Smi::cast(*source));
1332  } else {
1333    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
1334  }
1335}
1336
1337
1338void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
1339  ASSERT(!source->IsFailure());
1340  if (source->IsSmi()) {
1341    Move(dst, Smi::cast(*source));
1342  } else {
1343    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1344    movq(dst, kScratchRegister);
1345  }
1346}
1347
1348
1349void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1350  if (source->IsSmi()) {
1351    SmiCompare(dst, Smi::cast(*source));
1352  } else {
1353    Move(kScratchRegister, source);
1354    cmpq(dst, kScratchRegister);
1355  }
1356}
1357
1358
1359void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
1360  if (source->IsSmi()) {
1361    SmiCompare(dst, Smi::cast(*source));
1362  } else {
1363    ASSERT(source->IsHeapObject());
1364    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1365    cmpq(dst, kScratchRegister);
1366  }
1367}
1368
1369
1370void MacroAssembler::Push(Handle<Object> source) {
1371  if (source->IsSmi()) {
1372    Push(Smi::cast(*source));
1373  } else {
1374    ASSERT(source->IsHeapObject());
1375    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1376    push(kScratchRegister);
1377  }
1378}
1379
1380
1381void MacroAssembler::Push(Smi* source) {
1382  intptr_t smi = reinterpret_cast<intptr_t>(source);
1383  if (is_int32(smi)) {
1384    push(Immediate(static_cast<int32_t>(smi)));
1385  } else {
1386    Set(kScratchRegister, smi);
1387    push(kScratchRegister);
1388  }
1389}
1390
1391
1392void MacroAssembler::Drop(int stack_elements) {
1393  if (stack_elements > 0) {
1394    addq(rsp, Immediate(stack_elements * kPointerSize));
1395  }
1396}
1397
1398
1399void MacroAssembler::Test(const Operand& src, Smi* source) {
1400  intptr_t smi = reinterpret_cast<intptr_t>(source);
1401  if (is_int32(smi)) {
1402    testl(src, Immediate(static_cast<int32_t>(smi)));
1403  } else {
1404    Move(kScratchRegister, source);
1405    testq(src, kScratchRegister);
1406  }
1407}
1408
1409
1410void MacroAssembler::Jump(ExternalReference ext) {
1411  movq(kScratchRegister, ext);
1412  jmp(kScratchRegister);
1413}
1414
1415
1416void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1417  movq(kScratchRegister, destination, rmode);
1418  jmp(kScratchRegister);
1419}
1420
1421
1422void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
1423  // TODO(X64): Inline this
1424  jmp(code_object, rmode);
1425}
1426
1427
1428void MacroAssembler::Call(ExternalReference ext) {
1429  movq(kScratchRegister, ext);
1430  call(kScratchRegister);
1431}
1432
1433
1434void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1435  movq(kScratchRegister, destination, rmode);
1436  call(kScratchRegister);
1437}
1438
1439
1440void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1441  ASSERT(RelocInfo::IsCodeTarget(rmode));
1442  WriteRecordedPositions();
1443  call(code_object, rmode);
1444}
1445
1446
1447void MacroAssembler::PushTryHandler(CodeLocation try_location,
1448                                    HandlerType type) {
1449  // Adjust this code if not the case.
1450  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1451
1452  // The pc (return address) is already on TOS.  This code pushes state,
1453  // frame pointer and current handler.  Check that they are expected
1454  // next on the stack, in that order.
1455  ASSERT_EQ(StackHandlerConstants::kStateOffset,
1456            StackHandlerConstants::kPCOffset - kPointerSize);
1457  ASSERT_EQ(StackHandlerConstants::kFPOffset,
1458            StackHandlerConstants::kStateOffset - kPointerSize);
1459  ASSERT_EQ(StackHandlerConstants::kNextOffset,
1460            StackHandlerConstants::kFPOffset - kPointerSize);
1461
1462  if (try_location == IN_JAVASCRIPT) {
1463    if (type == TRY_CATCH_HANDLER) {
1464      push(Immediate(StackHandler::TRY_CATCH));
1465    } else {
1466      push(Immediate(StackHandler::TRY_FINALLY));
1467    }
1468    push(rbp);
1469  } else {
1470    ASSERT(try_location == IN_JS_ENTRY);
1471    // The frame pointer does not point to a JS frame so we save NULL
1472    // for rbp. We expect the code throwing an exception to check rbp
1473    // before dereferencing it to restore the context.
1474    push(Immediate(StackHandler::ENTRY));
1475    push(Immediate(0));  // NULL frame pointer.
1476  }
1477  // Save the current handler.
1478  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1479  push(Operand(kScratchRegister, 0));
1480  // Link this handler.
1481  movq(Operand(kScratchRegister, 0), rsp);
1482}
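// After PushTryHandler the top of the stack matches the StackHandlerConstants
// layout asserted above, from lowest to highest address: next handler, frame
// pointer (or 0 for JS entry frames), handler state, return address; the new
// rsp is then stored through Top::k_handler_address to link the handler.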
1483
1484
1485void MacroAssembler::PopTryHandler() {
1486  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
1487  // Unlink this handler.
1488  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1489  pop(Operand(kScratchRegister, 0));
1490  // Remove the remaining fields.
1491  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1492}
1493
1494
1495void MacroAssembler::Ret() {
1496  ret(0);
1497}
1498
1499
1500void MacroAssembler::FCmp() {
1501  fucomip();
1502  ffree(0);
1503  fincstp();
1504}
1505
1506
1507void MacroAssembler::CmpObjectType(Register heap_object,
1508                                   InstanceType type,
1509                                   Register map) {
1510  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1511  CmpInstanceType(map, type);
1512}
1513
1514
1515void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
1516  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
1517       Immediate(static_cast<int8_t>(type)));
1518}
1519
1520
1521void MacroAssembler::TryGetFunctionPrototype(Register function,
1522                                             Register result,
1523                                             Label* miss) {
1524  // Check that the receiver isn't a smi.
1525  testl(function, Immediate(kSmiTagMask));
1526  j(zero, miss);
1527
1528  // Check that the function really is a function.
1529  CmpObjectType(function, JS_FUNCTION_TYPE, result);
1530  j(not_equal, miss);
1531
1532  // Make sure that the function has an instance prototype.
1533  Label non_instance;
1534  testb(FieldOperand(result, Map::kBitFieldOffset),
1535        Immediate(1 << Map::kHasNonInstancePrototype));
1536  j(not_zero, &non_instance);
1537
1538  // Get the prototype or initial map from the function.
1539  movq(result,
1540       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1541
1542  // If the prototype or initial map is the hole, don't return it and
1543  // simply miss the cache instead. This will allow us to allocate a
1544  // prototype object on-demand in the runtime system.
1545  CompareRoot(result, Heap::kTheHoleValueRootIndex);
1546  j(equal, miss);
1547
1548  // If the function does not have an initial map, we're done.
1549  Label done;
1550  CmpObjectType(result, MAP_TYPE, kScratchRegister);
1551  j(not_equal, &done);
1552
1553  // Get the prototype from the initial map.
1554  movq(result, FieldOperand(result, Map::kPrototypeOffset));
1555  jmp(&done);
1556
1557  // Non-instance prototype: Fetch prototype from constructor field
1558  // in initial map.
1559  bind(&non_instance);
1560  movq(result, FieldOperand(result, Map::kConstructorOffset));
1561
1562  // All done.
1563  bind(&done);
1564}
1565
1566
1567void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
1568  if (FLAG_native_code_counters && counter->Enabled()) {
1569    movq(kScratchRegister, ExternalReference(counter));
1570    movl(Operand(kScratchRegister, 0), Immediate(value));
1571  }
1572}
1573
1574
1575void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
1576  ASSERT(value > 0);
1577  if (FLAG_native_code_counters && counter->Enabled()) {
1578    movq(kScratchRegister, ExternalReference(counter));
1579    Operand operand(kScratchRegister, 0);
1580    if (value == 1) {
1581      incl(operand);
1582    } else {
1583      addl(operand, Immediate(value));
1584    }
1585  }
1586}
1587
1588
1589void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
1590  ASSERT(value > 0);
1591  if (FLAG_native_code_counters && counter->Enabled()) {
1592    movq(kScratchRegister, ExternalReference(counter));
1593    Operand operand(kScratchRegister, 0);
1594    if (value == 1) {
1595      decl(operand);
1596    } else {
1597      subl(operand, Immediate(value));
1598    }
1599  }
1600}
1601
1602#ifdef ENABLE_DEBUGGER_SUPPORT
1603
1604void MacroAssembler::PushRegistersFromMemory(RegList regs) {
1605  ASSERT((regs & ~kJSCallerSaved) == 0);
1606  // Push the content of the memory location to the stack.
1607  for (int i = 0; i < kNumJSCallerSaved; i++) {
1608    int r = JSCallerSavedCode(i);
1609    if ((regs & (1 << r)) != 0) {
1610      ExternalReference reg_addr =
1611          ExternalReference(Debug_Address::Register(i));
1612      movq(kScratchRegister, reg_addr);
1613      push(Operand(kScratchRegister, 0));
1614    }
1615  }
1616}
1617
1618
1619void MacroAssembler::SaveRegistersToMemory(RegList regs) {
1620  ASSERT((regs & ~kJSCallerSaved) == 0);
1621  // Copy the content of registers to memory location.
1622  for (int i = 0; i < kNumJSCallerSaved; i++) {
1623    int r = JSCallerSavedCode(i);
1624    if ((regs & (1 << r)) != 0) {
1625      Register reg = { r };
1626      ExternalReference reg_addr =
1627          ExternalReference(Debug_Address::Register(i));
1628      movq(kScratchRegister, reg_addr);
1629      movq(Operand(kScratchRegister, 0), reg);
1630    }
1631  }
1632}
1633
1634
1635void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
1636  ASSERT((regs & ~kJSCallerSaved) == 0);
1637  // Copy the content of memory location to registers.
1638  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
1639    int r = JSCallerSavedCode(i);
1640    if ((regs & (1 << r)) != 0) {
1641      Register reg = { r };
1642      ExternalReference reg_addr =
1643          ExternalReference(Debug_Address::Register(i));
1644      movq(kScratchRegister, reg_addr);
1645      movq(reg, Operand(kScratchRegister, 0));
1646    }
1647  }
1648}
1649
1650
1651void MacroAssembler::PopRegistersToMemory(RegList regs) {
1652  ASSERT((regs & ~kJSCallerSaved) == 0);
1653  // Pop the content from the stack to the memory location.
1654  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
1655    int r = JSCallerSavedCode(i);
1656    if ((regs & (1 << r)) != 0) {
1657      ExternalReference reg_addr =
1658          ExternalReference(Debug_Address::Register(i));
1659      movq(kScratchRegister, reg_addr);
1660      pop(Operand(kScratchRegister, 0));
1661    }
1662  }
1663}
1664
1665
1666void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
1667                                                    Register scratch,
1668                                                    RegList regs) {
1669  ASSERT(!scratch.is(kScratchRegister));
1670  ASSERT(!base.is(kScratchRegister));
1671  ASSERT(!base.is(scratch));
1672  ASSERT((regs & ~kJSCallerSaved) == 0);
1673  // Copy the content of the stack to the memory location and adjust base.
1674  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
1675    int r = JSCallerSavedCode(i);
1676    if ((regs & (1 << r)) != 0) {
1677      movq(scratch, Operand(base, 0));
1678      ExternalReference reg_addr =
1679          ExternalReference(Debug_Address::Register(i));
1680      movq(kScratchRegister, reg_addr);
1681      movq(Operand(kScratchRegister, 0), scratch);
1682      lea(base, Operand(base, kPointerSize));
1683    }
1684  }
1685}
1686
1687#endif  // ENABLE_DEBUGGER_SUPPORT
1688
1689
1690void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
1691  bool resolved;
1692  Handle<Code> code = ResolveBuiltin(id, &resolved);
1693
1694  // Calls are not allowed in some stubs.
1695  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
1696
1697  // Rely on the assertion to check that the number of provided
1698  // arguments matches the expected number of arguments. Fake a
1699  // parameter count to avoid emitting code to do the check.
1700  ParameterCount expected(0);
1701  InvokeCode(Handle<Code>(code),
1702             expected,
1703             expected,
1704             RelocInfo::CODE_TARGET,
1705             flag);
1706
1707  const char* name = Builtins::GetName(id);
1708  int argc = Builtins::GetArgumentsCount(id);
1709  // The target address for the jump is stored as an immediate at offset
1710  // kCallTargetAddressOffset.
1711  if (!resolved) {
1712    uint32_t flags =
1713        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
1714        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
1715    Unresolved entry =
1716        { pc_offset() - kCallTargetAddressOffset, flags, name };
1717    unresolved_.Add(entry);
1718  }
1719}
1720
1721
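// Note: InvokePrologue implements the argument count check that precedes an
// invocation. By convention rax holds the actual argument count, rbx the
// expected count and rdx the code entry. If the counts are known to match,
// no check is emitted; otherwise the mismatch case calls or jumps to the
// ArgumentsAdaptorTrampoline builtin.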
1722void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1723                                    const ParameterCount& actual,
1724                                    Handle<Code> code_constant,
1725                                    Register code_register,
1726                                    Label* done,
1727                                    InvokeFlag flag) {
1728  bool definitely_matches = false;
1729  Label invoke;
1730  if (expected.is_immediate()) {
1731    ASSERT(actual.is_immediate());
1732    if (expected.immediate() == actual.immediate()) {
1733      definitely_matches = true;
1734    } else {
1735      movq(rax, Immediate(actual.immediate()));
1736      if (expected.immediate() ==
1737              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
1738        // Don't worry about adapting arguments for built-ins that
1739        // don't want that done. Skip adaptation code by making it look
1740        // like we have a match between expected and actual number of
1741        // arguments.
1742        definitely_matches = true;
1743      } else {
1744        movq(rbx, Immediate(expected.immediate()));
1745      }
1746    }
1747  } else {
1748    if (actual.is_immediate()) {
1749      // Expected is in register, actual is immediate. This is the
1750      // case when we invoke function values without going through the
1751      // IC mechanism.
1752      cmpq(expected.reg(), Immediate(actual.immediate()));
1753      j(equal, &invoke);
1754      ASSERT(expected.reg().is(rbx));
1755      movq(rax, Immediate(actual.immediate()));
1756    } else if (!expected.reg().is(actual.reg())) {
1757      // Both expected and actual are in (different) registers. This
1758      // is the case when we invoke functions using call and apply.
1759      cmpq(expected.reg(), actual.reg());
1760      j(equal, &invoke);
1761      ASSERT(actual.reg().is(rax));
1762      ASSERT(expected.reg().is(rbx));
1763    }
1764  }
1765
1766  if (!definitely_matches) {
1767    Handle<Code> adaptor =
1768        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
1769    if (!code_constant.is_null()) {
1770      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
1771      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
1772    } else if (!code_register.is(rdx)) {
1773      movq(rdx, code_register);
1774    }
1775
1776    if (flag == CALL_FUNCTION) {
1777      Call(adaptor, RelocInfo::CODE_TARGET);
1778      jmp(done);
1779    } else {
1780      Jump(adaptor, RelocInfo::CODE_TARGET);
1781    }
1782    bind(&invoke);
1783  }
1784}
1785
1786
1787void MacroAssembler::InvokeCode(Register code,
1788                                const ParameterCount& expected,
1789                                const ParameterCount& actual,
1790                                InvokeFlag flag) {
1791  Label done;
1792  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
1793  if (flag == CALL_FUNCTION) {
1794    call(code);
1795  } else {
1796    ASSERT(flag == JUMP_FUNCTION);
1797    jmp(code);
1798  }
1799  bind(&done);
1800}
1801
1802
1803void MacroAssembler::InvokeCode(Handle<Code> code,
1804                                const ParameterCount& expected,
1805                                const ParameterCount& actual,
1806                                RelocInfo::Mode rmode,
1807                                InvokeFlag flag) {
1808  Label done;
1809  Register dummy = rax;
1810  InvokePrologue(expected, actual, code, dummy, &done, flag);
1811  if (flag == CALL_FUNCTION) {
1812    Call(code, rmode);
1813  } else {
1814    ASSERT(flag == JUMP_FUNCTION);
1815    Jump(code, rmode);
1816  }
1817  bind(&done);
1818}
1819
1820
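// Note: InvokeFunction expects the JSFunction in rdi. It loads the context
// into rsi, the formal parameter count into rbx and the code entry into rdx
// before delegating to InvokeCode.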
1821void MacroAssembler::InvokeFunction(Register function,
1822                                    const ParameterCount& actual,
1823                                    InvokeFlag flag) {
1824  ASSERT(function.is(rdi));
1825  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
1826  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
1827  movsxlq(rbx,
1828          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
1829  movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
1830  // Advances rdx to the end of the Code object header, to the start of
1831  // the executable code.
1832  lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
1833
1834  ParameterCount expected(rbx);
1835  InvokeCode(rdx, expected, actual, flag);
1836}
1837
1838
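// Note: EnterFrame builds a standard internal frame. After the prologue the
// stack holds, from rbp downwards: the saved rbp, rsi (the context), a Smi
// frame type marker and the code object of the generated code.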
1839void MacroAssembler::EnterFrame(StackFrame::Type type) {
1840  push(rbp);
1841  movq(rbp, rsp);
1842  push(rsi);  // Context.
1843  Push(Smi::FromInt(type));
1844  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1845  push(kScratchRegister);
1846  if (FLAG_debug_code) {
1847    movq(kScratchRegister,
1848         Factory::undefined_value(),
1849         RelocInfo::EMBEDDED_OBJECT);
1850    cmpq(Operand(rsp, 0), kScratchRegister);
1851    Check(not_equal, "code object not properly patched");
1852  }
1853}
1854
1855
1856void MacroAssembler::LeaveFrame(StackFrame::Type type) {
1857  if (FLAG_debug_code) {
1858    Move(kScratchRegister, Smi::FromInt(type));
1859    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
1860    Check(equal, "stack frame types must match");
1861  }
1862  movq(rsp, rbp);
1863  pop(rbp);
1864}
1865
1866
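// Note: on entry rax is expected to hold the number of arguments; it is used
// below to compute argv. EnterExitFrame saves rax in r14 before clobbering it
// and leaves the argv pointer in the callee-saved register r15, which
// LeaveExitFrame later uses to remove the caller's arguments.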
1867void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
1868  // Setup the frame structure on the stack.
1869  // All constants are relative to the frame pointer of the exit frame.
1870  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
1871  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
1872  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
1873  push(rbp);
1874  movq(rbp, rsp);
1875
1876  // Reserve room for entry stack pointer and push the debug marker.
1877  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
1878  push(Immediate(0));  // saved entry sp, patched before call
1879  if (mode == ExitFrame::MODE_DEBUG) {
1880    push(Immediate(0));
1881  } else {
1882    movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1883    push(kScratchRegister);
1884  }
1885
1886  // Save the frame pointer and the context in top.
1887  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
1888  ExternalReference context_address(Top::k_context_address);
1889  movq(r14, rax);  // Backup rax before we use it.
1890
1891  movq(rax, rbp);
1892  store_rax(c_entry_fp_address);
1893  movq(rax, rsi);
1894  store_rax(context_address);
1895
1896  // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
1897  // so it must be retained across the C-call.
1898  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
1899  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
1900
1901#ifdef ENABLE_DEBUGGER_SUPPORT
1902  // Push the values of all JS caller-saved registers (read from their memory
1903  // locations) onto the stack. This is needed to allow nested break points.
1904  if (mode == ExitFrame::MODE_DEBUG) {
1905    // TODO(1243899): This should be symmetric to
1906    // CopyRegistersFromStackToMemory() but it isn't! rsp is assumed
1907    // correct here, but computed for the other call. Very error
1908    // prone! FIX THIS.  Actually there are deeper problems with
1909    // register saving than this asymmetry (see the bug report
1910    // associated with this issue).
1911    PushRegistersFromMemory(kJSCallerSaved);
1912  }
1913#endif
1914
1915#ifdef _WIN64
1916  // Reserve space on stack for result and argument structures, if necessary.
1917  int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
1918  // Reserve space for the Arguments object.  The Windows 64-bit ABI
1919  // requires us to pass this structure as a pointer to its location on
1920  // the stack.  The structure contains 2 values.
1921  int argument_stack_space = 2 * kPointerSize;
1922  // We also need backing space for 4 parameters, even though
1923  // we only pass one or two parameters, and they are passed in registers.
1924  int argument_mirror_space = 4 * kPointerSize;
1925  int total_stack_space =
1926      argument_mirror_space + argument_stack_space + result_stack_space;
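  // Illustrative example: with result_size == 2 this reserves
  // (4 + 2 + 2) * kPointerSize = 64 bytes via the subq below.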
1927  subq(rsp, Immediate(total_stack_space));
1928#endif
1929
1930  // Get the required frame alignment for the OS.
1931  static const int kFrameAlignment = OS::ActivationFrameAlignment();
1932  if (kFrameAlignment > 0) {
1933    ASSERT(IsPowerOf2(kFrameAlignment));
1934    movq(kScratchRegister, Immediate(-kFrameAlignment));
1935    and_(rsp, kScratchRegister);
1936  }
1937
1938  // Patch the saved entry sp.
1939  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
1940}
1941
1942
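// Note: LeaveExitFrame is the counterpart of EnterExitFrame. It restores the
// caller's frame pointer, pops the arguments and the receiver using the argv
// pointer kept in r15, restores the current context from Top and clears the
// saved C entry frame pointer.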
1943void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
1944  // Registers:
1945  // r15 : argv
1946#ifdef ENABLE_DEBUGGER_SUPPORT
1947  // Restore the memory copy of the registers by digging them out from
1948  // the stack. This is needed to allow nested break points.
1949  if (mode == ExitFrame::MODE_DEBUG) {
1950    // It's okay to clobber register rbx below because we don't need
1951    // the function pointer after this.
1952    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
1953    int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
1954    lea(rbx, Operand(rbp, kOffset));
1955    CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
1956  }
1957#endif
1958
1959  // Get the return address from the stack and restore the frame pointer.
1960  movq(rcx, Operand(rbp, 1 * kPointerSize));
1961  movq(rbp, Operand(rbp, 0 * kPointerSize));
1962
1963  // Pop everything up to and including the arguments and the receiver
1964  // from the caller stack.
1965  lea(rsp, Operand(r15, 1 * kPointerSize));
1966
1967  // Restore current context from top and clear it in debug mode.
1968  ExternalReference context_address(Top::k_context_address);
1969  movq(kScratchRegister, context_address);
1970  movq(rsi, Operand(kScratchRegister, 0));
1971#ifdef DEBUG
1972  movq(Operand(kScratchRegister, 0), Immediate(0));
1973#endif
1974
1975  // Push the return address to get ready to return.
1976  push(rcx);
1977
1978  // Clear the top frame.
1979  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
1980  movq(kScratchRegister, c_entry_fp_address);
1981  movq(Operand(kScratchRegister, 0), Immediate(0));
1982}
1983
1984
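// Note: CheckMaps walks the prototype chain from 'object' to 'holder',
// comparing each object's map against the map recorded at code generation
// time. Prototypes in new space cannot be embedded in the code directly, so
// they are reloaded from the map of the previous object instead.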
1985Register MacroAssembler::CheckMaps(JSObject* object,
1986                                   Register object_reg,
1987                                   JSObject* holder,
1988                                   Register holder_reg,
1989                                   Register scratch,
1990                                   Label* miss) {
1991  // Make sure there's no overlap between scratch and the other
1992  // registers.
1993  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
1994
1995  // Keep track of the current object in register reg.  On the first
1996  // iteration, reg is an alias for object_reg; on later iterations,
1997  // it is an alias for holder_reg.
1998  Register reg = object_reg;
1999  int depth = 1;
2000
2001  // Traverse the prototype chain from the object and do a map check at
2002  // each step.
2003  while (object != holder) {
2004    depth++;
2005
2006    // Only global objects and objects that do not require access
2007    // checks are allowed in stubs.
2008    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
2009
2010    JSObject* prototype = JSObject::cast(object->GetPrototype());
2011    if (Heap::InNewSpace(prototype)) {
2012      // Get the map of the current object.
2013      movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
2014      Cmp(scratch, Handle<Map>(object->map()));
2015      // Branch on the result of the map check.
2016      j(not_equal, miss);
2017      // Check access rights to the global object.  This has to happen
2018      // after the map check so that we know that the object is
2019      // actually a global object.
2020      if (object->IsJSGlobalProxy()) {
2021        CheckAccessGlobalProxy(reg, scratch, miss);
2022
2023        // Restore scratch register to be the map of the object.
2024        // We load the prototype from the map in the scratch register.
2025        movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
2026      }
2027      // The prototype is in new space; we cannot store a reference
2028      // to it in the code. Load it from the map.
2029      reg = holder_reg;  // from now the object is in holder_reg
2030      movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));
2031
2032    } else {
2033      // Check the map of the current object.
2034      Cmp(FieldOperand(reg, HeapObject::kMapOffset),
2035          Handle<Map>(object->map()));
2036      // Branch on the result of the map check.
2037      j(not_equal, miss);
2038      // Check access rights to the global object.  This has to happen
2039      // after the map check so that we know that the object is
2040      // actually a global object.
2041      if (object->IsJSGlobalProxy()) {
2042        CheckAccessGlobalProxy(reg, scratch, miss);
2043      }
2044      // The prototype is in old space; load it directly.
2045      reg = holder_reg;  // from now the object is in holder_reg
2046      Move(reg, Handle<JSObject>(prototype));
2047    }
2048
2049    // Go to the next object in the prototype chain.
2050    object = prototype;
2051  }
2052
2053  // Check the holder map.
2054  Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
2055  j(not_equal, miss);
2056
2057  // Log the check depth.
2058  LOG(IntEvent("check-maps-depth", depth));
2059
2060  // Perform security check for access to the global object and return
2061  // the holder register.
2062  ASSERT(object == holder);
2063  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
2064  if (object->IsJSGlobalProxy()) {
2065    CheckAccessGlobalProxy(reg, scratch, miss);
2066  }
2067  return reg;
2068}
2069
2070
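// Note: CheckAccessGlobalProxy compares the security token of the current
// (calling) global context with the token of the context attached to the
// holder's global proxy. Control falls through if the contexts are the same
// or the tokens match, and jumps to 'miss' otherwise.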
2071void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
2072                                            Register scratch,
2073                                            Label* miss) {
2074  Label same_contexts;
2075
2076  ASSERT(!holder_reg.is(scratch));
2077  ASSERT(!scratch.is(kScratchRegister));
2078  // Load current lexical context from the stack frame.
2079  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
2080
2081  // When generating debug code, make sure the lexical context is set.
2082  if (FLAG_debug_code) {
2083    cmpq(scratch, Immediate(0));
2084    Check(not_equal, "we should not have an empty lexical context");
2085  }
2086  // Load the global context of the current context.
2087  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
2088  movq(scratch, FieldOperand(scratch, offset));
2089  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
2090
2091  // Check the context is a global context.
2092  if (FLAG_debug_code) {
2093    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
2094        Factory::global_context_map());
2095    Check(equal, "JSGlobalObject::global_context should be a global context.");
2096  }
2097
2098  // Check if both contexts are the same.
2099  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2100  j(equal, &same_contexts);
2101
2102  // Compare security tokens.
2103  // Check that the security token in the calling global object is
2104  // compatible with the security token in the receiving global
2105  // object.
2106
2107  // Check the context is a global context.
2108  if (FLAG_debug_code) {
2109    // Preserve original value of holder_reg.
2110    push(holder_reg);
2111    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2112    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
2113    Check(not_equal, "JSGlobalProxy::context() should not be null.");
2114
2115    // Read the first word and compare to global_context_map().
2116    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
2117    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
2118    Check(equal, "JSGlobalObject::global_context should be a global context.");
2119    pop(holder_reg);
2120  }
2121
2122  movq(kScratchRegister,
2123       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
2124  int token_offset =
2125      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
2126  movq(scratch, FieldOperand(scratch, token_offset));
2127  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
2128  j(not_equal, miss);
2129
2130  bind(&same_contexts);
2131}
2132
2133
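// Note: LoadAllocationTopHelper loads the new-space allocation top into
// 'result'. With RESULT_CONTAINS_TOP the caller guarantees that 'result'
// already holds the top and 'scratch' must be no_reg. If a scratch register
// is supplied, it is left holding the address of the allocation top so that
// UpdateAllocationTopHelper can store through it without reloading.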
2134void MacroAssembler::LoadAllocationTopHelper(Register result,
2135                                             Register result_end,
2136                                             Register scratch,
2137                                             AllocationFlags flags) {
2138  ExternalReference new_space_allocation_top =
2139      ExternalReference::new_space_allocation_top_address();
2140
2141  // Just return if allocation top is already known.
2142  if ((flags & RESULT_CONTAINS_TOP) != 0) {
2143    // No use of scratch if allocation top is provided.
2144    ASSERT(scratch.is(no_reg));
2145#ifdef DEBUG
2146    // Assert that result actually contains top on entry.
2147    movq(kScratchRegister, new_space_allocation_top);
2148    cmpq(result, Operand(kScratchRegister, 0));
2149    Check(equal, "Unexpected allocation top");
2150#endif
2151    return;
2152  }
2153
2154  // Move address of new object to result. Use scratch register if available.
2155  if (scratch.is(no_reg)) {
2156    movq(kScratchRegister, new_space_allocation_top);
2157    movq(result, Operand(kScratchRegister, 0));
2158  } else {
2159    ASSERT(!scratch.is(result_end));
2160    movq(scratch, new_space_allocation_top);
2161    movq(result, Operand(scratch, 0));
2162  }
2163}
2164
2165
2166void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
2167                                               Register scratch) {
2168  if (FLAG_debug_code) {
2169    testq(result_end, Immediate(kObjectAlignmentMask));
2170    Check(zero, "Unaligned allocation in new space");
2171  }
2172
2173  ExternalReference new_space_allocation_top =
2174      ExternalReference::new_space_allocation_top_address();
2175
2176  // Update new top.
2177  if (result_end.is(rax)) {
2178    // rax can be stored directly to a memory location.
2179    store_rax(new_space_allocation_top);
2180  } else {
2181    // Register required - use scratch provided if available.
2182    if (scratch.is(no_reg)) {
2183      movq(kScratchRegister, new_space_allocation_top);
2184      movq(Operand(kScratchRegister, 0), result_end);
2185    } else {
2186      movq(Operand(scratch, 0), result_end);
2187    }
2188  }
2189}
2190
2191
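// Note: the AllocateInNewSpace overloads implement bump-pointer allocation:
// 'result' receives the current allocation top, 'result_end' is set to the
// end of the new object, and the code jumps to 'gc_required' if that would
// exceed the new-space limit. With TAG_OBJECT the result is returned as a
// tagged heap object pointer.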
2192void MacroAssembler::AllocateInNewSpace(int object_size,
2193                                        Register result,
2194                                        Register result_end,
2195                                        Register scratch,
2196                                        Label* gc_required,
2197                                        AllocationFlags flags) {
2198  ASSERT(!result.is(result_end));
2199
2200  // Load address of new object into result.
2201  LoadAllocationTopHelper(result, result_end, scratch, flags);
2202
2203  // Calculate new top and bail out if new space is exhausted.
2204  ExternalReference new_space_allocation_limit =
2205      ExternalReference::new_space_allocation_limit_address();
2206  lea(result_end, Operand(result, object_size));
2207  movq(kScratchRegister, new_space_allocation_limit);
2208  cmpq(result_end, Operand(kScratchRegister, 0));
2209  j(above, gc_required);
2210
2211  // Update allocation top.
2212  UpdateAllocationTopHelper(result_end, scratch);
2213
2214  // Tag the result if requested.
2215  if ((flags & TAG_OBJECT) != 0) {
2216    addq(result, Immediate(kHeapObjectTag));
2217  }
2218}
2219
2220
2221void MacroAssembler::AllocateInNewSpace(int header_size,
2222                                        ScaleFactor element_size,
2223                                        Register element_count,
2224                                        Register result,
2225                                        Register result_end,
2226                                        Register scratch,
2227                                        Label* gc_required,
2228                                        AllocationFlags flags) {
2229  ASSERT(!result.is(result_end));
2230
2231  // Load address of new object into result.
2232  LoadAllocationTopHelper(result, result_end, scratch, flags);
2233
2234  // Calculate new top and bail out if new space is exhausted.
2235  ExternalReference new_space_allocation_limit =
2236      ExternalReference::new_space_allocation_limit_address();
2237  lea(result_end, Operand(result, element_count, element_size, header_size));
2238  movq(kScratchRegister, new_space_allocation_limit);
2239  cmpq(result_end, Operand(kScratchRegister, 0));
2240  j(above, gc_required);
2241
2242  // Update allocation top.
2243  UpdateAllocationTopHelper(result_end, scratch);
2244
2245  // Tag the result if requested.
2246  if ((flags & TAG_OBJECT) != 0) {
2247    addq(result, Immediate(kHeapObjectTag));
2248  }
2249}
2250
2251
2252void MacroAssembler::AllocateInNewSpace(Register object_size,
2253                                        Register result,
2254                                        Register result_end,
2255                                        Register scratch,
2256                                        Label* gc_required,
2257                                        AllocationFlags flags) {
2258  // Load address of new object into result.
2259  LoadAllocationTopHelper(result, result_end, scratch, flags);
2260
2261  // Calculate new top and bail out if new space is exhausted.
2262  ExternalReference new_space_allocation_limit =
2263      ExternalReference::new_space_allocation_limit_address();
2264  if (!object_size.is(result_end)) {
2265    movq(result_end, object_size);
2266  }
2267  addq(result_end, result);
2268  movq(kScratchRegister, new_space_allocation_limit);
2269  cmpq(result_end, Operand(kScratchRegister, 0));
2270  j(above, gc_required);
2271
2272  // Update allocation top.
2273  UpdateAllocationTopHelper(result_end, scratch);
2274
2275  // Tag the result if requested.
2276  if ((flags & TAG_OBJECT) != 0) {
2277    addq(result, Immediate(kHeapObjectTag));
2278  }
2279}
2280
2281
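// Note: UndoAllocationInNewSpace assumes that 'object' is the most recently
// allocated object and simply moves the new-space allocation top back to its
// (untagged) start address.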
2282void MacroAssembler::UndoAllocationInNewSpace(Register object) {
2283  ExternalReference new_space_allocation_top =
2284      ExternalReference::new_space_allocation_top_address();
2285
2286  // Make sure the object has no tag before resetting top.
2287  and_(object, Immediate(~kHeapObjectTagMask));
2288  movq(kScratchRegister, new_space_allocation_top);
2289#ifdef DEBUG
2290  cmpq(object, Operand(kScratchRegister, 0));
2291  Check(below, "Undo allocation of non allocated memory");
2292#endif
2293  movq(Operand(kScratchRegister, 0), object);
2294}
2295
2296
2297void MacroAssembler::AllocateHeapNumber(Register result,
2298                                        Register scratch,
2299                                        Label* gc_required) {
2300  // Allocate heap number in new space.
2301  AllocateInNewSpace(HeapNumber::kSize,
2302                     result,
2303                     scratch,
2304                     no_reg,
2305                     gc_required,
2306                     TAG_OBJECT);
2307
2308  // Set the map.
2309  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
2310  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2311}
2312
2313
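// Illustrative example: for length == 3 the character data needs 2 * 3 = 6
// bytes, which the alignment mask rounds up to 8 (one kObjectAlignment unit
// on x64), in addition to the SeqTwoByteString header allocated below.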
2314void MacroAssembler::AllocateTwoByteString(Register result,
2315                                           Register length,
2316                                           Register scratch1,
2317                                           Register scratch2,
2318                                           Register scratch3,
2319                                           Label* gc_required) {
2320  // Calculate the number of bytes needed for the characters in the string
2321  // while observing object alignment.
2322  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2323  ASSERT(kShortSize == 2);
2324  // scratch1 = length * 2 + kObjectAlignmentMask.
2325  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
2326  and_(scratch1, Immediate(~kObjectAlignmentMask));
2327
2328  // Allocate two byte string in new space.
2329  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
2330                     times_1,
2331                     scratch1,
2332                     result,
2333                     scratch2,
2334                     scratch3,
2335                     gc_required,
2336                     TAG_OBJECT);
2337
2338  // Set the map, length and hash field.
2339  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
2340  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2341  movl(FieldOperand(result, String::kLengthOffset), length);
2342  movl(FieldOperand(result, String::kHashFieldOffset),
2343       Immediate(String::kEmptyHashField));
2344}
2345
2346
2347void MacroAssembler::AllocateAsciiString(Register result,
2348                                         Register length,
2349                                         Register scratch1,
2350                                         Register scratch2,
2351                                         Register scratch3,
2352                                         Label* gc_required) {
2353  // Calculate the number of bytes needed for the characters in the string
2354  // while observing object alignment.
2355  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
2356  movl(scratch1, length);
2357  ASSERT(kCharSize == 1);
2358  addq(scratch1, Immediate(kObjectAlignmentMask));
2359  and_(scratch1, Immediate(~kObjectAlignmentMask));
2360
2361  // Allocate ascii string in new space.
2362  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
2363                     times_1,
2364                     scratch1,
2365                     result,
2366                     scratch2,
2367                     scratch3,
2368                     gc_required,
2369                     TAG_OBJECT);
2370
2371  // Set the map, length and hash field.
2372  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
2373  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2374  movl(FieldOperand(result, String::kLengthOffset), length);
2375  movl(FieldOperand(result, String::kHashFieldOffset),
2376       Immediate(String::kEmptyHashField));
2377}
2378
2379
2380void MacroAssembler::AllocateConsString(Register result,
2381                                        Register scratch1,
2382                                        Register scratch2,
2383                                        Label* gc_required) {
2384  // Allocate cons string in new space.
2385  AllocateInNewSpace(ConsString::kSize,
2386                     result,
2387                     scratch1,
2388                     scratch2,
2389                     gc_required,
2390                     TAG_OBJECT);
2391
2392  // Set the map. The other fields are left uninitialized.
2393  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
2394  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2395}
2396
2397
2398void MacroAssembler::AllocateAsciiConsString(Register result,
2399                                             Register scratch1,
2400                                             Register scratch2,
2401                                             Label* gc_required) {
2402  // Allocate ascii cons string in new space.
2403  AllocateInNewSpace(ConsString::kSize,
2404                     result,
2405                     scratch1,
2406                     scratch2,
2407                     gc_required,
2408                     TAG_OBJECT);
2409
2410  // Set the map. The other fields are left uninitialized.
2411  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
2412  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2413}
2414
2415
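// Note: LoadContext walks 'context_chain_length' links up the context chain,
// following the closure of each context to the next outer context, and always
// finishes by loading the nearest enclosing function context (FCONTEXT).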
2416void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2417  if (context_chain_length > 0) {
2418    // Move up the chain of contexts to the context containing the slot.
2419    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
2420    // Load the function context (which is the incoming, outer context).
2421    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2422    for (int i = 1; i < context_chain_length; i++) {
2423      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2424      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2425    }
2426    // The context may be an intermediate context, not a function context.
2427    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2428  } else {  // context is the current function context.
2429    // The context may be an intermediate context, not a function context.
2430    movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2431  }
2432}
2433
2434
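// Illustrative sketch (assuming the masm() accessor declared in the header):
//   CodePatcher patcher(address, 1);   // redirect assembler output to address
//   patcher.masm()->int3();            // overwrite one byte with a breakpoint
// The destructor below flushes the instruction cache and checks that exactly
// 'size' bytes were emitted.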
2435CodePatcher::CodePatcher(byte* address, int size)
2436    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
2437  // Create a new macro assembler pointing to the address of the code to patch.
2438  // The size is adjusted with kGap in order for the assembler to generate size
2439  // bytes of instructions without failing with buffer size constraints.
2440  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2441}
2442
2443
2444CodePatcher::~CodePatcher() {
2445  // Indicate that code has changed.
2446  CPU::FlushICache(address_, size_);
2447
2448  // Check that the code was patched as expected.
2449  ASSERT(masm_.pc_ == address_ + size_);
2450  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2451}
2452
2453} }  // namespace v8::internal
2454