macro-assembler-x64.cc revision 8a31eba00023874d4a1dcdc5f411cc4336776874
1// Copyright 2010 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_X64)
31
32#include "bootstrapper.h"
33#include "codegen-inl.h"
34#include "assembler-x64.h"
35#include "macro-assembler-x64.h"
36#include "serialize.h"
37#include "debug.h"
38#include "heap.h"
39
40namespace v8 {
41namespace internal {
42
43MacroAssembler::MacroAssembler(void* buffer, int size)
44    : Assembler(buffer, size),
45      generating_stub_(false),
46      allow_stub_calls_(true),
47      code_object_(Heap::undefined_value()) {
48}
49
50
51void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
52  movq(destination, Operand(kRootRegister, index << kPointerSizeLog2));
53}
54
55
56void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
57  movq(Operand(kRootRegister, index << kPointerSizeLog2), source);
58}
59
60
61void MacroAssembler::PushRoot(Heap::RootListIndex index) {
62  push(Operand(kRootRegister, index << kPointerSizeLog2));
63}
64
65
66void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
67  cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2));
68}
69
70
71void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
72  LoadRoot(kScratchRegister, index);
73  cmpq(with, kScratchRegister);
74}
75
76
77void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
78  CompareRoot(rsp, Heap::kStackLimitRootIndex);
79  j(below, on_stack_overflow);
80}
81
82
83void MacroAssembler::RecordWriteHelper(Register object,
84                                       Register addr,
85                                       Register scratch) {
86  if (FLAG_debug_code) {
87    // Check that the object is not in new space.
88    NearLabel not_in_new_space;
89    InNewSpace(object, scratch, not_equal, &not_in_new_space);
90    Abort("new-space object passed to RecordWriteHelper");
91    bind(&not_in_new_space);
92  }
93
94  // Compute the page start address from the heap object pointer, and reuse
95  // the 'object' register for it.
96  and_(object, Immediate(~Page::kPageAlignmentMask));
97
98  // Compute the number of the region covering addr. See the
99  // Page::GetRegionNumberForAddress method for more details.
100  shrl(addr, Immediate(Page::kRegionSizeLog2));
101  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
102
103  // Set dirty mark for region.
104  bts(Operand(object, Page::kDirtyFlagOffset), addr);
105}
106
107
108void MacroAssembler::RecordWrite(Register object,
109                                 int offset,
110                                 Register value,
111                                 Register index) {
112  // The compiled code assumes that record write doesn't change the
113  // context register, so we check that none of the clobbered
114  // registers are rsi.
115  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
116
117  // First, check if a write barrier is even needed. The tests below
118  // catch stores of Smis and stores into young gen.
119  Label done;
120  JumpIfSmi(value, &done);
121
122  RecordWriteNonSmi(object, offset, value, index);
123  bind(&done);
124
125  // Clobber all input registers when running with the debug-code flag
126  // turned on to provoke errors. This clobbering repeats the
127  // clobbering done inside RecordWriteNonSmi but it's necessary to
128  // avoid having the fast case for smis leave the registers
129  // unchanged.
130  if (FLAG_debug_code) {
131    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
132    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
133    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
134  }
135}
136
137
138void MacroAssembler::RecordWrite(Register object,
139                                 Register address,
140                                 Register value) {
141  // The compiled code assumes that record write doesn't change the
142  // context register, so we check that none of the clobbered
143  // registers are rsi.
144  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
145
146  // First, check if a write barrier is even needed. The tests below
147  // catch stores of Smis and stores into young gen.
148  Label done;
149  JumpIfSmi(value, &done);
150
151  InNewSpace(object, value, equal, &done);
152
153  RecordWriteHelper(object, address, value);
154
155  bind(&done);
156
157  // Clobber all input registers when running with the debug-code flag
158  // turned on to provoke errors.
159  if (FLAG_debug_code) {
160    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
161    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
162    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
163  }
164}
165
166
167void MacroAssembler::RecordWriteNonSmi(Register object,
168                                       int offset,
169                                       Register scratch,
170                                       Register index) {
171  Label done;
172
173  if (FLAG_debug_code) {
174    NearLabel okay;
175    JumpIfNotSmi(object, &okay);
176    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
177    bind(&okay);
178
179    if (offset == 0) {
180      // index must be int32.
181      Register tmp = index.is(rax) ? rbx : rax;
182      push(tmp);
183      movl(tmp, index);
184      cmpq(tmp, index);
185      Check(equal, "Index register for RecordWrite must be untagged int32.");
186      pop(tmp);
187    }
188  }
189
190  // Test that the object address is not in the new space. We cannot
191  // update page dirty marks for new space pages.
192  InNewSpace(object, scratch, equal, &done);
193
194  // The offset is relative to a tagged or untagged HeapObject pointer,
195  // so either offset or offset + kHeapObjectTag must be a
196  // multiple of kPointerSize.
197  ASSERT(IsAligned(offset, kPointerSize) ||
198         IsAligned(offset + kHeapObjectTag, kPointerSize));
199
200  Register dst = index;
201  if (offset != 0) {
202    lea(dst, Operand(object, offset));
203  } else {
204    // array access: calculate the destination address in the same manner as
205    // KeyedStoreIC::GenerateGeneric.
206    lea(dst, FieldOperand(object,
207                          index,
208                          times_pointer_size,
209                          FixedArray::kHeaderSize));
210  }
211  RecordWriteHelper(object, dst, scratch);
212
213  bind(&done);
214
215  // Clobber all input registers when running with the debug-code flag
216  // turned on to provoke errors.
217  if (FLAG_debug_code) {
218    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
219    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
220    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
221  }
222}
223
224void MacroAssembler::Assert(Condition cc, const char* msg) {
225  if (FLAG_debug_code) Check(cc, msg);
226}
227
228
229void MacroAssembler::AssertFastElements(Register elements) {
230  if (FLAG_debug_code) {
231    NearLabel ok;
232    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
233                Heap::kFixedArrayMapRootIndex);
234    j(equal, &ok);
235    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
236                Heap::kFixedCOWArrayMapRootIndex);
237    j(equal, &ok);
238    Abort("JSObject with fast elements map has slow elements");
239    bind(&ok);
240  }
241}
242
243
244void MacroAssembler::Check(Condition cc, const char* msg) {
245  NearLabel L;
246  j(cc, &L);
247  Abort(msg);
248  // will not return here
249  bind(&L);
250}
251
252
253void MacroAssembler::CheckStackAlignment() {
254  int frame_alignment = OS::ActivationFrameAlignment();
255  int frame_alignment_mask = frame_alignment - 1;
256  if (frame_alignment > kPointerSize) {
257    ASSERT(IsPowerOf2(frame_alignment));
258    NearLabel alignment_as_expected;
259    testq(rsp, Immediate(frame_alignment_mask));
260    j(zero, &alignment_as_expected);
261    // Abort if stack is not aligned.
262    int3();
263    bind(&alignment_as_expected);
264  }
265}
266
267
268void MacroAssembler::NegativeZeroTest(Register result,
269                                      Register op,
270                                      Label* then_label) {
271  NearLabel ok;
272  testl(result, result);
273  j(not_zero, &ok);
274  testl(op, op);
275  j(sign, then_label);
276  bind(&ok);
277}
278
279
280void MacroAssembler::Abort(const char* msg) {
281  // We want to pass the msg string like a smi to avoid GC
282  // problems; however, msg is not guaranteed to be aligned
283  // properly. Instead, we pass an aligned pointer that is
284  // a proper v8 smi, but also pass the alignment difference
285  // from the real pointer as a smi.
286  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
287  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
288  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
289  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
290#ifdef DEBUG
291  if (msg != NULL) {
292    RecordComment("Abort message: ");
293    RecordComment(msg);
294  }
295#endif
296  // Disable stub call restrictions to always allow calls to abort.
297  set_allow_stub_calls(true);
298
299  push(rax);
300  movq(kScratchRegister, p0, RelocInfo::NONE);
301  push(kScratchRegister);
302  movq(kScratchRegister,
303       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
304       RelocInfo::NONE);
305  push(kScratchRegister);
306  CallRuntime(Runtime::kAbort, 2);
307  // will not return here
308  int3();
309}
310
311
312void MacroAssembler::CallStub(CodeStub* stub) {
313  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
314  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
315}
316
317
318MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
319  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
320  MaybeObject* result = stub->TryGetCode();
321  if (!result->IsFailure()) {
322    call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
323         RelocInfo::CODE_TARGET);
324  }
325  return result;
326}
327
328
329void MacroAssembler::TailCallStub(CodeStub* stub) {
330  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
331  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
332}
333
334
335MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
336  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
337  MaybeObject* result = stub->TryGetCode();
338  if (!result->IsFailure()) {
339    jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
340        RelocInfo::CODE_TARGET);
341  }
342  return result;
343}
344
345
346void MacroAssembler::StubReturn(int argc) {
347  ASSERT(argc >= 1 && generating_stub());
348  ret((argc - 1) * kPointerSize);
349}
350
351
352void MacroAssembler::IllegalOperation(int num_arguments) {
353  if (num_arguments > 0) {
354    addq(rsp, Immediate(num_arguments * kPointerSize));
355  }
356  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
357}
358
359
360void MacroAssembler::IndexFromHash(Register hash, Register index) {
361  // The assert checks that the constants for the maximum number of digits
362  // for an array index cached in the hash field and the number of bits
363  // reserved for it do not conflict.
364  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
365         (1 << String::kArrayIndexValueBits));
366  // We want the smi-tagged index in key. Even if we subsequently go to
367  // the slow case, converting the key to a smi is always valid.
368  // key: string key
369  // hash: key's hash field, including its array index value.
370  and_(hash, Immediate(String::kArrayIndexValueMask));
371  shr(hash, Immediate(String::kHashShift));
372  // Here we actually clobber the key, which will be used if calling into the
373  // runtime later. However, as the new key is the numeric value of a string key,
374  // there is no difference in using either key.
375  Integer32ToSmi(index, hash);
376}
377
378
379void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
380  CallRuntime(Runtime::FunctionForId(id), num_arguments);
381}
382
383
384MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
385                                            int num_arguments) {
386  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
387}
388
389
390void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
391  // If the expected number of arguments of the runtime function is
392  // constant, we check that the actual number of arguments matches the
393  // expectation.
394  if (f->nargs >= 0 && f->nargs != num_arguments) {
395    IllegalOperation(num_arguments);
396    return;
397  }
398
399  // TODO(1236192): Most runtime routines don't need the number of
400  // arguments passed in because it is constant. At some point we
401  // should remove this need and make the runtime routine entry code
402  // smarter.
403  Set(rax, num_arguments);
404  movq(rbx, ExternalReference(f));
405  CEntryStub ces(f->result_size);
406  CallStub(&ces);
407}
408
409
410MaybeObject* MacroAssembler::TryCallRuntime(Runtime::Function* f,
411                                            int num_arguments) {
412  if (f->nargs >= 0 && f->nargs != num_arguments) {
413    IllegalOperation(num_arguments);
414    // Since we did not call the stub, there was no allocation failure.
415    // Return some non-failure object.
416    return Heap::undefined_value();
417  }
418
419  // TODO(1236192): Most runtime routines don't need the number of
420  // arguments passed in because it is constant. At some point we
421  // should remove this need and make the runtime routine entry code
422  // smarter.
423  Set(rax, num_arguments);
424  movq(rbx, ExternalReference(f));
425  CEntryStub ces(f->result_size);
426  return TryCallStub(&ces);
427}
428
429
430void MacroAssembler::CallExternalReference(const ExternalReference& ext,
431                                           int num_arguments) {
432  Set(rax, num_arguments);
433  movq(rbx, ext);
434
435  CEntryStub stub(1);
436  CallStub(&stub);
437}
438
439
440void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
441                                               int num_arguments,
442                                               int result_size) {
443  // ----------- S t a t e -------------
444  //  -- rsp[0] : return address
445  //  -- rsp[8] : argument num_arguments - 1
446  //  ...
447  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
448  // -----------------------------------
449
450  // TODO(1236192): Most runtime routines don't need the number of
451  // arguments passed in because it is constant. At some point we
452  // should remove this need and make the runtime routine entry code
453  // smarter.
454  Set(rax, num_arguments);
455  JumpToExternalReference(ext, result_size);
456}
457
458
459MaybeObject* MacroAssembler::TryTailCallExternalReference(
460    const ExternalReference& ext, int num_arguments, int result_size) {
461  // ----------- S t a t e -------------
462  //  -- rsp[0] : return address
463  //  -- rsp[8] : argument num_arguments - 1
464  //  ...
465  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
466  // -----------------------------------
467
468  // TODO(1236192): Most runtime routines don't need the number of
469  // arguments passed in because it is constant. At some point we
470  // should remove this need and make the runtime routine entry code
471  // smarter.
472  Set(rax, num_arguments);
473  return TryJumpToExternalReference(ext, result_size);
474}
475
476
477void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
478                                     int num_arguments,
479                                     int result_size) {
480  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
481}
482
483
484MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
485                                                int num_arguments,
486                                                int result_size) {
487  return TryTailCallExternalReference(ExternalReference(fid),
488                                      num_arguments,
489                                      result_size);
490}
491
492
493static int Offset(ExternalReference ref0, ExternalReference ref1) {
494  int64_t offset = (ref0.address() - ref1.address());
495  // Check that it fits into an int.
496  ASSERT(static_cast<int>(offset) == offset);
497  return static_cast<int>(offset);
498}
499
500
501void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
502#ifdef _WIN64
503  // We need to prepare a slot for the result handle on the stack and put
504  // a pointer to it into the first argument register.
505  EnterApiExitFrame(arg_stack_space + 1);
506
507  // rcx must be used to pass the pointer to the return value slot.
508  lea(rcx, StackSpaceOperand(arg_stack_space));
509#else
510  EnterApiExitFrame(arg_stack_space);
511#endif
512}
513
514
515MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
516    ApiFunction* function, int stack_space) {
517  Label empty_result;
518  Label prologue;
519  Label promote_scheduled_exception;
520  Label delete_allocated_handles;
521  Label leave_exit_frame;
522  Label write_back;
523
524  ExternalReference next_address =
525      ExternalReference::handle_scope_next_address();
526  const int kNextOffset = 0;
527  const int kLimitOffset = Offset(
528      ExternalReference::handle_scope_limit_address(),
529      next_address);
530  const int kLevelOffset = Offset(
531      ExternalReference::handle_scope_level_address(),
532      next_address);
533  ExternalReference scheduled_exception_address =
534      ExternalReference::scheduled_exception_address();
535
536  // Allocate HandleScope in callee-save registers.
537  Register prev_next_address_reg = r14;
538  Register prev_limit_reg = rbx;
539  Register base_reg = r12;
540  movq(base_reg, next_address);
541  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
542  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
543  addl(Operand(base_reg, kLevelOffset), Immediate(1));
544  // Call the api function!
545  movq(rax,
546       reinterpret_cast<int64_t>(function->address()),
547       RelocInfo::RUNTIME_ENTRY);
548  call(rax);
549
550#ifdef _WIN64
551  // rax holds a pointer to the v8::Handle; unpack it.
552  movq(rax, Operand(rax, 0));
553#endif
554  // Check if the result handle holds 0.
555  testq(rax, rax);
556  j(zero, &empty_result);
557  // It was non-zero.  Dereference to get the result value.
558  movq(rax, Operand(rax, 0));
559  bind(&prologue);
560
561  // No more valid handles (the result handle was the last one). Restore
562  // previous handle scope.
563  subl(Operand(base_reg, kLevelOffset), Immediate(1));
564  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
565  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
566  j(not_equal, &delete_allocated_handles);
567  bind(&leave_exit_frame);
568
569  // Check if the function scheduled an exception.
570  movq(rsi, scheduled_exception_address);
571  Cmp(Operand(rsi, 0), Factory::the_hole_value());
572  j(not_equal, &promote_scheduled_exception);
573
574  LeaveApiExitFrame();
575  ret(stack_space * kPointerSize);
576
577  bind(&promote_scheduled_exception);
578  MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
579                                           0, 1);
580  if (result->IsFailure()) {
581    return result;
582  }
583
584  bind(&empty_result);
585  // It was zero; the result is undefined.
586  Move(rax, Factory::undefined_value());
587  jmp(&prologue);
588
589  // HandleScope limit has changed. Delete allocated extensions.
590  bind(&delete_allocated_handles);
591  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
592  movq(prev_limit_reg, rax);
593  movq(rax, ExternalReference::delete_handle_scope_extensions());
594  call(rax);
595  movq(rax, prev_limit_reg);
596  jmp(&leave_exit_frame);
597
598  return result;
599}
600
601
602void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
603                                             int result_size) {
604  // Set the entry point and jump to the C entry runtime stub.
605  movq(rbx, ext);
606  CEntryStub ces(result_size);
607  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
608}
609
610
611MaybeObject* MacroAssembler::TryJumpToExternalReference(
612    const ExternalReference& ext, int result_size) {
613  // Set the entry point and jump to the C entry runtime stub.
614  movq(rbx, ext);
615  CEntryStub ces(result_size);
616  return TryTailCallStub(&ces);
617}
618
619
620void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
621  // Calls are not allowed in some stubs.
622  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
623
624  // Rely on the assertion to check that the number of provided
625  // arguments matches the expected number of arguments. Fake a
626  // parameter count to avoid emitting code to do the check.
627  ParameterCount expected(0);
628  GetBuiltinEntry(rdx, id);
629  InvokeCode(rdx, expected, expected, flag);
630}
631
632
633void MacroAssembler::GetBuiltinFunction(Register target,
634                                        Builtins::JavaScript id) {
635  // Load the builtins object into target register.
636  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
637  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
638  movq(target, FieldOperand(target,
639                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
640}
641
642
643void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
644  ASSERT(!target.is(rdi));
645  // Load the JavaScript builtin function from the builtins object.
646  GetBuiltinFunction(rdi, id);
647  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
648}
649
650
651void MacroAssembler::Set(Register dst, int64_t x) {
652  if (x == 0) {
653    xorl(dst, dst);
654  } else if (is_int32(x)) {
655    movq(dst, Immediate(static_cast<int32_t>(x)));
656  } else if (is_uint32(x)) {
657    movl(dst, Immediate(static_cast<uint32_t>(x)));
658  } else {
659    movq(dst, x, RelocInfo::NONE);
660  }
661}
662
663void MacroAssembler::Set(const Operand& dst, int64_t x) {
664  if (is_int32(x)) {
665    movq(dst, Immediate(static_cast<int32_t>(x)));
666  } else {
667    movq(kScratchRegister, x, RelocInfo::NONE);
668    movq(dst, kScratchRegister);
669  }
670}
671
672// ----------------------------------------------------------------------------
673// Smi tagging, untagging and tag detection.
674
675Register MacroAssembler::GetSmiConstant(Smi* source) {
676  int value = source->value();
677  if (value == 0) {
678    xorl(kScratchRegister, kScratchRegister);
679    return kScratchRegister;
680  }
681  if (value == 1) {
682    return kSmiConstantRegister;
683  }
684  LoadSmiConstant(kScratchRegister, source);
685  return kScratchRegister;
686}
687
688void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
689  if (FLAG_debug_code) {
690    movq(dst,
691         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
692         RelocInfo::NONE);
693    cmpq(dst, kSmiConstantRegister);
694    if (allow_stub_calls()) {
695      Assert(equal, "Uninitialized kSmiConstantRegister");
696    } else {
697      NearLabel ok;
698      j(equal, &ok);
699      int3();
700      bind(&ok);
701    }
702  }
703  if (source->value() == 0) {
704    xorl(dst, dst);
705    return;
706  }
707  int value = source->value();
708  bool negative = value < 0;
709  unsigned int uvalue = negative ? -value : value;
710
711  switch (uvalue) {
712    case 9:
713      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
714      break;
715    case 8:
716      xorl(dst, dst);
717      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
718      break;
719    case 4:
720      xorl(dst, dst);
721      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
722      break;
723    case 5:
724      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
725      break;
726    case 3:
727      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
728      break;
729    case 2:
730      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
731      break;
732    case 1:
733      movq(dst, kSmiConstantRegister);
734      break;
735    case 0:
736      UNREACHABLE();
737      return;
738    default:
739      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
740      return;
741  }
742  if (negative) {
743    neg(dst);
744  }
745}
746
747
748void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
749  ASSERT_EQ(0, kSmiTag);
750  if (!dst.is(src)) {
751    movl(dst, src);
752  }
753  shl(dst, Immediate(kSmiShift));
754}
755
756
757void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
758  if (FLAG_debug_code) {
759    testb(dst, Immediate(0x01));
760    NearLabel ok;
761    j(zero, &ok);
762    if (allow_stub_calls()) {
763      Abort("Integer32ToSmiField writing to non-smi location");
764    } else {
765      int3();
766    }
767    bind(&ok);
768  }
769  ASSERT(kSmiShift % kBitsPerByte == 0);
770  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
771}
772
773
774void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
775                                                Register src,
776                                                int constant) {
777  if (dst.is(src)) {
778    addq(dst, Immediate(constant));
779  } else {
780    lea(dst, Operand(src, constant));
781  }
782  shl(dst, Immediate(kSmiShift));
783}
784
785
786void MacroAssembler::SmiToInteger32(Register dst, Register src) {
787  ASSERT_EQ(0, kSmiTag);
788  if (!dst.is(src)) {
789    movq(dst, src);
790  }
791  shr(dst, Immediate(kSmiShift));
792}
793
794
795void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
796  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
797}
798
799
800void MacroAssembler::SmiToInteger64(Register dst, Register src) {
801  ASSERT_EQ(0, kSmiTag);
802  if (!dst.is(src)) {
803    movq(dst, src);
804  }
805  sar(dst, Immediate(kSmiShift));
806}
807
808
809void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
810  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
811}
812
813
814void MacroAssembler::SmiTest(Register src) {
815  testq(src, src);
816}
817
818
819void MacroAssembler::SmiCompare(Register dst, Register src) {
820  cmpq(dst, src);
821}
822
823
824void MacroAssembler::SmiCompare(Register dst, Smi* src) {
825  ASSERT(!dst.is(kScratchRegister));
826  if (src->value() == 0) {
827    testq(dst, dst);
828  } else {
829    Register constant_reg = GetSmiConstant(src);
830    cmpq(dst, constant_reg);
831  }
832}
833
834
835void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
836  cmpq(dst, src);
837}
838
839
840void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
841  cmpq(dst, src);
842}
843
844
845void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
846  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
847}
848
849
850void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
851  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
852}
853
854
855void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
856                                                           Register src,
857                                                           int power) {
858  ASSERT(power >= 0);
859  ASSERT(power < 64);
860  if (power == 0) {
861    SmiToInteger64(dst, src);
862    return;
863  }
864  if (!dst.is(src)) {
865    movq(dst, src);
866  }
867  if (power < kSmiShift) {
868    sar(dst, Immediate(kSmiShift - power));
869  } else if (power > kSmiShift) {
870    shl(dst, Immediate(power - kSmiShift));
871  }
872}
873
874
875void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
876                                                         Register src,
877                                                         int power) {
878  ASSERT((0 <= power) && (power < 32));
879  if (dst.is(src)) {
880    shr(dst, Immediate(power + kSmiShift));
881  } else {
882    UNIMPLEMENTED();  // Not used.
883  }
884}
885
886
887Condition MacroAssembler::CheckSmi(Register src) {
888  ASSERT_EQ(0, kSmiTag);
889  testb(src, Immediate(kSmiTagMask));
890  return zero;
891}
892
893
894Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
895  ASSERT_EQ(0, kSmiTag);
896  // Make mask 0x8000000000000001 and test that both bits are zero.
897  movq(kScratchRegister, src);
898  rol(kScratchRegister, Immediate(1));
899  testb(kScratchRegister, Immediate(3));
900  return zero;
901}
902
903
904Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
905  if (first.is(second)) {
906    return CheckSmi(first);
907  }
908  ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
909  leal(kScratchRegister, Operand(first, second, times_1, 0));
910  testb(kScratchRegister, Immediate(0x03));
911  return zero;
912}
913
914
915Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
916                                                  Register second) {
917  if (first.is(second)) {
918    return CheckNonNegativeSmi(first);
919  }
920  movq(kScratchRegister, first);
921  or_(kScratchRegister, second);
922  rol(kScratchRegister, Immediate(1));
923  testl(kScratchRegister, Immediate(3));
924  return zero;
925}
926
927
928Condition MacroAssembler::CheckEitherSmi(Register first,
929                                         Register second,
930                                         Register scratch) {
931  if (first.is(second)) {
932    return CheckSmi(first);
933  }
934  if (scratch.is(second)) {
935    andl(scratch, first);
936  } else {
937    if (!scratch.is(first)) {
938      movl(scratch, first);
939    }
940    andl(scratch, second);
941  }
942  testb(scratch, Immediate(kSmiTagMask));
943  return zero;
944}
945
946
947Condition MacroAssembler::CheckIsMinSmi(Register src) {
948  ASSERT(!src.is(kScratchRegister));
949  // If we overflow by subtracting one, it's the minimal smi value.
950  cmpq(src, kSmiConstantRegister);
951  return overflow;
952}
953
954
955Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
956  // A 32-bit integer value can always be converted to a smi.
957  return always;
958}
959
960
961Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
962  // An unsigned 32-bit integer value is valid as long as the high bit
963  // is not set.
964  testl(src, src);
965  return positive;
966}
967
968
969void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
970  if (constant->value() == 0) {
971    if (!dst.is(src)) {
972      movq(dst, src);
973    }
974    return;
975  } else if (dst.is(src)) {
976    ASSERT(!dst.is(kScratchRegister));
977    switch (constant->value()) {
978      case 1:
979        addq(dst, kSmiConstantRegister);
980        return;
981      case 2:
982        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
983        return;
984      case 4:
985        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
986        return;
987      case 8:
988        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
989        return;
990      default:
991        Register constant_reg = GetSmiConstant(constant);
992        addq(dst, constant_reg);
993        return;
994    }
995  } else {
996    switch (constant->value()) {
997      case 1:
998        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
999        return;
1000      case 2:
1001        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1002        return;
1003      case 4:
1004        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1005        return;
1006      case 8:
1007        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1008        return;
1009      default:
1010        LoadSmiConstant(dst, constant);
1011        addq(dst, src);
1012        return;
1013    }
1014  }
1015}
1016
1017
1018void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1019  if (constant->value() != 0) {
1020    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
1021  }
1022}
1023
1024
1025void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1026  if (constant->value() == 0) {
1027    if (!dst.is(src)) {
1028      movq(dst, src);
1029    }
1030  } else if (dst.is(src)) {
1031    ASSERT(!dst.is(kScratchRegister));
1032    Register constant_reg = GetSmiConstant(constant);
1033    subq(dst, constant_reg);
1034  } else {
1035    if (constant->value() == Smi::kMinValue) {
1036      LoadSmiConstant(dst, constant);
1037      // Adding and subtracting the min-value gives the same result; it only
1038      // differs on the overflow bit, which we don't check here.
1039      addq(dst, src);
1040    } else {
1041      // Subtract by adding the negation.
1042      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1043      addq(dst, src);
1044    }
1045  }
1046}
1047
1048
1049void MacroAssembler::SmiAdd(Register dst,
1050                            Register src1,
1051                            Register src2) {
1052  // No overflow checking. Use only when it's known that
1053  // overflowing is impossible.
1054  ASSERT(!dst.is(src2));
1055  if (dst.is(src1)) {
1056    addq(dst, src2);
1057  } else {
1058    movq(dst, src1);
1059    addq(dst, src2);
1060  }
1061  Assert(no_overflow, "Smi addition overflow");
1062}
1063
1064
1065void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1066  // No overflow checking. Use only when it's known that
1067  // overflowing is impossible (e.g., subtracting two positive smis).
1068  ASSERT(!dst.is(src2));
1069  if (dst.is(src1)) {
1070    subq(dst, src2);
1071  } else {
1072    movq(dst, src1);
1073    subq(dst, src2);
1074  }
1075  Assert(no_overflow, "Smi subtraction overflow");
1076}
1077
1078
1079void MacroAssembler::SmiSub(Register dst,
1080                            Register src1,
1081                            const Operand& src2) {
1082  // No overflow checking. Use only when it's known that
1083  // overflowing is impossible (e.g., subtracting two positive smis).
1084  if (dst.is(src1)) {
1085    subq(dst, src2);
1086  } else {
1087    movq(dst, src1);
1088    subq(dst, src2);
1089  }
1090  Assert(no_overflow, "Smi subtraction overflow");
1091}
1092
1093
1094void MacroAssembler::SmiNot(Register dst, Register src) {
1095  ASSERT(!dst.is(kScratchRegister));
1096  ASSERT(!src.is(kScratchRegister));
1097  // Set tag and padding bits before negating, so that they are zero afterwards.
1098  movl(kScratchRegister, Immediate(~0));
1099  if (dst.is(src)) {
1100    xor_(dst, kScratchRegister);
1101  } else {
1102    lea(dst, Operand(src, kScratchRegister, times_1, 0));
1103  }
1104  not_(dst);
1105}
1106
1107
1108void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1109  ASSERT(!dst.is(src2));
1110  if (!dst.is(src1)) {
1111    movq(dst, src1);
1112  }
1113  and_(dst, src2);
1114}
1115
1116
1117void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1118  if (constant->value() == 0) {
1119    xor_(dst, dst);
1120  } else if (dst.is(src)) {
1121    ASSERT(!dst.is(kScratchRegister));
1122    Register constant_reg = GetSmiConstant(constant);
1123    and_(dst, constant_reg);
1124  } else {
1125    LoadSmiConstant(dst, constant);
1126    and_(dst, src);
1127  }
1128}
1129
1130
1131void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1132  if (!dst.is(src1)) {
1133    movq(dst, src1);
1134  }
1135  or_(dst, src2);
1136}
1137
1138
1139void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1140  if (dst.is(src)) {
1141    ASSERT(!dst.is(kScratchRegister));
1142    Register constant_reg = GetSmiConstant(constant);
1143    or_(dst, constant_reg);
1144  } else {
1145    LoadSmiConstant(dst, constant);
1146    or_(dst, src);
1147  }
1148}
1149
1150
1151void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1152  if (!dst.is(src1)) {
1153    movq(dst, src1);
1154  }
1155  xor_(dst, src2);
1156}
1157
1158
1159void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1160  if (dst.is(src)) {
1161    ASSERT(!dst.is(kScratchRegister));
1162    Register constant_reg = GetSmiConstant(constant);
1163    xor_(dst, constant_reg);
1164  } else {
1165    LoadSmiConstant(dst, constant);
1166    xor_(dst, src);
1167  }
1168}
1169
1170
1171void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1172                                                     Register src,
1173                                                     int shift_value) {
1174  ASSERT(is_uint5(shift_value));
1175  if (shift_value > 0) {
1176    if (dst.is(src)) {
1177      sar(dst, Immediate(shift_value + kSmiShift));
1178      shl(dst, Immediate(kSmiShift));
1179    } else {
1180      UNIMPLEMENTED();  // Not used.
1181    }
1182  }
1183}
1184
1185
1186void MacroAssembler::SmiShiftLeftConstant(Register dst,
1187                                          Register src,
1188                                          int shift_value) {
1189  if (!dst.is(src)) {
1190    movq(dst, src);
1191  }
1192  if (shift_value > 0) {
1193    shl(dst, Immediate(shift_value));
1194  }
1195}
1196
1197
1198void MacroAssembler::SmiShiftLeft(Register dst,
1199                                  Register src1,
1200                                  Register src2) {
1201  ASSERT(!dst.is(rcx));
1202  NearLabel result_ok;
1203  // Untag shift amount.
1204  if (!dst.is(src1)) {
1205    movq(dst, src1);
1206  }
1207  SmiToInteger32(rcx, src2);
1208  // The shift amount is masked to the lower 5 bits, not six as the shl opcode uses.
1209  and_(rcx, Immediate(0x1f));
1210  shl_cl(dst);
1211}
1212
1213
1214void MacroAssembler::SmiShiftArithmeticRight(Register dst,
1215                                             Register src1,
1216                                             Register src2) {
1217  ASSERT(!dst.is(kScratchRegister));
1218  ASSERT(!src1.is(kScratchRegister));
1219  ASSERT(!src2.is(kScratchRegister));
1220  ASSERT(!dst.is(rcx));
1221  if (src1.is(rcx)) {
1222    movq(kScratchRegister, src1);
1223  } else if (src2.is(rcx)) {
1224    movq(kScratchRegister, src2);
1225  }
1226  if (!dst.is(src1)) {
1227    movq(dst, src1);
1228  }
1229  SmiToInteger32(rcx, src2);
1230  orl(rcx, Immediate(kSmiShift));
1231  sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
1232  shl(dst, Immediate(kSmiShift));
1233  if (src1.is(rcx)) {
1234    movq(src1, kScratchRegister);
1235  } else if (src2.is(rcx)) {
1236    movq(src2, kScratchRegister);
1237  }
1238}
1239
1240
1241SmiIndex MacroAssembler::SmiToIndex(Register dst,
1242                                    Register src,
1243                                    int shift) {
1244  ASSERT(is_uint6(shift));
1245  // There is a possible optimization if shift is in the range 60-63, but that
1246  // will (and must) never happen.
1247  if (!dst.is(src)) {
1248    movq(dst, src);
1249  }
1250  if (shift < kSmiShift) {
1251    sar(dst, Immediate(kSmiShift - shift));
1252  } else {
1253    shl(dst, Immediate(shift - kSmiShift));
1254  }
1255  return SmiIndex(dst, times_1);
1256}
1257
1258SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
1259                                            Register src,
1260                                            int shift) {
1261  // Register src holds a positive smi.
1262  ASSERT(is_uint6(shift));
1263  if (!dst.is(src)) {
1264    movq(dst, src);
1265  }
1266  neg(dst);
1267  if (shift < kSmiShift) {
1268    sar(dst, Immediate(kSmiShift - shift));
1269  } else {
1270    shl(dst, Immediate(shift - kSmiShift));
1271  }
1272  return SmiIndex(dst, times_1);
1273}
1274
1275
1276void MacroAssembler::Move(Register dst, Register src) {
1277  if (!dst.is(src)) {
1278    movq(dst, src);
1279  }
1280}
1281
1282
1283
1284
1285void MacroAssembler::Move(Register dst, Handle<Object> source) {
1286  ASSERT(!source->IsFailure());
1287  if (source->IsSmi()) {
1288    Move(dst, Smi::cast(*source));
1289  } else {
1290    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
1291  }
1292}
1293
1294
1295void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
1296  ASSERT(!source->IsFailure());
1297  if (source->IsSmi()) {
1298    Move(dst, Smi::cast(*source));
1299  } else {
1300    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1301    movq(dst, kScratchRegister);
1302  }
1303}
1304
1305
1306void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1307  if (source->IsSmi()) {
1308    SmiCompare(dst, Smi::cast(*source));
1309  } else {
1310    Move(kScratchRegister, source);
1311    cmpq(dst, kScratchRegister);
1312  }
1313}
1314
1315
1316void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
1317  if (source->IsSmi()) {
1318    SmiCompare(dst, Smi::cast(*source));
1319  } else {
1320    ASSERT(source->IsHeapObject());
1321    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1322    cmpq(dst, kScratchRegister);
1323  }
1324}
1325
1326
1327void MacroAssembler::Push(Handle<Object> source) {
1328  if (source->IsSmi()) {
1329    Push(Smi::cast(*source));
1330  } else {
1331    ASSERT(source->IsHeapObject());
1332    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
1333    push(kScratchRegister);
1334  }
1335}
1336
1337
1338void MacroAssembler::Push(Smi* source) {
1339  intptr_t smi = reinterpret_cast<intptr_t>(source);
1340  if (is_int32(smi)) {
1341    push(Immediate(static_cast<int32_t>(smi)));
1342  } else {
1343    Register constant = GetSmiConstant(source);
1344    push(constant);
1345  }
1346}
1347
1348
1349void MacroAssembler::Drop(int stack_elements) {
1350  if (stack_elements > 0) {
1351    addq(rsp, Immediate(stack_elements * kPointerSize));
1352  }
1353}
1354
1355
1356void MacroAssembler::Test(const Operand& src, Smi* source) {
1357  testl(Operand(src, kIntSize), Immediate(source->value()));
1358}
1359
1360
1361void MacroAssembler::Jump(ExternalReference ext) {
1362  movq(kScratchRegister, ext);
1363  jmp(kScratchRegister);
1364}
1365
1366
1367void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1368  movq(kScratchRegister, destination, rmode);
1369  jmp(kScratchRegister);
1370}
1371
1372
1373void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
1374  // TODO(X64): Inline this
1375  jmp(code_object, rmode);
1376}
1377
1378
1379void MacroAssembler::Call(ExternalReference ext) {
1380  movq(kScratchRegister, ext);
1381  call(kScratchRegister);
1382}
1383
1384
1385void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1386  movq(kScratchRegister, destination, rmode);
1387  call(kScratchRegister);
1388}
1389
1390
1391void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1392  ASSERT(RelocInfo::IsCodeTarget(rmode));
1393  call(code_object, rmode);
1394}
1395
1396
1397void MacroAssembler::PushTryHandler(CodeLocation try_location,
1398                                    HandlerType type) {
1399  // Adjust this code if not the case.
1400  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1401
1402  // The pc (return address) is already on TOS.  This code pushes state,
1403  // frame pointer and current handler.  Check that they are expected
1404  // next on the stack, in that order.
1405  ASSERT_EQ(StackHandlerConstants::kStateOffset,
1406            StackHandlerConstants::kPCOffset - kPointerSize);
1407  ASSERT_EQ(StackHandlerConstants::kFPOffset,
1408            StackHandlerConstants::kStateOffset - kPointerSize);
1409  ASSERT_EQ(StackHandlerConstants::kNextOffset,
1410            StackHandlerConstants::kFPOffset - kPointerSize);
1411
1412  if (try_location == IN_JAVASCRIPT) {
1413    if (type == TRY_CATCH_HANDLER) {
1414      push(Immediate(StackHandler::TRY_CATCH));
1415    } else {
1416      push(Immediate(StackHandler::TRY_FINALLY));
1417    }
1418    push(rbp);
1419  } else {
1420    ASSERT(try_location == IN_JS_ENTRY);
1421    // The frame pointer does not point to a JS frame so we save NULL
1422    // for rbp. We expect the code throwing an exception to check rbp
1423    // before dereferencing it to restore the context.
1424    push(Immediate(StackHandler::ENTRY));
1425    push(Immediate(0));  // NULL frame pointer.
1426  }
1427  // Save the current handler.
1428  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1429  push(Operand(kScratchRegister, 0));
1430  // Link this handler.
1431  movq(Operand(kScratchRegister, 0), rsp);
1432}
1433
1434
1435void MacroAssembler::PopTryHandler() {
1436  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
1437  // Unlink this handler.
1438  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
1439  pop(Operand(kScratchRegister, 0));
1440  // Remove the remaining fields.
1441  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1442}
1443
1444
1445void MacroAssembler::Ret() {
1446  ret(0);
1447}
1448
1449
1450void MacroAssembler::FCmp() {
1451  fucomip();
1452  fstp(0);
1453}
1454
1455
1456void MacroAssembler::CmpObjectType(Register heap_object,
1457                                   InstanceType type,
1458                                   Register map) {
1459  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1460  CmpInstanceType(map, type);
1461}
1462
1463
1464void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
1465  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
1466       Immediate(static_cast<int8_t>(type)));
1467}
1468
1469
1470void MacroAssembler::CheckMap(Register obj,
1471                              Handle<Map> map,
1472                              Label* fail,
1473                              bool is_heap_object) {
1474  if (!is_heap_object) {
1475    JumpIfSmi(obj, fail);
1476  }
1477  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
1478  j(not_equal, fail);
1479}
1480
1481
1482void MacroAssembler::AbortIfNotNumber(Register object) {
1483  NearLabel ok;
1484  Condition is_smi = CheckSmi(object);
1485  j(is_smi, &ok);
1486  Cmp(FieldOperand(object, HeapObject::kMapOffset),
1487      Factory::heap_number_map());
1488  Assert(equal, "Operand not a number");
1489  bind(&ok);
1490}
1491
1492
1493void MacroAssembler::AbortIfSmi(Register object) {
1494  NearLabel ok;
1495  Condition is_smi = CheckSmi(object);
1496  Assert(NegateCondition(is_smi), "Operand is a smi");
1497}
1498
1499
1500void MacroAssembler::AbortIfNotSmi(Register object) {
1501  NearLabel ok;
1502  Condition is_smi = CheckSmi(object);
1503  Assert(is_smi, "Operand is not a smi");
1504}
1505
1506
1507void MacroAssembler::AbortIfNotRootValue(Register src,
1508                                         Heap::RootListIndex root_value_index,
1509                                         const char* message) {
1510  ASSERT(!src.is(kScratchRegister));
1511  LoadRoot(kScratchRegister, root_value_index);
1512  cmpq(src, kScratchRegister);
1513  Check(equal, message);
1514}
1515
1516
1517
1518Condition MacroAssembler::IsObjectStringType(Register heap_object,
1519                                             Register map,
1520                                             Register instance_type) {
1521  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
1522  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
1523  ASSERT(kNotStringTag != 0);
1524  testb(instance_type, Immediate(kIsNotStringMask));
1525  return zero;
1526}
1527
1528
1529void MacroAssembler::TryGetFunctionPrototype(Register function,
1530                                             Register result,
1531                                             Label* miss) {
1532  // Check that the receiver isn't a smi.
1533  testl(function, Immediate(kSmiTagMask));
1534  j(zero, miss);
1535
1536  // Check that the function really is a function.
1537  CmpObjectType(function, JS_FUNCTION_TYPE, result);
1538  j(not_equal, miss);
1539
1540  // Make sure that the function has an instance prototype.
1541  NearLabel non_instance;
1542  testb(FieldOperand(result, Map::kBitFieldOffset),
1543        Immediate(1 << Map::kHasNonInstancePrototype));
1544  j(not_zero, &non_instance);
1545
1546  // Get the prototype or initial map from the function.
1547  movq(result,
1548       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1549
1550  // If the prototype or initial map is the hole, don't return it and
1551  // simply miss the cache instead. This will allow us to allocate a
1552  // prototype object on-demand in the runtime system.
1553  CompareRoot(result, Heap::kTheHoleValueRootIndex);
1554  j(equal, miss);
1555
1556  // If the function does not have an initial map, we're done.
1557  NearLabel done;
1558  CmpObjectType(result, MAP_TYPE, kScratchRegister);
1559  j(not_equal, &done);
1560
1561  // Get the prototype from the initial map.
1562  movq(result, FieldOperand(result, Map::kPrototypeOffset));
1563  jmp(&done);
1564
1565  // Non-instance prototype: Fetch prototype from constructor field
1566  // in initial map.
1567  bind(&non_instance);
1568  movq(result, FieldOperand(result, Map::kConstructorOffset));
1569
1570  // All done.
1571  bind(&done);
1572}
1573
1574
1575void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
1576  if (FLAG_native_code_counters && counter->Enabled()) {
1577    movq(kScratchRegister, ExternalReference(counter));
1578    movl(Operand(kScratchRegister, 0), Immediate(value));
1579  }
1580}
1581
1582
1583void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
1584  ASSERT(value > 0);
1585  if (FLAG_native_code_counters && counter->Enabled()) {
1586    movq(kScratchRegister, ExternalReference(counter));
1587    Operand operand(kScratchRegister, 0);
1588    if (value == 1) {
1589      incl(operand);
1590    } else {
1591      addl(operand, Immediate(value));
1592    }
1593  }
1594}
1595
1596
1597void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
1598  ASSERT(value > 0);
1599  if (FLAG_native_code_counters && counter->Enabled()) {
1600    movq(kScratchRegister, ExternalReference(counter));
1601    Operand operand(kScratchRegister, 0);
1602    if (value == 1) {
1603      decl(operand);
1604    } else {
1605      subl(operand, Immediate(value));
1606    }
1607  }
1608}
1609
1610
1611#ifdef ENABLE_DEBUGGER_SUPPORT
1612void MacroAssembler::DebugBreak() {
1613  ASSERT(allow_stub_calls());
1614  xor_(rax, rax);  // no arguments
1615  movq(rbx, ExternalReference(Runtime::kDebugBreak));
1616  CEntryStub ces(1);
1617  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1618}
1619#endif  // ENABLE_DEBUGGER_SUPPORT
1620
1621
1622void MacroAssembler::InvokeCode(Register code,
1623                                const ParameterCount& expected,
1624                                const ParameterCount& actual,
1625                                InvokeFlag flag) {
1626  NearLabel done;
1627  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
1628  if (flag == CALL_FUNCTION) {
1629    call(code);
1630  } else {
1631    ASSERT(flag == JUMP_FUNCTION);
1632    jmp(code);
1633  }
1634  bind(&done);
1635}
1636
1637
1638void MacroAssembler::InvokeCode(Handle<Code> code,
1639                                const ParameterCount& expected,
1640                                const ParameterCount& actual,
1641                                RelocInfo::Mode rmode,
1642                                InvokeFlag flag) {
1643  NearLabel done;
1644  Register dummy = rax;
1645  InvokePrologue(expected, actual, code, dummy, &done, flag);
1646  if (flag == CALL_FUNCTION) {
1647    Call(code, rmode);
1648  } else {
1649    ASSERT(flag == JUMP_FUNCTION);
1650    Jump(code, rmode);
1651  }
1652  bind(&done);
1653}
1654
1655
1656void MacroAssembler::InvokeFunction(Register function,
1657                                    const ParameterCount& actual,
1658                                    InvokeFlag flag) {
1659  ASSERT(function.is(rdi));
1660  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
1661  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
1662  movsxlq(rbx,
1663          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
1664  // Load the code entry into rdx; it points past the Code object header,
1665  // to the start of the executable code.
1666  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
1667
1668  ParameterCount expected(rbx);
1669  InvokeCode(rdx, expected, actual, flag);
1670}
1671
1672
1673void MacroAssembler::InvokeFunction(JSFunction* function,
1674                                    const ParameterCount& actual,
1675                                    InvokeFlag flag) {
1676  ASSERT(function->is_compiled());
1677  // Get the function and set up the context.
1678  Move(rdi, Handle<JSFunction>(function));
1679  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
1680
1681  // Invoke the cached code.
1682  Handle<Code> code(function->code());
1683  ParameterCount expected(function->shared()->formal_parameter_count());
1684  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
1685}
1686
1687
1688void MacroAssembler::EnterFrame(StackFrame::Type type) {
1689  push(rbp);
1690  movq(rbp, rsp);
1691  push(rsi);  // Context.
1692  Push(Smi::FromInt(type));
1693  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1694  push(kScratchRegister);
1695  if (FLAG_debug_code) {
1696    movq(kScratchRegister,
1697         Factory::undefined_value(),
1698         RelocInfo::EMBEDDED_OBJECT);
1699    cmpq(Operand(rsp, 0), kScratchRegister);
1700    Check(not_equal, "code object not properly patched");
1701  }
1702}
1703
1704
1705void MacroAssembler::LeaveFrame(StackFrame::Type type) {
1706  if (FLAG_debug_code) {
1707    Move(kScratchRegister, Smi::FromInt(type));
1708    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
1709    Check(equal, "stack frame types must match");
1710  }
1711  movq(rsp, rbp);
1712  pop(rbp);
1713}
1714
1715
1716void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
1717  // Set up the frame structure on the stack.
1718  // All constants are relative to the frame pointer of the exit frame.
1719  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
1720  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
1721  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
1722  push(rbp);
1723  movq(rbp, rsp);
1724
1725  // Reserve room for entry stack pointer and push the code object.
1726  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
1727  push(Immediate(0));  // Saved entry sp, patched before call.
1728  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
1729  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
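  // The exit frame so far, matching the ExitFrameConstants asserted above:
  //   rbp + 16 : value of rsp in the caller (kCallerSPDisplacement)
  //   [rbp + 8]: return address
  //   [rbp + 0]: saved rbp
  //   [rbp - 8]: saved entry sp slot, patched by EnterExitFrameEpilogue
  //   [rbp -16]: code object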
1730
1731  // Save the frame pointer and the context in top.
1732  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
1733  ExternalReference context_address(Top::k_context_address);
1734  if (save_rax) {
1735    movq(r14, rax);  // Backup rax before we use it.
1736  }
1737
1738  movq(rax, rbp);
1739  store_rax(c_entry_fp_address);
1740  movq(rax, rsi);
1741  store_rax(context_address);
1742}
1743
1744
1745void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space) {
1746#ifdef _WIN64
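  // The Windows x64 calling convention requires the caller to reserve four
  // stack slots (32 bytes) of "shadow space" for the callee, even for
  // arguments passed in registers.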
1747  const int kShadowSpace = 4;
1748  arg_stack_space += kShadowSpace;
1749#endif
1750  if (arg_stack_space > 0) {
1751    subq(rsp, Immediate(arg_stack_space * kPointerSize));
1752  }
1753
1754  // Get the required frame alignment for the OS.
1755  static const int kFrameAlignment = OS::ActivationFrameAlignment();
1756  if (kFrameAlignment > 0) {
1757    ASSERT(IsPowerOf2(kFrameAlignment));
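    // Since kFrameAlignment is a power of two, -kFrameAlignment has all bits
    // set except the low log2(kFrameAlignment) ones, so the and_ below rounds
    // rsp down to the nearest aligned address (e.g. with a 16-byte alignment
    // the low four bits are cleared).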
1758    movq(kScratchRegister, Immediate(-kFrameAlignment));
1759    and_(rsp, kScratchRegister);
1760  }
1761
1762  // Patch the saved entry sp.
1763  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
1764}
1765
1766
1767void MacroAssembler::EnterExitFrame(int arg_stack_space) {
1768  EnterExitFramePrologue(true);
1769
1770  // Set up argv in callee-saved register r12. It is reused in LeaveExitFrame,
1771  // so it must be retained across the C-call.
1772  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
1773  lea(r12, Operand(rbp, r14, times_pointer_size, offset));
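  // r14 holds the argument count that EnterExitFramePrologue saved from rax,
  // so r12 ends up pointing into the caller's argument area (the slot used as
  // argv). Being callee-saved, r12 survives the C call and is reused by
  // LeaveExitFrame.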
1774
1775  EnterExitFrameEpilogue(arg_stack_space);
1776}
1777
1778
1779void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
1780  EnterExitFramePrologue(false);
1781  EnterExitFrameEpilogue(arg_stack_space);
1782}
1783
1784
1785void MacroAssembler::LeaveExitFrame() {
1786  // Registers:
1787  // r12 : argv
1788
1789  // Get the return address from the stack and restore the frame pointer.
1790  movq(rcx, Operand(rbp, 1 * kPointerSize));
1791  movq(rbp, Operand(rbp, 0 * kPointerSize));
1792
1793  // Pop everything up to and including the arguments and the receiver
1794  // from the caller stack.
1795  lea(rsp, Operand(r12, 1 * kPointerSize));
1796
1797  // Push the return address to get ready to return.
1798  push(rcx);
1799
1800  LeaveExitFrameEpilogue();
1801}
1802
1803
1804void MacroAssembler::LeaveApiExitFrame() {
1805  movq(rsp, rbp);
1806  pop(rbp);
1807
1808  LeaveExitFrameEpilogue();
1809}
1810
1811
1812void MacroAssembler::LeaveExitFrameEpilogue() {
1813  // Restore current context from top and clear it in debug mode.
1814  ExternalReference context_address(Top::k_context_address);
1815  movq(kScratchRegister, context_address);
1816  movq(rsi, Operand(kScratchRegister, 0));
1817#ifdef DEBUG
1818  movq(Operand(kScratchRegister, 0), Immediate(0));
1819#endif
1820
1821  // Clear the top frame.
1822  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
1823  movq(kScratchRegister, c_entry_fp_address);
1824  movq(Operand(kScratchRegister, 0), Immediate(0));
1825}
1826
1827
1828void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1829                                            Register scratch,
1830                                            Label* miss) {
1831  Label same_contexts;
1832
1833  ASSERT(!holder_reg.is(scratch));
1834  ASSERT(!scratch.is(kScratchRegister));
1835  // Load current lexical context from the stack frame.
1836  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
1837
1838  // When generating debug code, make sure the lexical context is set.
1839  if (FLAG_debug_code) {
1840    cmpq(scratch, Immediate(0));
1841    Check(not_equal, "we should not have an empty lexical context");
1842  }
1843  // Load the global context of the current context.
1844  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
1845  movq(scratch, FieldOperand(scratch, offset));
1846  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
1847
1848  // Check the context is a global context.
1849  if (FLAG_debug_code) {
1850    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
1851        Factory::global_context_map());
1852    Check(equal, "JSGlobalObject::global_context should be a global context.");
1853  }
1854
1855  // Check if both contexts are the same.
1856  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
1857  j(equal, &same_contexts);
1858
1859  // Compare security tokens.
1860  // Check that the security token in the calling global object is
1861  // compatible with the security token in the receiving global
1862  // object.
1863
1864  // Check the context is a global context.
1865  if (FLAG_debug_code) {
1866    // Preserve original value of holder_reg.
1867    push(holder_reg);
1868    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
1869    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
1870    Check(not_equal, "JSGlobalProxy::context() should not be null.");
1871
1872    // Read the first word (the map) and compare to global_context_map().
1873    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
1874    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
1875    Check(equal, "JSGlobalObject::global_context should be a global context.");
1876    pop(holder_reg);
1877  }
1878
1879  movq(kScratchRegister,
1880       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
1881  int token_offset =
1882      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
1883  movq(scratch, FieldOperand(scratch, token_offset));
1884  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
1885  j(not_equal, miss);
1886
1887  bind(&same_contexts);
1888}
1889
1890
1891void MacroAssembler::LoadAllocationTopHelper(Register result,
1892                                             Register scratch,
1893                                             AllocationFlags flags) {
1894  ExternalReference new_space_allocation_top =
1895      ExternalReference::new_space_allocation_top_address();
1896
1897  // Just return if allocation top is already known.
1898  if ((flags & RESULT_CONTAINS_TOP) != 0) {
1899    // No use of scratch if allocation top is provided.
1900    ASSERT(!scratch.is_valid());
1901#ifdef DEBUG
1902    // Assert that result actually contains top on entry.
1903    movq(kScratchRegister, new_space_allocation_top);
1904    cmpq(result, Operand(kScratchRegister, 0));
1905    Check(equal, "Unexpected allocation top");
1906#endif
1907    return;
1908  }
1909
1910  // Move address of new object to result. Use scratch register if available,
1911  // and keep address in scratch until call to UpdateAllocationTopHelper.
1912  if (scratch.is_valid()) {
1913    movq(scratch, new_space_allocation_top);
1914    movq(result, Operand(scratch, 0));
1915  } else if (result.is(rax)) {
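    // rax can be loaded straight from a 64-bit address (the short moffs
    // encoding), so no scratch register is needed in this case.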
1916    load_rax(new_space_allocation_top);
1917  } else {
1918    movq(kScratchRegister, new_space_allocation_top);
1919    movq(result, Operand(kScratchRegister, 0));
1920  }
1921}
1922
1923
1924void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
1925                                               Register scratch) {
1926  if (FLAG_debug_code) {
1927    testq(result_end, Immediate(kObjectAlignmentMask));
1928    Check(zero, "Unaligned allocation in new space");
1929  }
1930
1931  ExternalReference new_space_allocation_top =
1932      ExternalReference::new_space_allocation_top_address();
1933
1934  // Update new top.
1935  if (result_end.is(rax)) {
1936    // rax can be stored directly to a memory location.
1937    store_rax(new_space_allocation_top);
1938  } else {
1939    // Register required - use scratch provided if available.
1940    if (scratch.is_valid()) {
1941      movq(Operand(scratch, 0), result_end);
1942    } else {
1943      movq(kScratchRegister, new_space_allocation_top);
1944      movq(Operand(kScratchRegister, 0), result_end);
1945    }
1946  }
1947}
1948
1949
1950void MacroAssembler::AllocateInNewSpace(int object_size,
1951                                        Register result,
1952                                        Register result_end,
1953                                        Register scratch,
1954                                        Label* gc_required,
1955                                        AllocationFlags flags) {
1956  if (!FLAG_inline_new) {
1957    if (FLAG_debug_code) {
1958      // Trash the registers to simulate an allocation failure.
1959      movl(result, Immediate(0x7091));
1960      if (result_end.is_valid()) {
1961        movl(result_end, Immediate(0x7191));
1962      }
1963      if (scratch.is_valid()) {
1964        movl(scratch, Immediate(0x7291));
1965      }
1966    }
1967    jmp(gc_required);
1968    return;
1969  }
1970  ASSERT(!result.is(result_end));
1971
1972  // Load address of new object into result.
1973  LoadAllocationTopHelper(result, scratch, flags);
1974
1975  // Calculate new top and bail out if new space is exhausted.
1976  ExternalReference new_space_allocation_limit =
1977      ExternalReference::new_space_allocation_limit_address();
1978
1979  Register top_reg = result_end.is_valid() ? result_end : result;
1980
1981  if (top_reg.is(result)) {
1982    addq(top_reg, Immediate(object_size));
1983  } else {
1984    lea(top_reg, Operand(result, object_size));
1985  }
1986  movq(kScratchRegister, new_space_allocation_limit);
1987  cmpq(top_reg, Operand(kScratchRegister, 0));
1988  j(above, gc_required);
1989
1990  // Update allocation top.
1991  UpdateAllocationTopHelper(top_reg, scratch);
1992
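  // If result doubled as the top register it now holds the new top.
  // Subtracting object_size restores the object start; subtracting
  // (object_size - kHeapObjectTag) instead leaves the start address with the
  // heap object tag already set, saving a separate tagging instruction.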
1993  if (top_reg.is(result)) {
1994    if ((flags & TAG_OBJECT) != 0) {
1995      subq(result, Immediate(object_size - kHeapObjectTag));
1996    } else {
1997      subq(result, Immediate(object_size));
1998    }
1999  } else if ((flags & TAG_OBJECT) != 0) {
2000    // Tag the result if requested.
2001    addq(result, Immediate(kHeapObjectTag));
2002  }
2003}
2004
2005
2006void MacroAssembler::AllocateInNewSpace(int header_size,
2007                                        ScaleFactor element_size,
2008                                        Register element_count,
2009                                        Register result,
2010                                        Register result_end,
2011                                        Register scratch,
2012                                        Label* gc_required,
2013                                        AllocationFlags flags) {
2014  if (!FLAG_inline_new) {
2015    if (FLAG_debug_code) {
2016      // Trash the registers to simulate an allocation failure.
2017      movl(result, Immediate(0x7091));
2018      movl(result_end, Immediate(0x7191));
2019      if (scratch.is_valid()) {
2020        movl(scratch, Immediate(0x7291));
2021      }
2022      // Register element_count is not modified by the function.
2023    }
2024    jmp(gc_required);
2025    return;
2026  }
2027  ASSERT(!result.is(result_end));
2028
2029  // Load address of new object into result.
2030  LoadAllocationTopHelper(result, scratch, flags);
2031
2032  // Calculate new top and bail out if new space is exhausted.
2033  ExternalReference new_space_allocation_limit =
2034      ExternalReference::new_space_allocation_limit_address();
2035  lea(result_end, Operand(result, element_count, element_size, header_size));
2036  movq(kScratchRegister, new_space_allocation_limit);
2037  cmpq(result_end, Operand(kScratchRegister, 0));
2038  j(above, gc_required);
2039
2040  // Update allocation top.
2041  UpdateAllocationTopHelper(result_end, scratch);
2042
2043  // Tag the result if requested.
2044  if ((flags & TAG_OBJECT) != 0) {
2045    addq(result, Immediate(kHeapObjectTag));
2046  }
2047}
2048
2049
2050void MacroAssembler::AllocateInNewSpace(Register object_size,
2051                                        Register result,
2052                                        Register result_end,
2053                                        Register scratch,
2054                                        Label* gc_required,
2055                                        AllocationFlags flags) {
2056  if (!FLAG_inline_new) {
2057    if (FLAG_debug_code) {
2058      // Trash the registers to simulate an allocation failure.
2059      movl(result, Immediate(0x7091));
2060      movl(result_end, Immediate(0x7191));
2061      if (scratch.is_valid()) {
2062        movl(scratch, Immediate(0x7291));
2063      }
2064      // object_size is left unchanged by this function.
2065    }
2066    jmp(gc_required);
2067    return;
2068  }
2069  ASSERT(!result.is(result_end));
2070
2071  // Load address of new object into result.
2072  LoadAllocationTopHelper(result, scratch, flags);
2073
2074  // Calculate new top and bail out if new space is exhausted.
2075  ExternalReference new_space_allocation_limit =
2076      ExternalReference::new_space_allocation_limit_address();
2077  if (!object_size.is(result_end)) {
2078    movq(result_end, object_size);
2079  }
2080  addq(result_end, result);
2081  movq(kScratchRegister, new_space_allocation_limit);
2082  cmpq(result_end, Operand(kScratchRegister, 0));
2083  j(above, gc_required);
2084
2085  // Update allocation top.
2086  UpdateAllocationTopHelper(result_end, scratch);
2087
2088  // Tag the result if requested.
2089  if ((flags & TAG_OBJECT) != 0) {
2090    addq(result, Immediate(kHeapObjectTag));
2091  }
2092}
2093
2094
2095void MacroAssembler::UndoAllocationInNewSpace(Register object) {
2096  ExternalReference new_space_allocation_top =
2097      ExternalReference::new_space_allocation_top_address();
2098
2099  // Make sure the object has no tag before resetting top.
2100  and_(object, Immediate(~kHeapObjectTagMask));
2101  movq(kScratchRegister, new_space_allocation_top);
2102#ifdef DEBUG
2103  cmpq(object, Operand(kScratchRegister, 0));
2104  Check(below, "Undo allocation of non-allocated memory");
2105#endif
2106  movq(Operand(kScratchRegister, 0), object);
2107}
2108
2109
2110void MacroAssembler::AllocateHeapNumber(Register result,
2111                                        Register scratch,
2112                                        Label* gc_required) {
2113  // Allocate heap number in new space.
2114  AllocateInNewSpace(HeapNumber::kSize,
2115                     result,
2116                     scratch,
2117                     no_reg,
2118                     gc_required,
2119                     TAG_OBJECT);
2120
2121  // Set the map.
2122  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
2123  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2124}
2125
2126
2127void MacroAssembler::AllocateTwoByteString(Register result,
2128                                           Register length,
2129                                           Register scratch1,
2130                                           Register scratch2,
2131                                           Register scratch3,
2132                                           Label* gc_required) {
2133  // Calculate the number of bytes needed for the characters in the string while
2134  // observing object alignment.
2135  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
2136                               kObjectAlignmentMask;
2137  ASSERT(kShortSize == 2);
2138  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
2139  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
2140                kHeaderAlignment));
2141  and_(scratch1, Immediate(~kObjectAlignmentMask));
2142  if (kHeaderAlignment > 0) {
2143    subq(scratch1, Immediate(kHeaderAlignment));
2144  }
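  // Net effect: scratch1 holds length * 2 rounded up so that
  // SeqTwoByteString::kHeaderSize + scratch1 (the total size requested from
  // AllocateInNewSpace below) is object-aligned.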
2145
2146  // Allocate two byte string in new space.
2147  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
2148                     times_1,
2149                     scratch1,
2150                     result,
2151                     scratch2,
2152                     scratch3,
2153                     gc_required,
2154                     TAG_OBJECT);
2155
2156  // Set the map, length and hash field.
2157  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
2158  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2159  Integer32ToSmi(scratch1, length);
2160  movq(FieldOperand(result, String::kLengthOffset), scratch1);
2161  movq(FieldOperand(result, String::kHashFieldOffset),
2162       Immediate(String::kEmptyHashField));
2163}
2164
2165
2166void MacroAssembler::AllocateAsciiString(Register result,
2167                                         Register length,
2168                                         Register scratch1,
2169                                         Register scratch2,
2170                                         Register scratch3,
2171                                         Label* gc_required) {
2172  // Calculate the number of bytes needed for the characters in the string while
2173  // observing object alignment.
2174  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
2175                               kObjectAlignmentMask;
2176  movl(scratch1, length);
2177  ASSERT(kCharSize == 1);
2178  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
2179  and_(scratch1, Immediate(~kObjectAlignmentMask));
2180  if (kHeaderAlignment > 0) {
2181    subq(scratch1, Immediate(kHeaderAlignment));
2182  }
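  // Same rounding as in AllocateTwoByteString, but with one byte per character.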
2183
2184  // Allocate ascii string in new space.
2185  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
2186                     times_1,
2187                     scratch1,
2188                     result,
2189                     scratch2,
2190                     scratch3,
2191                     gc_required,
2192                     TAG_OBJECT);
2193
2194  // Set the map, length and hash field.
2195  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
2196  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2197  Integer32ToSmi(scratch1, length);
2198  movq(FieldOperand(result, String::kLengthOffset), scratch1);
2199  movq(FieldOperand(result, String::kHashFieldOffset),
2200       Immediate(String::kEmptyHashField));
2201}
2202
2203
2204void MacroAssembler::AllocateConsString(Register result,
2205                                        Register scratch1,
2206                                        Register scratch2,
2207                                        Label* gc_required) {
2208  // Allocate cons string in new space.
2209  AllocateInNewSpace(ConsString::kSize,
2210                     result,
2211                     scratch1,
2212                     scratch2,
2213                     gc_required,
2214                     TAG_OBJECT);
2215
2216  // Set the map. The other fields are left uninitialized.
2217  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
2218  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2219}
2220
2221
2222void MacroAssembler::AllocateAsciiConsString(Register result,
2223                                             Register scratch1,
2224                                             Register scratch2,
2225                                             Label* gc_required) {
2226  // Allocate ascii cons string in new space.
2227  AllocateInNewSpace(ConsString::kSize,
2228                     result,
2229                     scratch1,
2230                     scratch2,
2231                     gc_required,
2232                     TAG_OBJECT);
2233
2234  // Set the map. The other fields are left uninitialized.
2235  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
2236  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
2237}
2238
2239
2240void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2241  if (context_chain_length > 0) {
2242    // Move up the chain of contexts to the context containing the slot.
2243    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
2244    // Load the function context (which is the incoming, outer context).
2245    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2246    for (int i = 1; i < context_chain_length; i++) {
2247      movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2248      movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
2249    }
2250    // The context may be an intermediate context, not a function context.
2251    movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2252  } else {  // context is the current function context.
2253    // The context may be an intermediate context, not a function context.
2254    movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2255  }
2256}
2257
2258
2259int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2260  // On Windows 64 stack slots are reserved by the caller for all arguments
2261  // including the ones passed in registers, and space is always allocated for
2262  // the four register arguments even if the function takes fewer than four
2263  // arguments.
2264  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
2265  // and the caller does not reserve stack slots for them.
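  // For example, a call with five arguments needs five stack slots on Windows
  // (every argument gets a slot, minimum four) but none on the AMD64 ABI,
  // where all five fit in the six argument registers.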
2266  ASSERT(num_arguments >= 0);
2267#ifdef _WIN64
2268  static const int kMinimumStackSlots = 4;
2269  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
2270  return num_arguments;
2271#else
2272  static const int kRegisterPassedArguments = 6;
2273  if (num_arguments < kRegisterPassedArguments) return 0;
2274  return num_arguments - kRegisterPassedArguments;
2275#endif
2276}
2277
2278
2279void MacroAssembler::PrepareCallCFunction(int num_arguments) {
2280  int frame_alignment = OS::ActivationFrameAlignment();
2281  ASSERT(frame_alignment != 0);
2282  ASSERT(num_arguments >= 0);
2283  // Allocate space for the arguments and the old rsp, and align the stack.
2284  movq(kScratchRegister, rsp);
2285  ASSERT(IsPowerOf2(frame_alignment));
2286  int argument_slots_on_stack =
2287      ArgumentStackSlotsForCFunctionCall(num_arguments);
2288  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
2289  and_(rsp, Immediate(-frame_alignment));
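  // Store the pre-alignment rsp in the slot just above the reserved argument
  // area; CallCFunction reloads it from there to restore the stack after the
  // call.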
2290  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
2291}
2292
2293
2294void MacroAssembler::CallCFunction(ExternalReference function,
2295                                   int num_arguments) {
2296  movq(rax, function);
2297  CallCFunction(rax, num_arguments);
2298}
2299
2300
2301void MacroAssembler::CallCFunction(Register function, int num_arguments) {
2302  // Check stack alignment.
2303  if (FLAG_debug_code) {
2304    CheckStackAlignment();
2305  }
2306
2307  call(function);
2308  ASSERT(OS::ActivationFrameAlignment() != 0);
2309  ASSERT(num_arguments >= 0);
2310  int argument_slots_on_stack =
2311      ArgumentStackSlotsForCFunctionCall(num_arguments);
2312  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
2313}
2314
2315
2316CodePatcher::CodePatcher(byte* address, int size)
2317    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
2318  // Create a new macro assembler pointing to the address of the code to patch.
2319  // The size is adjusted with kGap in order for the assembler to generate size
2320  // bytes of instructions without failing due to buffer size constraints.
2321  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2322}
2323
2324
2325CodePatcher::~CodePatcher() {
2326  // Indicate that code has changed.
2327  CPU::FlushICache(address_, size_);
2328
2329  // Check that the code was patched as expected.
2330  ASSERT(masm_.pc_ == address_ + size_);
2331  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2332}
2333
2334} }  // namespace v8::internal
2335
2336#endif  // V8_TARGET_ARCH_X64
2337