macro-assembler-x64.cc revision 8389745919cae02139ddc085a63c00d024269cf2
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_X64
6
7#include "src/base/bits.h"
8#include "src/base/division-by-constant.h"
9#include "src/bootstrapper.h"
10#include "src/codegen.h"
11#include "src/debug/debug.h"
12#include "src/heap/heap.h"
13#include "src/register-configuration.h"
14#include "src/x64/assembler-x64.h"
15#include "src/x64/macro-assembler-x64.h"
16
17namespace v8 {
18namespace internal {
19
20MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
21                               CodeObjectRequired create_code_object)
22    : Assembler(arg_isolate, buffer, size),
23      generating_stub_(false),
24      has_frame_(false),
25      root_array_available_(true) {
26  if (create_code_object == CodeObjectRequired::kYes) {
27    code_object_ =
28        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
29  }
30}
31
32
33static const int64_t kInvalidRootRegisterDelta = -1;
34
35
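// Computes the displacement of |other| from the value kept in kRootRegister
// (the roots array start plus kRootRegisterBias). Returns
// kInvalidRootRegisterDelta when the reference may not be addressed relative
// to the root register, e.g. when predictable code size is required and the
// target lies outside the isolate.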
36int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
37  if (predictable_code_size() &&
38      (other.address() < reinterpret_cast<Address>(isolate()) ||
39       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
40    return kInvalidRootRegisterDelta;
41  }
42  Address roots_register_value = kRootRegisterBias +
43      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
44
45  int64_t delta = kInvalidRootRegisterDelta;  // Bogus initialization.
46  if (kPointerSize == kInt64Size) {
47    delta = other.address() - roots_register_value;
48  } else {
49    // For x32, zero-extend the address to 64 bits and calculate the delta.
50    uint64_t o = static_cast<uint32_t>(
51        reinterpret_cast<intptr_t>(other.address()));
52    uint64_t r = static_cast<uint32_t>(
53        reinterpret_cast<intptr_t>(roots_register_value));
54    delta = o - r;
55  }
56  return delta;
57}
58
59
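// Returns an operand addressing |target| relative to kRootRegister when the
// delta fits into a 32-bit displacement; otherwise materializes the address
// in |scratch| and addresses through it.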
60Operand MacroAssembler::ExternalOperand(ExternalReference target,
61                                        Register scratch) {
62  if (root_array_available_ && !serializer_enabled()) {
63    int64_t delta = RootRegisterDelta(target);
64    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
65      return Operand(kRootRegister, static_cast<int32_t>(delta));
66    }
67  }
68  Move(scratch, target);
69  return Operand(scratch, 0);
70}
71
72
73void MacroAssembler::Load(Register destination, ExternalReference source) {
74  if (root_array_available_ && !serializer_enabled()) {
75    int64_t delta = RootRegisterDelta(source);
76    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
77      movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
78      return;
79    }
80  }
81  // Safe code.
82  if (destination.is(rax)) {
83    load_rax(source);
84  } else {
85    Move(kScratchRegister, source);
86    movp(destination, Operand(kScratchRegister, 0));
87  }
88}
89
90
91void MacroAssembler::Store(ExternalReference destination, Register source) {
92  if (root_array_available_ && !serializer_enabled()) {
93    int64_t delta = RootRegisterDelta(destination);
94    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
95      movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
96      return;
97    }
98  }
99  // Safe code.
100  if (source.is(rax)) {
101    store_rax(destination);
102  } else {
103    Move(kScratchRegister, destination);
104    movp(Operand(kScratchRegister, 0), source);
105  }
106}
107
108
109void MacroAssembler::LoadAddress(Register destination,
110                                 ExternalReference source) {
111  if (root_array_available_ && !serializer_enabled()) {
112    int64_t delta = RootRegisterDelta(source);
113    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
114      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
115      return;
116    }
117  }
118  // Safe code.
119  Move(destination, source);
120}
121
122
123int MacroAssembler::LoadAddressSize(ExternalReference source) {
124  if (root_array_available_ && !serializer_enabled()) {
125    // This calculation depends on the internals of LoadAddress.
126    // Its correctness is ensured by the asserts in the Call
127    // instruction below.
128    int64_t delta = RootRegisterDelta(source);
129    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
130      // Operand is leap(scratch, Operand(kRootRegister, delta));
131    // Opcodes: REX.W 8D ModRM Disp8/Disp32 - 4 or 7 bytes.
132      int size = 4;
133      if (!is_int8(static_cast<int32_t>(delta))) {
134        size += 3;  // Need full four-byte displacement in lea.
135      }
136      return size;
137    }
138  }
139  // Size of movp(destination, src);
140  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
141}
142
143
144void MacroAssembler::PushAddress(ExternalReference source) {
145  int64_t address = reinterpret_cast<int64_t>(source.address());
146  if (is_int32(address) && !serializer_enabled()) {
147    if (emit_debug_code()) {
148      Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
149    }
150    Push(Immediate(static_cast<int32_t>(address)));
151    return;
152  }
153  LoadAddress(kScratchRegister, source);
154  Push(kScratchRegister);
155}
156
157
158void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
159  DCHECK(root_array_available_);
160  movp(destination, Operand(kRootRegister,
161                            (index << kPointerSizeLog2) - kRootRegisterBias));
162}
163
164
165void MacroAssembler::LoadRootIndexed(Register destination,
166                                     Register variable_offset,
167                                     int fixed_offset) {
168  DCHECK(root_array_available_);
169  movp(destination,
170       Operand(kRootRegister,
171               variable_offset, times_pointer_size,
172               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
173}
174
175
176void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
177  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
178  DCHECK(root_array_available_);
179  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
180       source);
181}
182
183
184void MacroAssembler::PushRoot(Heap::RootListIndex index) {
185  DCHECK(root_array_available_);
186  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
187}
188
189
190void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
191  DCHECK(root_array_available_);
192  cmpp(with, Operand(kRootRegister,
193                     (index << kPointerSizeLog2) - kRootRegisterBias));
194}
195
196
197void MacroAssembler::CompareRoot(const Operand& with,
198                                 Heap::RootListIndex index) {
199  DCHECK(root_array_available_);
200  DCHECK(!with.AddressUsesRegister(kScratchRegister));
201  LoadRoot(kScratchRegister, index);
202  cmpp(with, kScratchRegister);
203}
204
205
206void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
207                                         Register addr,
208                                         Register scratch,
209                                         SaveFPRegsMode save_fp,
210                                         RememberedSetFinalAction and_then) {
211  if (emit_debug_code()) {
212    Label ok;
213    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
214    int3();
215    bind(&ok);
216  }
217  // Load store buffer top.
218  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
219  // Store pointer to buffer.
220  movp(Operand(scratch, 0), addr);
221  // Increment buffer top.
222  addp(scratch, Immediate(kPointerSize));
223  // Write back new top of buffer.
224  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
225  // Call stub on end of buffer.
226  Label done;
227  // Check for end of buffer.
228  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
229  if (and_then == kReturnAtEnd) {
230    Label buffer_overflowed;
231    j(not_equal, &buffer_overflowed, Label::kNear);
232    ret(0);
233    bind(&buffer_overflowed);
234  } else {
235    DCHECK(and_then == kFallThroughAtEnd);
236    j(equal, &done, Label::kNear);
237  }
238  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
239  CallStub(&store_buffer_overflow);
240  if (and_then == kReturnAtEnd) {
241    ret(0);
242  } else {
243    DCHECK(and_then == kFallThroughAtEnd);
244    bind(&done);
245  }
246}
247
248
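// Branches to |branch| according to whether |object| lies in new space, by
// testing the IN_FROM_SPACE / IN_TO_SPACE flags of the object's page.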
249void MacroAssembler::InNewSpace(Register object,
250                                Register scratch,
251                                Condition cc,
252                                Label* branch,
253                                Label::Distance distance) {
254  const int mask =
255      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
256  CheckPageFlag(object, scratch, mask, cc, branch, distance);
257}
258
259
260void MacroAssembler::RecordWriteField(
261    Register object,
262    int offset,
263    Register value,
264    Register dst,
265    SaveFPRegsMode save_fp,
266    RememberedSetAction remembered_set_action,
267    SmiCheck smi_check,
268    PointersToHereCheck pointers_to_here_check_for_value) {
269  // First, check if a write barrier is even needed. The tests below
270  // catch stores of Smis.
271  Label done;
272
273  // Skip barrier if writing a smi.
274  if (smi_check == INLINE_SMI_CHECK) {
275    JumpIfSmi(value, &done);
276  }
277
278  // Although the object register is tagged, the offset is relative to the start
279  // of the object, so the offset must be a multiple of kPointerSize.
280  DCHECK(IsAligned(offset, kPointerSize));
281
282  leap(dst, FieldOperand(object, offset));
283  if (emit_debug_code()) {
284    Label ok;
285    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
286    j(zero, &ok, Label::kNear);
287    int3();
288    bind(&ok);
289  }
290
291  RecordWrite(object, dst, value, save_fp, remembered_set_action,
292              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
293
294  bind(&done);
295
296  // Clobber clobbered input registers when running with the debug-code flag
297  // turned on to provoke errors.
298  if (emit_debug_code()) {
299    Move(value, kZapValue, Assembler::RelocInfoNone());
300    Move(dst, kZapValue, Assembler::RelocInfoNone());
301  }
302}
303
304
305void MacroAssembler::RecordWriteArray(
306    Register object,
307    Register value,
308    Register index,
309    SaveFPRegsMode save_fp,
310    RememberedSetAction remembered_set_action,
311    SmiCheck smi_check,
312    PointersToHereCheck pointers_to_here_check_for_value) {
313  // First, check if a write barrier is even needed. The tests below
314  // catch stores of Smis.
315  Label done;
316
317  // Skip barrier if writing a smi.
318  if (smi_check == INLINE_SMI_CHECK) {
319    JumpIfSmi(value, &done);
320  }
321
322  // Array access: calculate the destination address. Index is not a smi.
323  Register dst = index;
324  leap(dst, Operand(object, index, times_pointer_size,
325                   FixedArray::kHeaderSize - kHeapObjectTag));
326
327  RecordWrite(object, dst, value, save_fp, remembered_set_action,
328              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
329
330  bind(&done);
331
332  // Clobber clobbered input registers when running with the debug-code flag
333  // turned on to provoke errors.
334  if (emit_debug_code()) {
335    Move(value, kZapValue, Assembler::RelocInfoNone());
336    Move(index, kZapValue, Assembler::RelocInfoNone());
337  }
338}
339
340
341void MacroAssembler::RecordWriteForMap(Register object,
342                                       Register map,
343                                       Register dst,
344                                       SaveFPRegsMode fp_mode) {
345  DCHECK(!object.is(kScratchRegister));
346  DCHECK(!object.is(map));
347  DCHECK(!object.is(dst));
348  DCHECK(!map.is(dst));
349  AssertNotSmi(object);
350
351  if (emit_debug_code()) {
352    Label ok;
353    if (map.is(kScratchRegister)) pushq(map);
354    CompareMap(map, isolate()->factory()->meta_map());
355    if (map.is(kScratchRegister)) popq(map);
356    j(equal, &ok, Label::kNear);
357    int3();
358    bind(&ok);
359  }
360
361  if (!FLAG_incremental_marking) {
362    return;
363  }
364
365  if (emit_debug_code()) {
366    Label ok;
367    if (map.is(kScratchRegister)) pushq(map);
368    cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
369    if (map.is(kScratchRegister)) popq(map);
370    j(equal, &ok, Label::kNear);
371    int3();
372    bind(&ok);
373  }
374
375  // Compute the address.
376  leap(dst, FieldOperand(object, HeapObject::kMapOffset));
377
378  // First, check if a write barrier is even needed. The tests below
379  // catch stores of smis and stores into the young generation.
380  Label done;
381
382  // A single check of the map's page's interesting flag suffices, since it is
383  // only set during incremental collection, in which case the from object's
384  // page's interesting flag is guaranteed to be set as well.  This optimization
385  // relies on the fact that maps can never be in new space.
386  CheckPageFlag(map,
387                map,  // Used as scratch.
388                MemoryChunk::kPointersToHereAreInterestingMask,
389                zero,
390                &done,
391                Label::kNear);
392
393  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
394                       fp_mode);
395  CallStub(&stub);
396
397  bind(&done);
398
399  // Count number of write barriers in generated code.
400  isolate()->counters()->write_barriers_static()->Increment();
401  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
402
403  // Clobber clobbered registers when running with the debug-code flag
404  // turned on to provoke errors.
405  if (emit_debug_code()) {
406    Move(dst, kZapValue, Assembler::RelocInfoNone());
407    Move(map, kZapValue, Assembler::RelocInfoNone());
408  }
409}
410
411
412void MacroAssembler::RecordWrite(
413    Register object,
414    Register address,
415    Register value,
416    SaveFPRegsMode fp_mode,
417    RememberedSetAction remembered_set_action,
418    SmiCheck smi_check,
419    PointersToHereCheck pointers_to_here_check_for_value) {
420  DCHECK(!object.is(value));
421  DCHECK(!object.is(address));
422  DCHECK(!value.is(address));
423  AssertNotSmi(object);
424
425  if (remembered_set_action == OMIT_REMEMBERED_SET &&
426      !FLAG_incremental_marking) {
427    return;
428  }
429
430  if (emit_debug_code()) {
431    Label ok;
432    cmpp(value, Operand(address, 0));
433    j(equal, &ok, Label::kNear);
434    int3();
435    bind(&ok);
436  }
437
438  // First, check if a write barrier is even needed. The tests below
439  // catch stores of smis and stores into the young generation.
440  Label done;
441
442  if (smi_check == INLINE_SMI_CHECK) {
443    // Skip barrier if writing a smi.
444    JumpIfSmi(value, &done);
445  }
446
447  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
448    CheckPageFlag(value,
449                  value,  // Used as scratch.
450                  MemoryChunk::kPointersToHereAreInterestingMask,
451                  zero,
452                  &done,
453                  Label::kNear);
454  }
455
456  CheckPageFlag(object,
457                value,  // Used as scratch.
458                MemoryChunk::kPointersFromHereAreInterestingMask,
459                zero,
460                &done,
461                Label::kNear);
462
463  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
464                       fp_mode);
465  CallStub(&stub);
466
467  bind(&done);
468
469  // Count number of write barriers in generated code.
470  isolate()->counters()->write_barriers_static()->Increment();
471  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
472
473  // Clobber clobbered registers when running with the debug-code flag
474  // turned on to provoke errors.
475  if (emit_debug_code()) {
476    Move(address, kZapValue, Assembler::RelocInfoNone());
477    Move(value, kZapValue, Assembler::RelocInfoNone());
478  }
479}
480
481void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
482                                               Register code_entry,
483                                               Register scratch) {
484  const int offset = JSFunction::kCodeEntryOffset;
485
486  // The input registers are fixed to make calling the C write barrier function
487  // easier.
488  DCHECK(js_function.is(rdi));
489  DCHECK(code_entry.is(rcx));
490  DCHECK(scratch.is(rax));
491
492  // Since a code entry (value) is always in old space, we don't need to update
493  // remembered set. If incremental marking is off, there is nothing for us to
494  // do.
495  if (!FLAG_incremental_marking) return;
496
497  AssertNotSmi(js_function);
498
499  if (emit_debug_code()) {
500    Label ok;
501    leap(scratch, FieldOperand(js_function, offset));
502    cmpp(code_entry, Operand(scratch, 0));
503    j(equal, &ok, Label::kNear);
504    int3();
505    bind(&ok);
506  }
507
508  // First, check if a write barrier is even needed. The tests below
509  // catch stores of smis and stores into the young generation.
510  Label done;
511
512  CheckPageFlag(code_entry, scratch,
513                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
514                Label::kNear);
515  CheckPageFlag(js_function, scratch,
516                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
517                Label::kNear);
518
519  // Save input registers.
520  Push(js_function);
521  Push(code_entry);
522
523  const Register dst = scratch;
524  leap(dst, FieldOperand(js_function, offset));
525
526  // Save caller-saved registers.
527  PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
528
529  int argument_count = 3;
530  PrepareCallCFunction(argument_count);
531
532  // Load the argument registers.
533  if (arg_reg_1.is(rcx)) {
534    // Windows calling convention.
535    DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
536
537    movp(arg_reg_1, js_function);  // rcx gets rdi.
538    movp(arg_reg_2, dst);          // rdx gets rax.
539  } else {
540    // AMD64 calling convention.
541    DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
542
543    // rdi is already loaded with js_function.
544    movp(arg_reg_2, dst);  // rsi gets rax.
545  }
546  Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
547
548  {
549    AllowExternalCallThatCantCauseGC scope(this);
550    CallCFunction(
551        ExternalReference::incremental_marking_record_write_code_entry_function(
552            isolate()),
553        argument_count);
554  }
555
556  // Restore caller-saved registers.
557  PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
558
559  // Restore input registers.
560  Pop(code_entry);
561  Pop(js_function);
562
563  bind(&done);
564}
565
566void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
567  if (emit_debug_code()) Check(cc, reason);
568}
569
570
571void MacroAssembler::AssertFastElements(Register elements) {
572  if (emit_debug_code()) {
573    Label ok;
574    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
575                Heap::kFixedArrayMapRootIndex);
576    j(equal, &ok, Label::kNear);
577    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
578                Heap::kFixedDoubleArrayMapRootIndex);
579    j(equal, &ok, Label::kNear);
580    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
581                Heap::kFixedCOWArrayMapRootIndex);
582    j(equal, &ok, Label::kNear);
583    Abort(kJSObjectWithFastElementsMapHasSlowElements);
584    bind(&ok);
585  }
586}
587
588
589void MacroAssembler::Check(Condition cc, BailoutReason reason) {
590  Label L;
591  j(cc, &L, Label::kNear);
592  Abort(reason);
593  // Control will not return here.
594  bind(&L);
595}
596
597
598void MacroAssembler::CheckStackAlignment() {
599  int frame_alignment = base::OS::ActivationFrameAlignment();
600  int frame_alignment_mask = frame_alignment - 1;
601  if (frame_alignment > kPointerSize) {
602    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
603    Label alignment_as_expected;
604    testp(rsp, Immediate(frame_alignment_mask));
605    j(zero, &alignment_as_expected, Label::kNear);
606    // Abort if stack is not aligned.
607    int3();
608    bind(&alignment_as_expected);
609  }
610}
611
612
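// Jumps to |then_label| when |result| is zero and |op| is negative, i.e. when
// an integer operation produced a zero that should actually be -0.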
613void MacroAssembler::NegativeZeroTest(Register result,
614                                      Register op,
615                                      Label* then_label) {
616  Label ok;
617  testl(result, result);
618  j(not_zero, &ok, Label::kNear);
619  testl(op, op);
620  j(sign, then_label);
621  bind(&ok);
622}
623
624
625void MacroAssembler::Abort(BailoutReason reason) {
626#ifdef DEBUG
627  const char* msg = GetBailoutReason(reason);
628  if (msg != NULL) {
629    RecordComment("Abort message: ");
630    RecordComment(msg);
631  }
632
633  if (FLAG_trap_on_abort) {
634    int3();
635    return;
636  }
637#endif
638
639  Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
640       Assembler::RelocInfoNone());
641  Push(kScratchRegister);
642
643  if (!has_frame_) {
644    // We don't actually want to generate a pile of code for this, so just
645    // claim there is a stack frame, without generating one.
646    FrameScope scope(this, StackFrame::NONE);
647    CallRuntime(Runtime::kAbort);
648  } else {
649    CallRuntime(Runtime::kAbort);
650  }
651  // Control will not return here.
652  int3();
653}
654
655
656void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
657  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
658  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
659}
660
661
662void MacroAssembler::TailCallStub(CodeStub* stub) {
663  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
664}
665
666
667void MacroAssembler::StubReturn(int argc) {
668  DCHECK(argc >= 1 && generating_stub());
669  ret((argc - 1) * kPointerSize);
670}
671
672
673bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
674  return has_frame_ || !stub->SometimesSetsUpAFrame();
675}
676
677
678void MacroAssembler::IndexFromHash(Register hash, Register index) {
679  // The assert checks that the constants for the maximum number of digits
680  // for an array index cached in the hash field and the number of bits
681  // reserved for it do not conflict.
682  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
683         (1 << String::kArrayIndexValueBits));
684  if (!hash.is(index)) {
685    movl(index, hash);
686  }
687  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
688}
689
690
691void MacroAssembler::CallRuntime(const Runtime::Function* f,
692                                 int num_arguments,
693                                 SaveFPRegsMode save_doubles) {
694  // If the expected number of arguments of the runtime function is
695  // constant, we check that the actual number of arguments matches the
696  // expectation.
697  CHECK(f->nargs < 0 || f->nargs == num_arguments);
698
699  // TODO(1236192): Most runtime routines don't need the number of
700  // arguments passed in because it is constant. At some point we
701  // should remove this need and make the runtime routine entry code
702  // smarter.
703  Set(rax, num_arguments);
704  LoadAddress(rbx, ExternalReference(f, isolate()));
705  CEntryStub ces(isolate(), f->result_size, save_doubles);
706  CallStub(&ces);
707}
708
709
710void MacroAssembler::CallExternalReference(const ExternalReference& ext,
711                                           int num_arguments) {
712  Set(rax, num_arguments);
713  LoadAddress(rbx, ext);
714
715  CEntryStub stub(isolate(), 1);
716  CallStub(&stub);
717}
718
719
720void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
721  // ----------- S t a t e -------------
722  //  -- rsp[0]                 : return address
723  //  -- rsp[8]                 : argument num_arguments - 1
724  //  ...
725  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
726  //
727  //  For runtime functions with variable arguments:
728  //  -- rax                    : number of arguments
729  // -----------------------------------
730
731  const Runtime::Function* function = Runtime::FunctionForId(fid);
732  DCHECK_EQ(1, function->result_size);
733  if (function->nargs >= 0) {
734    Set(rax, function->nargs);
735  }
736  JumpToExternalReference(ExternalReference(fid, isolate()));
737}
738
739
740void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
741  // Set the entry point and jump to the C entry runtime stub.
742  LoadAddress(rbx, ext);
743  CEntryStub ces(isolate(), 1);
744  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
745}
746
747
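// Registers pushed by PushCallerSaved and popped (in reverse order) by
// PopCallerSaved below.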
748#define REG(Name) \
749  { Register::kCode_##Name }
750
751static const Register saved_regs[] = {
752  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
753  REG(r9), REG(r10), REG(r11)
754};
755
756#undef REG
757
758static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
759
760
761void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
762                                     Register exclusion1,
763                                     Register exclusion2,
764                                     Register exclusion3) {
765  // We don't allow a GC during a store buffer overflow so there is no need to
766  // store the registers in any particular way, but we do have to store and
767  // restore them.
768  for (int i = 0; i < kNumberOfSavedRegs; i++) {
769    Register reg = saved_regs[i];
770    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
771      pushq(reg);
772    }
773  }
774  // r12 to r15 are callee-saved on all platforms.
775  if (fp_mode == kSaveFPRegs) {
776    subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
777    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
778      XMMRegister reg = XMMRegister::from_code(i);
779      Movsd(Operand(rsp, i * kDoubleSize), reg);
780    }
781  }
782}
783
784
785void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
786                                    Register exclusion1,
787                                    Register exclusion2,
788                                    Register exclusion3) {
789  if (fp_mode == kSaveFPRegs) {
790    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
791      XMMRegister reg = XMMRegister::from_code(i);
792      Movsd(reg, Operand(rsp, i * kDoubleSize));
793    }
794    addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
795  }
796  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
797    Register reg = saved_regs[i];
798    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
799      popq(reg);
800    }
801  }
802}
803
804
805void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
806  if (CpuFeatures::IsSupported(AVX)) {
807    CpuFeatureScope scope(this, AVX);
808    vcvtss2sd(dst, src, src);
809  } else {
810    cvtss2sd(dst, src);
811  }
812}
813
814
815void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
816  if (CpuFeatures::IsSupported(AVX)) {
817    CpuFeatureScope scope(this, AVX);
818    vcvtss2sd(dst, dst, src);
819  } else {
820    cvtss2sd(dst, src);
821  }
822}
823
824
825void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
826  if (CpuFeatures::IsSupported(AVX)) {
827    CpuFeatureScope scope(this, AVX);
828    vcvtsd2ss(dst, src, src);
829  } else {
830    cvtsd2ss(dst, src);
831  }
832}
833
834
835void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
836  if (CpuFeatures::IsSupported(AVX)) {
837    CpuFeatureScope scope(this, AVX);
838    vcvtsd2ss(dst, dst, src);
839  } else {
840    cvtsd2ss(dst, src);
841  }
842}
843
844
845void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
846  if (CpuFeatures::IsSupported(AVX)) {
847    CpuFeatureScope scope(this, AVX);
848    vxorpd(dst, dst, dst);
849    vcvtlsi2sd(dst, dst, src);
850  } else {
851    xorpd(dst, dst);
852    cvtlsi2sd(dst, src);
853  }
854}
855
856
857void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
858  if (CpuFeatures::IsSupported(AVX)) {
859    CpuFeatureScope scope(this, AVX);
860    vxorpd(dst, dst, dst);
861    vcvtlsi2sd(dst, dst, src);
862  } else {
863    xorpd(dst, dst);
864    cvtlsi2sd(dst, src);
865  }
866}
867
868
869void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
870  if (CpuFeatures::IsSupported(AVX)) {
871    CpuFeatureScope scope(this, AVX);
872    vxorps(dst, dst, dst);
873    vcvtlsi2ss(dst, dst, src);
874  } else {
875    xorps(dst, dst);
876    cvtlsi2ss(dst, src);
877  }
878}
879
880
881void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
882  if (CpuFeatures::IsSupported(AVX)) {
883    CpuFeatureScope scope(this, AVX);
884    vxorps(dst, dst, dst);
885    vcvtlsi2ss(dst, dst, src);
886  } else {
887    xorps(dst, dst);
888    cvtlsi2ss(dst, src);
889  }
890}
891
892
893void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
894  if (CpuFeatures::IsSupported(AVX)) {
895    CpuFeatureScope scope(this, AVX);
896    vxorps(dst, dst, dst);
897    vcvtqsi2ss(dst, dst, src);
898  } else {
899    xorps(dst, dst);
900    cvtqsi2ss(dst, src);
901  }
902}
903
904
905void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
906  if (CpuFeatures::IsSupported(AVX)) {
907    CpuFeatureScope scope(this, AVX);
908    vxorps(dst, dst, dst);
909    vcvtqsi2ss(dst, dst, src);
910  } else {
911    xorps(dst, dst);
912    cvtqsi2ss(dst, src);
913  }
914}
915
916
917void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
918  if (CpuFeatures::IsSupported(AVX)) {
919    CpuFeatureScope scope(this, AVX);
920    vxorpd(dst, dst, dst);
921    vcvtqsi2sd(dst, dst, src);
922  } else {
923    xorpd(dst, dst);
924    cvtqsi2sd(dst, src);
925  }
926}
927
928
929void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
930  if (CpuFeatures::IsSupported(AVX)) {
931    CpuFeatureScope scope(this, AVX);
932    vxorpd(dst, dst, dst);
933    vcvtqsi2sd(dst, dst, src);
934  } else {
935    xorpd(dst, dst);
936    cvtqsi2sd(dst, src);
937  }
938}
939
940
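// Converts an unsigned 64-bit integer to float. There is no unsigned variant
// of cvtqsi2ss, so when the sign bit is set the value is halved (with the
// dropped low bit or-ed back in to keep rounding correct), converted as a
// signed value, and the result doubled.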
941void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
942  Label msb_set_src;
943  Label jmp_return;
944  testq(src, src);
945  j(sign, &msb_set_src, Label::kNear);
946  Cvtqsi2ss(dst, src);
947  jmp(&jmp_return, Label::kNear);
948  bind(&msb_set_src);
949  movq(tmp, src);
950  shrq(src, Immediate(1));
951  // Recover the least significant bit to avoid rounding errors.
952  andq(tmp, Immediate(1));
953  orq(src, tmp);
954  Cvtqsi2ss(dst, src);
955  addss(dst, dst);
956  bind(&jmp_return);
957}
958
959
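// Unsigned 64-bit integer to double conversion, using the same
// halve-and-double trick as Cvtqui2ss above.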
960void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
961  Label msb_set_src;
962  Label jmp_return;
963  testq(src, src);
964  j(sign, &msb_set_src, Label::kNear);
965  Cvtqsi2sd(dst, src);
966  jmp(&jmp_return, Label::kNear);
967  bind(&msb_set_src);
968  movq(tmp, src);
969  shrq(src, Immediate(1));
970  andq(tmp, Immediate(1));
971  orq(src, tmp);
972  Cvtqsi2sd(dst, src);
973  addsd(dst, dst);
974  bind(&jmp_return);
975}
976
977
978void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
979  if (CpuFeatures::IsSupported(AVX)) {
980    CpuFeatureScope scope(this, AVX);
981    vcvtsd2si(dst, src);
982  } else {
983    cvtsd2si(dst, src);
984  }
985}
986
987
988void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
989  if (CpuFeatures::IsSupported(AVX)) {
990    CpuFeatureScope scope(this, AVX);
991    vcvttss2si(dst, src);
992  } else {
993    cvttss2si(dst, src);
994  }
995}
996
997
998void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
999  if (CpuFeatures::IsSupported(AVX)) {
1000    CpuFeatureScope scope(this, AVX);
1001    vcvttss2si(dst, src);
1002  } else {
1003    cvttss2si(dst, src);
1004  }
1005}
1006
1007
1008void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
1009  if (CpuFeatures::IsSupported(AVX)) {
1010    CpuFeatureScope scope(this, AVX);
1011    vcvttsd2si(dst, src);
1012  } else {
1013    cvttsd2si(dst, src);
1014  }
1015}
1016
1017
1018void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
1019  if (CpuFeatures::IsSupported(AVX)) {
1020    CpuFeatureScope scope(this, AVX);
1021    vcvttsd2si(dst, src);
1022  } else {
1023    cvttsd2si(dst, src);
1024  }
1025}
1026
1027
1028void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
1029  if (CpuFeatures::IsSupported(AVX)) {
1030    CpuFeatureScope scope(this, AVX);
1031    vcvttss2siq(dst, src);
1032  } else {
1033    cvttss2siq(dst, src);
1034  }
1035}
1036
1037
1038void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
1039  if (CpuFeatures::IsSupported(AVX)) {
1040    CpuFeatureScope scope(this, AVX);
1041    vcvttss2siq(dst, src);
1042  } else {
1043    cvttss2siq(dst, src);
1044  }
1045}
1046
1047
1048void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
1049  if (CpuFeatures::IsSupported(AVX)) {
1050    CpuFeatureScope scope(this, AVX);
1051    vcvttsd2siq(dst, src);
1052  } else {
1053    cvttsd2siq(dst, src);
1054  }
1055}
1056
1057
1058void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) {
1059  if (CpuFeatures::IsSupported(AVX)) {
1060    CpuFeatureScope scope(this, AVX);
1061    vcvttsd2siq(dst, src);
1062  } else {
1063    cvttsd2siq(dst, src);
1064  }
1065}
1066
1067
1068void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
1069  DCHECK(!r.IsDouble());
1070  if (r.IsInteger8()) {
1071    movsxbq(dst, src);
1072  } else if (r.IsUInteger8()) {
1073    movzxbl(dst, src);
1074  } else if (r.IsInteger16()) {
1075    movsxwq(dst, src);
1076  } else if (r.IsUInteger16()) {
1077    movzxwl(dst, src);
1078  } else if (r.IsInteger32()) {
1079    movl(dst, src);
1080  } else {
1081    movp(dst, src);
1082  }
1083}
1084
1085
1086void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
1087  DCHECK(!r.IsDouble());
1088  if (r.IsInteger8() || r.IsUInteger8()) {
1089    movb(dst, src);
1090  } else if (r.IsInteger16() || r.IsUInteger16()) {
1091    movw(dst, src);
1092  } else if (r.IsInteger32()) {
1093    movl(dst, src);
1094  } else {
1095    if (r.IsHeapObject()) {
1096      AssertNotSmi(src);
1097    } else if (r.IsSmi()) {
1098      AssertSmi(src);
1099    }
1100    movp(dst, src);
1101  }
1102}
1103
1104
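// Loads the constant |x| into |dst| with the shortest encoding: xor for zero,
// a 32-bit move (which zero-extends) for unsigned 32-bit values, a
// sign-extended 32-bit immediate for signed 32-bit values, and a full 64-bit
// movq otherwise.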
1105void MacroAssembler::Set(Register dst, int64_t x) {
1106  if (x == 0) {
1107    xorl(dst, dst);
1108  } else if (is_uint32(x)) {
1109    movl(dst, Immediate(static_cast<uint32_t>(x)));
1110  } else if (is_int32(x)) {
1111    movq(dst, Immediate(static_cast<int32_t>(x)));
1112  } else {
1113    movq(dst, x);
1114  }
1115}
1116
1117
1118void MacroAssembler::Set(const Operand& dst, intptr_t x) {
1119  if (kPointerSize == kInt64Size) {
1120    if (is_int32(x)) {
1121      movp(dst, Immediate(static_cast<int32_t>(x)));
1122    } else {
1123      Set(kScratchRegister, x);
1124      movp(dst, kScratchRegister);
1125    }
1126  } else {
1127    movp(dst, Immediate(static_cast<int32_t>(x)));
1128  }
1129}
1130
1131
1132// ----------------------------------------------------------------------------
1133// Smi tagging, untagging and tag detection.
1134
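// Constants that do not fit in kMaxBits are treated as unsafe to embed
// directly as immediates; SafeMove and SafePush below xor such values with
// the JIT cookie so that attacker-chosen bit patterns do not appear verbatim
// in generated code (a JIT-spraying mitigation).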
1135bool MacroAssembler::IsUnsafeInt(const int32_t x) {
1136  static const int kMaxBits = 17;
1137  return !is_intn(x, kMaxBits);
1138}
1139
1140
1141void MacroAssembler::SafeMove(Register dst, Smi* src) {
1142  DCHECK(!dst.is(kScratchRegister));
1143  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1144    if (SmiValuesAre32Bits()) {
1145      // JIT cookie can be converted to Smi.
1146      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1147      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1148      xorp(dst, kScratchRegister);
1149    } else {
1150      DCHECK(SmiValuesAre31Bits());
1151      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1152      movp(dst, Immediate(value ^ jit_cookie()));
1153      xorp(dst, Immediate(jit_cookie()));
1154    }
1155  } else {
1156    Move(dst, src);
1157  }
1158}
1159
1160
1161void MacroAssembler::SafePush(Smi* src) {
1162  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1163    if (SmiValuesAre32Bits()) {
1164      // JIT cookie can be converted to Smi.
1165      Push(Smi::FromInt(src->value() ^ jit_cookie()));
1166      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1167      xorp(Operand(rsp, 0), kScratchRegister);
1168    } else {
1169      DCHECK(SmiValuesAre31Bits());
1170      int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
1171      Push(Immediate(value ^ jit_cookie()));
1172      xorp(Operand(rsp, 0), Immediate(jit_cookie()));
1173    }
1174  } else {
1175    Push(src);
1176  }
1177}
1178
1179
1180Register MacroAssembler::GetSmiConstant(Smi* source) {
1181  STATIC_ASSERT(kSmiTag == 0);
1182  int value = source->value();
1183  if (value == 0) {
1184    xorl(kScratchRegister, kScratchRegister);
1185    return kScratchRegister;
1186  }
1187  LoadSmiConstant(kScratchRegister, source);
1188  return kScratchRegister;
1189}
1190
1191
1192void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
1193  STATIC_ASSERT(kSmiTag == 0);
1194  int value = source->value();
1195  if (value == 0) {
1196    xorl(dst, dst);
1197  } else {
1198    Move(dst, source, Assembler::RelocInfoNone());
1199  }
1200}
1201
1202
1203void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1204  STATIC_ASSERT(kSmiTag == 0);
1205  if (!dst.is(src)) {
1206    movl(dst, src);
1207  }
1208  shlp(dst, Immediate(kSmiShift));
1209}
1210
1211
1212void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1213  if (emit_debug_code()) {
1214    testb(dst, Immediate(0x01));
1215    Label ok;
1216    j(zero, &ok, Label::kNear);
1217    Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
1218    bind(&ok);
1219  }
1220
1221  if (SmiValuesAre32Bits()) {
1222    DCHECK(kSmiShift % kBitsPerByte == 0);
1223    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1224  } else {
1225    DCHECK(SmiValuesAre31Bits());
1226    Integer32ToSmi(kScratchRegister, src);
1227    movp(dst, kScratchRegister);
1228  }
1229}
1230
1231
1232void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1233                                                Register src,
1234                                                int constant) {
1235  if (dst.is(src)) {
1236    addl(dst, Immediate(constant));
1237  } else {
1238    leal(dst, Operand(src, constant));
1239  }
1240  shlp(dst, Immediate(kSmiShift));
1241}
1242
1243
1244void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1245  STATIC_ASSERT(kSmiTag == 0);
1246  if (!dst.is(src)) {
1247    movp(dst, src);
1248  }
1249
1250  if (SmiValuesAre32Bits()) {
1251    shrp(dst, Immediate(kSmiShift));
1252  } else {
1253    DCHECK(SmiValuesAre31Bits());
1254    sarl(dst, Immediate(kSmiShift));
1255  }
1256}
1257
1258
1259void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1260  if (SmiValuesAre32Bits()) {
1261    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1262  } else {
1263    DCHECK(SmiValuesAre31Bits());
1264    movl(dst, src);
1265    sarl(dst, Immediate(kSmiShift));
1266  }
1267}
1268
1269
1270void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1271  STATIC_ASSERT(kSmiTag == 0);
1272  if (!dst.is(src)) {
1273    movp(dst, src);
1274  }
1275  sarp(dst, Immediate(kSmiShift));
1276  if (kPointerSize == kInt32Size) {
1277    // Sign extend to 64-bit.
1278    movsxlq(dst, dst);
1279  }
1280}
1281
1282
1283void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1284  if (SmiValuesAre32Bits()) {
1285    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1286  } else {
1287    DCHECK(SmiValuesAre31Bits());
1288    movp(dst, src);
1289    SmiToInteger64(dst, dst);
1290  }
1291}
1292
1293
1294void MacroAssembler::SmiTest(Register src) {
1295  AssertSmi(src);
1296  testp(src, src);
1297}
1298
1299
1300void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1301  AssertSmi(smi1);
1302  AssertSmi(smi2);
1303  cmpp(smi1, smi2);
1304}
1305
1306
1307void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1308  AssertSmi(dst);
1309  Cmp(dst, src);
1310}
1311
1312
1313void MacroAssembler::Cmp(Register dst, Smi* src) {
1314  DCHECK(!dst.is(kScratchRegister));
1315  if (src->value() == 0) {
1316    testp(dst, dst);
1317  } else {
1318    Register constant_reg = GetSmiConstant(src);
1319    cmpp(dst, constant_reg);
1320  }
1321}
1322
1323
1324void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1325  AssertSmi(dst);
1326  AssertSmi(src);
1327  cmpp(dst, src);
1328}
1329
1330
1331void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1332  AssertSmi(dst);
1333  AssertSmi(src);
1334  cmpp(dst, src);
1335}
1336
1337
1338void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1339  AssertSmi(dst);
1340  if (SmiValuesAre32Bits()) {
1341    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1342  } else {
1343    DCHECK(SmiValuesAre31Bits());
1344    cmpl(dst, Immediate(src));
1345  }
1346}
1347
1348
1349void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1350  // The Operand cannot use the smi register.
1351  Register smi_reg = GetSmiConstant(src);
1352  DCHECK(!dst.AddressUsesRegister(smi_reg));
1353  cmpp(dst, smi_reg);
1354}
1355
1356
1357void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1358  if (SmiValuesAre32Bits()) {
1359    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1360  } else {
1361    DCHECK(SmiValuesAre31Bits());
1362    SmiToInteger32(kScratchRegister, dst);
1363    cmpl(kScratchRegister, src);
1364  }
1365}
1366
1367
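// Computes dst = untagged(src) << power, folding the smi-untagging shift and
// the multiplication by 2^power into a single shift.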
1368void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1369                                                           Register src,
1370                                                           int power) {
1371  DCHECK(power >= 0);
1372  DCHECK(power < 64);
1373  if (power == 0) {
1374    SmiToInteger64(dst, src);
1375    return;
1376  }
1377  if (!dst.is(src)) {
1378    movp(dst, src);
1379  }
1380  if (power < kSmiShift) {
1381    sarp(dst, Immediate(kSmiShift - power));
1382  } else if (power > kSmiShift) {
1383    shlp(dst, Immediate(power - kSmiShift));
1384  }
1385}
1386
1387
1388void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1389                                                         Register src,
1390                                                         int power) {
1391  DCHECK((0 <= power) && (power < 32));
1392  if (dst.is(src)) {
1393    shrp(dst, Immediate(power + kSmiShift));
1394  } else {
1395    UNIMPLEMENTED();  // Not used.
1396  }
1397}
1398
1399
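// Computes dst = src1 | src2 and jumps to |on_not_smis| if either input is
// not a smi. When dst aliases an input, the OR is done in kScratchRegister
// first so that the inputs are still intact on the bailout path.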
1400void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1401                                 Label* on_not_smis,
1402                                 Label::Distance near_jump) {
1403  if (dst.is(src1) || dst.is(src2)) {
1404    DCHECK(!src1.is(kScratchRegister));
1405    DCHECK(!src2.is(kScratchRegister));
1406    movp(kScratchRegister, src1);
1407    orp(kScratchRegister, src2);
1408    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1409    movp(dst, kScratchRegister);
1410  } else {
1411    movp(dst, src1);
1412    orp(dst, src2);
1413    JumpIfNotSmi(dst, on_not_smis, near_jump);
1414  }
1415}
1416
1417
1418Condition MacroAssembler::CheckSmi(Register src) {
1419  STATIC_ASSERT(kSmiTag == 0);
1420  testb(src, Immediate(kSmiTagMask));
1421  return zero;
1422}
1423
1424
1425Condition MacroAssembler::CheckSmi(const Operand& src) {
1426  STATIC_ASSERT(kSmiTag == 0);
1427  testb(src, Immediate(kSmiTagMask));
1428  return zero;
1429}
1430
1431
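// A non-negative smi has both the tag bit (bit 0) and the sign bit (bit 63)
// clear. Rotating left by one places the sign bit next to the tag bit, so a
// single testb against 3 checks both at once.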
1432Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1433  STATIC_ASSERT(kSmiTag == 0);
1434  // Test that both bits of the mask 0x8000000000000001 are zero.
1435  movp(kScratchRegister, src);
1436  rolp(kScratchRegister, Immediate(1));
1437  testb(kScratchRegister, Immediate(3));
1438  return zero;
1439}
1440
1441
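// In the 32-bit-smi case the low 32 bits of a smi are zero, while heap object
// pointers carry tag 01 in their low bits; adding the two values therefore
// leaves the low two bits zero only if both inputs are smis.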
1442Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1443  if (first.is(second)) {
1444    return CheckSmi(first);
1445  }
1446  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1447  if (SmiValuesAre32Bits()) {
1448    leal(kScratchRegister, Operand(first, second, times_1, 0));
1449    testb(kScratchRegister, Immediate(0x03));
1450  } else {
1451    DCHECK(SmiValuesAre31Bits());
1452    movl(kScratchRegister, first);
1453    orl(kScratchRegister, second);
1454    testb(kScratchRegister, Immediate(kSmiTagMask));
1455  }
1456  return zero;
1457}
1458
1459
1460Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1461                                                  Register second) {
1462  if (first.is(second)) {
1463    return CheckNonNegativeSmi(first);
1464  }
1465  movp(kScratchRegister, first);
1466  orp(kScratchRegister, second);
1467  rolp(kScratchRegister, Immediate(1));
1468  testl(kScratchRegister, Immediate(3));
1469  return zero;
1470}
1471
1472
1473Condition MacroAssembler::CheckEitherSmi(Register first,
1474                                         Register second,
1475                                         Register scratch) {
1476  if (first.is(second)) {
1477    return CheckSmi(first);
1478  }
1479  if (scratch.is(second)) {
1480    andl(scratch, first);
1481  } else {
1482    if (!scratch.is(first)) {
1483      movl(scratch, first);
1484    }
1485    andl(scratch, second);
1486  }
1487  testb(scratch, Immediate(kSmiTagMask));
1488  return zero;
1489}
1490
1491
1492Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1493  if (SmiValuesAre32Bits()) {
1494    // A 32-bit integer value can always be converted to a smi.
1495    return always;
1496  } else {
1497    DCHECK(SmiValuesAre31Bits());
1498    cmpl(src, Immediate(0xc0000000));
1499    return positive;
1500  }
1501}
1502
1503
1504Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1505  if (SmiValuesAre32Bits()) {
1506    // An unsigned 32-bit integer value is valid as long as the high bit
1507    // is not set.
1508    testl(src, src);
1509    return positive;
1510  } else {
1511    DCHECK(SmiValuesAre31Bits());
1512    testl(src, Immediate(0xc0000000));
1513    return zero;
1514  }
1515}
1516
1517
1518void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1519  if (dst.is(src)) {
1520    andl(dst, Immediate(kSmiTagMask));
1521  } else {
1522    movl(dst, Immediate(kSmiTagMask));
1523    andl(dst, src);
1524  }
1525}
1526
1527
1528void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1529  if (!(src.AddressUsesRegister(dst))) {
1530    movl(dst, Immediate(kSmiTagMask));
1531    andl(dst, src);
1532  } else {
1533    movl(dst, src);
1534    andl(dst, Immediate(kSmiTagMask));
1535  }
1536}
1537
1538
1539void MacroAssembler::JumpIfValidSmiValue(Register src,
1540                                         Label* on_valid,
1541                                         Label::Distance near_jump) {
1542  Condition is_valid = CheckInteger32ValidSmiValue(src);
1543  j(is_valid, on_valid, near_jump);
1544}
1545
1546
1547void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1548                                            Label* on_invalid,
1549                                            Label::Distance near_jump) {
1550  Condition is_valid = CheckInteger32ValidSmiValue(src);
1551  j(NegateCondition(is_valid), on_invalid, near_jump);
1552}
1553
1554
1555void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
1556                                             Label* on_valid,
1557                                             Label::Distance near_jump) {
1558  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1559  j(is_valid, on_valid, near_jump);
1560}
1561
1562
1563void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1564                                                Label* on_invalid,
1565                                                Label::Distance near_jump) {
1566  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1567  j(NegateCondition(is_valid), on_invalid, near_jump);
1568}
1569
1570
1571void MacroAssembler::JumpIfSmi(Register src,
1572                               Label* on_smi,
1573                               Label::Distance near_jump) {
1574  Condition smi = CheckSmi(src);
1575  j(smi, on_smi, near_jump);
1576}
1577
1578
1579void MacroAssembler::JumpIfNotSmi(Register src,
1580                                  Label* on_not_smi,
1581                                  Label::Distance near_jump) {
1582  Condition smi = CheckSmi(src);
1583  j(NegateCondition(smi), on_not_smi, near_jump);
1584}
1585
1586
1587void MacroAssembler::JumpUnlessNonNegativeSmi(
1588    Register src, Label* on_not_smi_or_negative,
1589    Label::Distance near_jump) {
1590  Condition non_negative_smi = CheckNonNegativeSmi(src);
1591  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1592}
1593
1594
1595void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1596                                             Smi* constant,
1597                                             Label* on_equals,
1598                                             Label::Distance near_jump) {
1599  SmiCompare(src, constant);
1600  j(equal, on_equals, near_jump);
1601}
1602
1603
1604void MacroAssembler::JumpIfNotBothSmi(Register src1,
1605                                      Register src2,
1606                                      Label* on_not_both_smi,
1607                                      Label::Distance near_jump) {
1608  Condition both_smi = CheckBothSmi(src1, src2);
1609  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1610}
1611
1612
1613void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1614                                                  Register src2,
1615                                                  Label* on_not_both_smi,
1616                                                  Label::Distance near_jump) {
1617  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1618  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1619}
1620
1621
1622void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1623  if (constant->value() == 0) {
1624    if (!dst.is(src)) {
1625      movp(dst, src);
1626    }
1627    return;
1628  } else if (dst.is(src)) {
1629    DCHECK(!dst.is(kScratchRegister));
1630    Register constant_reg = GetSmiConstant(constant);
1631    addp(dst, constant_reg);
1632  } else {
1633    LoadSmiConstant(dst, constant);
1634    addp(dst, src);
1635  }
1636}
1637
1638
1639void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1640  if (constant->value() != 0) {
1641    if (SmiValuesAre32Bits()) {
1642      addl(Operand(dst, kSmiShift / kBitsPerByte),
1643           Immediate(constant->value()));
1644    } else {
1645      DCHECK(SmiValuesAre31Bits());
1646      addp(dst, Immediate(constant));
1647    }
1648  }
1649}
1650
1651
1652void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant,
1653                                    SmiOperationConstraints constraints,
1654                                    Label* bailout_label,
1655                                    Label::Distance near_jump) {
1656  if (constant->value() == 0) {
1657    if (!dst.is(src)) {
1658      movp(dst, src);
1659    }
1660  } else if (dst.is(src)) {
1661    DCHECK(!dst.is(kScratchRegister));
1662    LoadSmiConstant(kScratchRegister, constant);
1663    addp(dst, kScratchRegister);
1664    if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1665      j(no_overflow, bailout_label, near_jump);
1666      DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1667      subp(dst, kScratchRegister);
1668    } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1669      if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1670        Label done;
1671        j(no_overflow, &done, Label::kNear);
1672        subp(dst, kScratchRegister);
1673        jmp(bailout_label, near_jump);
1674        bind(&done);
1675      } else {
1676        // Bail out on overflow; src does not need to be preserved here.
1677        j(overflow, bailout_label, near_jump);
1678      }
1679    } else {
1680      UNREACHABLE();
1681    }
1682  } else {
1683    DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1684    DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1685    LoadSmiConstant(dst, constant);
1686    addp(dst, src);
1687    j(overflow, bailout_label, near_jump);
1688  }
1689}
1690
1691
1692void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1693  if (constant->value() == 0) {
1694    if (!dst.is(src)) {
1695      movp(dst, src);
1696    }
1697  } else if (dst.is(src)) {
1698    DCHECK(!dst.is(kScratchRegister));
1699    Register constant_reg = GetSmiConstant(constant);
1700    subp(dst, constant_reg);
1701  } else {
1702    if (constant->value() == Smi::kMinValue) {
1703      LoadSmiConstant(dst, constant);
1704      // Adding and subtracting the min-value gives the same result; it only
1705      // differs on the overflow bit, which we don't check here.
1706      addp(dst, src);
1707    } else {
1708      // Subtract by adding the negation.
1709      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1710      addp(dst, src);
1711    }
1712  }
1713}
1714
1715
1716void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant,
1717                                    SmiOperationConstraints constraints,
1718                                    Label* bailout_label,
1719                                    Label::Distance near_jump) {
1720  if (constant->value() == 0) {
1721    if (!dst.is(src)) {
1722      movp(dst, src);
1723    }
1724  } else if (dst.is(src)) {
1725    DCHECK(!dst.is(kScratchRegister));
1726    LoadSmiConstant(kScratchRegister, constant);
1727    subp(dst, kScratchRegister);
1728    if (constraints & SmiOperationConstraint::kBailoutOnNoOverflow) {
1729      j(no_overflow, bailout_label, near_jump);
1730      DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1731      addp(dst, kScratchRegister);
1732    } else if (constraints & SmiOperationConstraint::kBailoutOnOverflow) {
1733      if (constraints & SmiOperationConstraint::kPreserveSourceRegister) {
1734        Label done;
1735        j(no_overflow, &done, Label::kNear);
1736        addp(dst, kScratchRegister);
1737        jmp(bailout_label, near_jump);
1738        bind(&done);
1739      } else {
1740        // Bail out on overflow; src does not need to be preserved here.
1741        j(overflow, bailout_label, near_jump);
1742      }
1743    } else {
1744      UNREACHABLE();
1745    }
1746  } else {
1747    DCHECK(constraints & SmiOperationConstraint::kPreserveSourceRegister);
1748    DCHECK(constraints & SmiOperationConstraint::kBailoutOnOverflow);
1749    if (constant->value() == Smi::kMinValue) {
1750      DCHECK(!dst.is(kScratchRegister));
1751      movp(dst, src);
1752      LoadSmiConstant(kScratchRegister, constant);
1753      subp(dst, kScratchRegister);
1754      j(overflow, bailout_label, near_jump);
1755    } else {
1756      // Subtract by adding the negation.
1757      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1758      addp(dst, src);
1759      j(overflow, bailout_label, near_jump);
1760    }
1761  }
1762}
1763
1764
1765void MacroAssembler::SmiNeg(Register dst,
1766                            Register src,
1767                            Label* on_smi_result,
1768                            Label::Distance near_jump) {
1769  if (dst.is(src)) {
1770    DCHECK(!dst.is(kScratchRegister));
1771    movp(kScratchRegister, src);
1772    negp(dst);  // Low 32 bits are retained as zero by negation.
1773    // Test if result is zero or Smi::kMinValue.
1774    cmpp(dst, kScratchRegister);
1775    j(not_equal, on_smi_result, near_jump);
1776    movp(src, kScratchRegister);
1777  } else {
1778    movp(dst, src);
1779    negp(dst);
1780    cmpp(dst, src);
1781    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1782    j(not_equal, on_smi_result, near_jump);
1783  }
1784}
1785
1786
1787template<class T>
1788static void SmiAddHelper(MacroAssembler* masm,
1789                         Register dst,
1790                         Register src1,
1791                         T src2,
1792                         Label* on_not_smi_result,
1793                         Label::Distance near_jump) {
1794  if (dst.is(src1)) {
1795    Label done;
1796    masm->addp(dst, src2);
1797    masm->j(no_overflow, &done, Label::kNear);
1798    // Restore src1.
1799    masm->subp(dst, src2);
1800    masm->jmp(on_not_smi_result, near_jump);
1801    masm->bind(&done);
1802  } else {
1803    masm->movp(dst, src1);
1804    masm->addp(dst, src2);
1805    masm->j(overflow, on_not_smi_result, near_jump);
1806  }
1807}
1808
1809
1810void MacroAssembler::SmiAdd(Register dst,
1811                            Register src1,
1812                            Register src2,
1813                            Label* on_not_smi_result,
1814                            Label::Distance near_jump) {
1815  DCHECK_NOT_NULL(on_not_smi_result);
1816  DCHECK(!dst.is(src2));
1817  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1818}
1819
1820
1821void MacroAssembler::SmiAdd(Register dst,
1822                            Register src1,
1823                            const Operand& src2,
1824                            Label* on_not_smi_result,
1825                            Label::Distance near_jump) {
1826  DCHECK_NOT_NULL(on_not_smi_result);
1827  DCHECK(!src2.AddressUsesRegister(dst));
1828  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1829}
1830
1831
1832void MacroAssembler::SmiAdd(Register dst,
1833                            Register src1,
1834                            Register src2) {
1835  // No overflow checking. Use only when it's known that
1836  // overflowing is impossible.
1837  if (!dst.is(src1)) {
1838    if (emit_debug_code()) {
1839      movp(kScratchRegister, src1);
1840      addp(kScratchRegister, src2);
1841      Check(no_overflow, kSmiAdditionOverflow);
1842    }
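    // Added note: when dst differs from src1, leap forms src1 + src2 directly
    // into dst in one instruction, avoiding a separate movp and leaving the
    // flags untouched.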
1843    leap(dst, Operand(src1, src2, times_1, 0));
1844  } else {
1845    addp(dst, src2);
1846    Assert(no_overflow, kSmiAdditionOverflow);
1847  }
1848}
1849
1850
1851template<class T>
1852static void SmiSubHelper(MacroAssembler* masm,
1853                         Register dst,
1854                         Register src1,
1855                         T src2,
1856                         Label* on_not_smi_result,
1857                         Label::Distance near_jump) {
1858  if (dst.is(src1)) {
1859    Label done;
1860    masm->subp(dst, src2);
1861    masm->j(no_overflow, &done, Label::kNear);
1862    // Restore src1.
1863    masm->addp(dst, src2);
1864    masm->jmp(on_not_smi_result, near_jump);
1865    masm->bind(&done);
1866  } else {
1867    masm->movp(dst, src1);
1868    masm->subp(dst, src2);
1869    masm->j(overflow, on_not_smi_result, near_jump);
1870  }
1871}
1872
1873
1874void MacroAssembler::SmiSub(Register dst,
1875                            Register src1,
1876                            Register src2,
1877                            Label* on_not_smi_result,
1878                            Label::Distance near_jump) {
1879  DCHECK_NOT_NULL(on_not_smi_result);
1880  DCHECK(!dst.is(src2));
1881  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1882}
1883
1884
1885void MacroAssembler::SmiSub(Register dst,
1886                            Register src1,
1887                            const Operand& src2,
1888                            Label* on_not_smi_result,
1889                            Label::Distance near_jump) {
1890  DCHECK_NOT_NULL(on_not_smi_result);
1891  DCHECK(!src2.AddressUsesRegister(dst));
1892  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1893}
1894
1895
1896template<class T>
1897static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1898                                   Register dst,
1899                                   Register src1,
1900                                   T src2) {
1901  // No overflow checking. Use only when it's known that
1902  // overflowing is impossible (e.g., subtracting two positive smis).
1903  if (!dst.is(src1)) {
1904    masm->movp(dst, src1);
1905  }
1906  masm->subp(dst, src2);
1907  masm->Assert(no_overflow, kSmiSubtractionOverflow);
1908}
1909
1910
1911void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1912  DCHECK(!dst.is(src2));
1913  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1914}
1915
1916
1917void MacroAssembler::SmiSub(Register dst,
1918                            Register src1,
1919                            const Operand& src2) {
1920  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1921}
1922
1923
1924void MacroAssembler::SmiMul(Register dst,
1925                            Register src1,
1926                            Register src2,
1927                            Label* on_not_smi_result,
1928                            Label::Distance near_jump) {
1929  DCHECK(!dst.is(src2));
1930  DCHECK(!dst.is(kScratchRegister));
1931  DCHECK(!src1.is(kScratchRegister));
1932  DCHECK(!src2.is(kScratchRegister));
1933
1934  if (dst.is(src1)) {
1935    Label failure, zero_correct_result;
1936    movp(kScratchRegister, src1);  // Create backup for later testing.
1937    SmiToInteger64(dst, src1);
1938    imulp(dst, src2);
1939    j(overflow, &failure, Label::kNear);
1940
1941    // Check for negative zero result.  If product is zero, and one
1942    // argument is negative, go to slow case.
1943    Label correct_result;
1944    testp(dst, dst);
1945    j(not_zero, &correct_result, Label::kNear);
1946
1947    movp(dst, kScratchRegister);
1948    xorp(dst, src2);
1949    // Result was positive zero.
1950    j(positive, &zero_correct_result, Label::kNear);
1951
1952    bind(&failure);  // Reused failure exit, restores src1.
1953    movp(src1, kScratchRegister);
1954    jmp(on_not_smi_result, near_jump);
1955
1956    bind(&zero_correct_result);
1957    Set(dst, 0);
1958
1959    bind(&correct_result);
1960  } else {
1961    SmiToInteger64(dst, src1);
1962    imulp(dst, src2);
1963    j(overflow, on_not_smi_result, near_jump);
1964    // Check for negative zero result.  If product is zero, and one
1965    // argument is negative, go to slow case.
1966    Label correct_result;
1967    testp(dst, dst);
1968    j(not_zero, &correct_result, Label::kNear);
1969    // One of src1 and src2 is zero; check whether the other is
1970    // negative.
1971    movp(kScratchRegister, src1);
1972    xorp(kScratchRegister, src2);
1973    j(negative, on_not_smi_result, near_jump);
1974    bind(&correct_result);
1975  }
1976}
1977
1978
1979void MacroAssembler::SmiDiv(Register dst,
1980                            Register src1,
1981                            Register src2,
1982                            Label* on_not_smi_result,
1983                            Label::Distance near_jump) {
1984  DCHECK(!src1.is(kScratchRegister));
1985  DCHECK(!src2.is(kScratchRegister));
1986  DCHECK(!dst.is(kScratchRegister));
1987  DCHECK(!src2.is(rax));
1988  DCHECK(!src2.is(rdx));
1989  DCHECK(!src1.is(rdx));
1990
1991  // Check for 0 divisor (result is +/-Infinity).
1992  testp(src2, src2);
1993  j(zero, on_not_smi_result, near_jump);
1994
1995  if (src1.is(rax)) {
1996    movp(kScratchRegister, src1);
1997  }
1998  SmiToInteger32(rax, src1);
1999  // We need to rule out dividing Smi::kMinValue by -1, since that would
2000  // overflow in idiv and raise an exception.
2001  // We combine this with negative zero test (negative zero only happens
2002  // when dividing zero by a negative number).
2003
2004  // We overshoot a little and go to slow case if we divide min-value
2005  // by any negative value, not just -1.
2006  Label safe_div;
2007  testl(rax, Immediate(~Smi::kMinValue));
2008  j(not_zero, &safe_div, Label::kNear);
2009  testp(src2, src2);
2010  if (src1.is(rax)) {
2011    j(positive, &safe_div, Label::kNear);
2012    movp(src1, kScratchRegister);
2013    jmp(on_not_smi_result, near_jump);
2014  } else {
2015    j(negative, on_not_smi_result, near_jump);
2016  }
2017  bind(&safe_div);
2018
2019  SmiToInteger32(src2, src2);
2020  // Sign extend src1 into edx:eax.
2021  cdq();
2022  idivl(src2);
2023  Integer32ToSmi(src2, src2);
2024  // Check that the remainder is zero.
2025  testl(rdx, rdx);
2026  if (src1.is(rax)) {
2027    Label smi_result;
2028    j(zero, &smi_result, Label::kNear);
2029    movp(src1, kScratchRegister);
2030    jmp(on_not_smi_result, near_jump);
2031    bind(&smi_result);
2032  } else {
2033    j(not_zero, on_not_smi_result, near_jump);
2034  }
2035  if (!dst.is(src1) && src1.is(rax)) {
2036    movp(src1, kScratchRegister);
2037  }
2038  Integer32ToSmi(dst, rax);
2039}
2040
2041
2042void MacroAssembler::SmiMod(Register dst,
2043                            Register src1,
2044                            Register src2,
2045                            Label* on_not_smi_result,
2046                            Label::Distance near_jump) {
2047  DCHECK(!dst.is(kScratchRegister));
2048  DCHECK(!src1.is(kScratchRegister));
2049  DCHECK(!src2.is(kScratchRegister));
2050  DCHECK(!src2.is(rax));
2051  DCHECK(!src2.is(rdx));
2052  DCHECK(!src1.is(rdx));
2053  DCHECK(!src1.is(src2));
2054
2055  testp(src2, src2);
2056  j(zero, on_not_smi_result, near_jump);
2057
2058  if (src1.is(rax)) {
2059    movp(kScratchRegister, src1);
2060  }
2061  SmiToInteger32(rax, src1);
2062  SmiToInteger32(src2, src2);
2063
2064  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
2065  Label safe_div;
2066  cmpl(rax, Immediate(Smi::kMinValue));
2067  j(not_equal, &safe_div, Label::kNear);
2068  cmpl(src2, Immediate(-1));
2069  j(not_equal, &safe_div, Label::kNear);
2070  // Retag inputs and go to the slow case.
2071  Integer32ToSmi(src2, src2);
2072  if (src1.is(rax)) {
2073    movp(src1, kScratchRegister);
2074  }
2075  jmp(on_not_smi_result, near_jump);
2076  bind(&safe_div);
2077
2078  // Sign extend eax into edx:eax.
2079  cdq();
2080  idivl(src2);
2081  // Restore smi tags on inputs.
2082  Integer32ToSmi(src2, src2);
2083  if (src1.is(rax)) {
2084    movp(src1, kScratchRegister);
2085  }
2086  // Check for a negative zero result.  If the result is zero, and the
2087  // dividend is negative, go slow to return a floating point negative zero.
2088  Label smi_result;
2089  testl(rdx, rdx);
2090  j(not_zero, &smi_result, Label::kNear);
2091  testp(src1, src1);
2092  j(negative, on_not_smi_result, near_jump);
2093  bind(&smi_result);
2094  Integer32ToSmi(dst, rdx);
2095}
2096
2097
2098void MacroAssembler::SmiNot(Register dst, Register src) {
2099  DCHECK(!dst.is(kScratchRegister));
2100  DCHECK(!src.is(kScratchRegister));
2101  if (SmiValuesAre32Bits()) {
2102    // Set tag and padding bits before negating, so that they are zero
2103    // afterwards.
2104    movl(kScratchRegister, Immediate(~0));
2105  } else {
2106    DCHECK(SmiValuesAre31Bits());
2107    movl(kScratchRegister, Immediate(1));
2108  }
2109  if (dst.is(src)) {
2110    xorp(dst, kScratchRegister);
2111  } else {
2112    leap(dst, Operand(src, kScratchRegister, times_1, 0));
2113  }
2114  notp(dst);
2115}
2116
2117
2118void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
2119  DCHECK(!dst.is(src2));
2120  if (!dst.is(src1)) {
2121    movp(dst, src1);
2122  }
2123  andp(dst, src2);
2124}
2125
2126
2127void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
2128  if (constant->value() == 0) {
2129    Set(dst, 0);
2130  } else if (dst.is(src)) {
2131    DCHECK(!dst.is(kScratchRegister));
2132    Register constant_reg = GetSmiConstant(constant);
2133    andp(dst, constant_reg);
2134  } else {
2135    LoadSmiConstant(dst, constant);
2136    andp(dst, src);
2137  }
2138}
2139
2140
2141void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
2142  if (!dst.is(src1)) {
2143    DCHECK(!src1.is(src2));
2144    movp(dst, src1);
2145  }
2146  orp(dst, src2);
2147}
2148
2149
2150void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
2151  if (dst.is(src)) {
2152    DCHECK(!dst.is(kScratchRegister));
2153    Register constant_reg = GetSmiConstant(constant);
2154    orp(dst, constant_reg);
2155  } else {
2156    LoadSmiConstant(dst, constant);
2157    orp(dst, src);
2158  }
2159}
2160
2161
2162void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2163  if (!dst.is(src1)) {
2164    DCHECK(!src1.is(src2));
2165    movp(dst, src1);
2166  }
2167  xorp(dst, src2);
2168}
2169
2170
2171void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2172  if (dst.is(src)) {
2173    DCHECK(!dst.is(kScratchRegister));
2174    Register constant_reg = GetSmiConstant(constant);
2175    xorp(dst, constant_reg);
2176  } else {
2177    LoadSmiConstant(dst, constant);
2178    xorp(dst, src);
2179  }
2180}
2181
2182
2183void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2184                                                     Register src,
2185                                                     int shift_value) {
2186  DCHECK(is_uint5(shift_value));
2187  if (shift_value > 0) {
2188    if (dst.is(src)) {
2189      sarp(dst, Immediate(shift_value + kSmiShift));
2190      shlp(dst, Immediate(kSmiShift));
2191    } else {
2192      UNIMPLEMENTED();  // Not used.
2193    }
2194  }
2195}
2196
2197
2198void MacroAssembler::SmiShiftLeftConstant(Register dst,
2199                                          Register src,
2200                                          int shift_value,
2201                                          Label* on_not_smi_result,
2202                                          Label::Distance near_jump) {
2203  if (SmiValuesAre32Bits()) {
2204    if (!dst.is(src)) {
2205      movp(dst, src);
2206    }
2207    if (shift_value > 0) {
2208      // The shift amount is specified by the lower 5 bits, not six as for shlq.
2209      shlq(dst, Immediate(shift_value & 0x1f));
2210    }
2211  } else {
2212    DCHECK(SmiValuesAre31Bits());
2213    if (dst.is(src)) {
2214      UNIMPLEMENTED();  // Not used.
2215    } else {
2216      SmiToInteger32(dst, src);
2217      shll(dst, Immediate(shift_value));
2218      JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
2219      Integer32ToSmi(dst, dst);
2220    }
2221  }
2222}
2223
2224
2225void MacroAssembler::SmiShiftLogicalRightConstant(
2226    Register dst, Register src, int shift_value,
2227    Label* on_not_smi_result, Label::Distance near_jump) {
2228  // Logical right shift interprets its result as an *unsigned* number.
2229  if (dst.is(src)) {
2230    UNIMPLEMENTED();  // Not used.
2231  } else {
2232    if (shift_value == 0) {
2233      testp(src, src);
2234      j(negative, on_not_smi_result, near_jump);
2235    }
2236    if (SmiValuesAre32Bits()) {
2237      movp(dst, src);
2238      shrp(dst, Immediate(shift_value + kSmiShift));
2239      shlp(dst, Immediate(kSmiShift));
2240    } else {
2241      DCHECK(SmiValuesAre31Bits());
2242      SmiToInteger32(dst, src);
2243      shrp(dst, Immediate(shift_value));
2244      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
2245      Integer32ToSmi(dst, dst);
2246    }
2247  }
2248}
2249
2250
2251void MacroAssembler::SmiShiftLeft(Register dst,
2252                                  Register src1,
2253                                  Register src2,
2254                                  Label* on_not_smi_result,
2255                                  Label::Distance near_jump) {
2256  if (SmiValuesAre32Bits()) {
2257    DCHECK(!dst.is(rcx));
2258    if (!dst.is(src1)) {
2259      movp(dst, src1);
2260    }
2261    // Untag shift amount.
2262    SmiToInteger32(rcx, src2);
2263    // The shift amount is specified by the lower 5 bits, not six as for shlq.
2264    andp(rcx, Immediate(0x1f));
2265    shlq_cl(dst);
2266  } else {
2267    DCHECK(SmiValuesAre31Bits());
2268    DCHECK(!dst.is(kScratchRegister));
2269    DCHECK(!src1.is(kScratchRegister));
2270    DCHECK(!src2.is(kScratchRegister));
2271    DCHECK(!dst.is(src2));
2272    DCHECK(!dst.is(rcx));
2273
2274    if (src1.is(rcx) || src2.is(rcx)) {
2275      movq(kScratchRegister, rcx);
2276    }
2277    if (dst.is(src1)) {
2278      UNIMPLEMENTED();  // Not used.
2279    } else {
2280      Label valid_result;
2281      SmiToInteger32(dst, src1);
2282      SmiToInteger32(rcx, src2);
2283      shll_cl(dst);
2284      JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
2285      // Since neither src1 nor src2 can be dst, we do not need to restore
2286      // them after clobbering dst.
2287      if (src1.is(rcx) || src2.is(rcx)) {
2288        if (src1.is(rcx)) {
2289          movq(src1, kScratchRegister);
2290        } else {
2291          movq(src2, kScratchRegister);
2292        }
2293      }
2294      jmp(on_not_smi_result, near_jump);
2295      bind(&valid_result);
2296      Integer32ToSmi(dst, dst);
2297    }
2298  }
2299}
2300
2301
2302void MacroAssembler::SmiShiftLogicalRight(Register dst,
2303                                          Register src1,
2304                                          Register src2,
2305                                          Label* on_not_smi_result,
2306                                          Label::Distance near_jump) {
2307  DCHECK(!dst.is(kScratchRegister));
2308  DCHECK(!src1.is(kScratchRegister));
2309  DCHECK(!src2.is(kScratchRegister));
2310  DCHECK(!dst.is(src2));
2311  DCHECK(!dst.is(rcx));
2312  if (src1.is(rcx) || src2.is(rcx)) {
2313    movq(kScratchRegister, rcx);
2314  }
2315  if (dst.is(src1)) {
2316    UNIMPLEMENTED();  // Not used.
2317  } else {
2318    Label valid_result;
2319    SmiToInteger32(dst, src1);
2320    SmiToInteger32(rcx, src2);
2321    shrl_cl(dst);
2322    JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
2323    // Since neither src1 nor src2 can be dst, we do not need to restore
2324    // them after clobbering dst.
2325    if (src1.is(rcx) || src2.is(rcx)) {
2326      if (src1.is(rcx)) {
2327        movq(src1, kScratchRegister);
2328      } else {
2329        movq(src2, kScratchRegister);
2330      }
2331    }
2332    jmp(on_not_smi_result, near_jump);
2333    bind(&valid_result);
2334    Integer32ToSmi(dst, dst);
2335  }
2336}
2337
2338
2339void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2340                                             Register src1,
2341                                             Register src2) {
2342  DCHECK(!dst.is(kScratchRegister));
2343  DCHECK(!src1.is(kScratchRegister));
2344  DCHECK(!src2.is(kScratchRegister));
2345  DCHECK(!dst.is(rcx));
2346
2347  SmiToInteger32(rcx, src2);
2348  if (!dst.is(src1)) {
2349    movp(dst, src1);
2350  }
2351  SmiToInteger32(dst, dst);
2352  sarl_cl(dst);
2353  Integer32ToSmi(dst, dst);
2354}
2355
2356
2357void MacroAssembler::SelectNonSmi(Register dst,
2358                                  Register src1,
2359                                  Register src2,
2360                                  Label* on_not_smis,
2361                                  Label::Distance near_jump) {
2362  DCHECK(!dst.is(kScratchRegister));
2363  DCHECK(!src1.is(kScratchRegister));
2364  DCHECK(!src2.is(kScratchRegister));
2365  DCHECK(!dst.is(src1));
2366  DCHECK(!dst.is(src2));
2367  // Both operands must not be smis.
2368#ifdef DEBUG
2369  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2370  Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2371#endif
2372  STATIC_ASSERT(kSmiTag == 0);
2373  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
2374  movl(kScratchRegister, Immediate(kSmiTagMask));
2375  andp(kScratchRegister, src1);
2376  testl(kScratchRegister, src2);
2377  // If non-zero then both are smis.
2378  j(not_zero, on_not_smis, near_jump);
2379
2380  // Exactly one operand is a smi.
2381  DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2382  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
2383  subp(kScratchRegister, Immediate(1));
2384  // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
2385  movp(dst, src1);
2386  xorp(dst, src2);
2387  andp(dst, kScratchRegister);
2388  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2389  xorp(dst, src1);
2390  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
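  // Worked example (added): if src1 is the smi, kScratchRegister is
  // 0 - 1 = all ones, so dst keeps src1 ^ src2 and the final xor with src1
  // yields src2; if src2 is the smi, the mask is zero, dst becomes 0, and the
  // final xor yields src1.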
2391}
2392
2393
2394SmiIndex MacroAssembler::SmiToIndex(Register dst,
2395                                    Register src,
2396                                    int shift) {
2397  if (SmiValuesAre32Bits()) {
2398    DCHECK(is_uint6(shift));
2399    // There is a possible optimization if shift is in the range 60-63, but that
2400    // will (and must) never happen.
2401    if (!dst.is(src)) {
2402      movp(dst, src);
2403    }
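    // Added note: the smi payload sits kSmiShift bits up in the word, so
    // value << shift is obtained by shifting the tagged word right by
    // (kSmiShift - shift), or left by (shift - kSmiShift).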
2404    if (shift < kSmiShift) {
2405      sarp(dst, Immediate(kSmiShift - shift));
2406    } else {
2407      shlp(dst, Immediate(shift - kSmiShift));
2408    }
2409    return SmiIndex(dst, times_1);
2410  } else {
2411    DCHECK(SmiValuesAre31Bits());
2412    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2413    if (!dst.is(src)) {
2414      movp(dst, src);
2415    }
2416    // We have to sign-extend the index register to 64 bits, as the smi
2417    // might be negative.
2418    movsxlq(dst, dst);
2419    if (shift == times_1) {
2420      sarq(dst, Immediate(kSmiShift));
2421      return SmiIndex(dst, times_1);
2422    }
2423    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2424  }
2425}
2426
2427
2428SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2429                                            Register src,
2430                                            int shift) {
2431  if (SmiValuesAre32Bits()) {
2432    // Register src holds a positive smi.
2433    DCHECK(is_uint6(shift));
2434    if (!dst.is(src)) {
2435      movp(dst, src);
2436    }
2437    negp(dst);
2438    if (shift < kSmiShift) {
2439      sarp(dst, Immediate(kSmiShift - shift));
2440    } else {
2441      shlp(dst, Immediate(shift - kSmiShift));
2442    }
2443    return SmiIndex(dst, times_1);
2444  } else {
2445    DCHECK(SmiValuesAre31Bits());
2446    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
2447    if (!dst.is(src)) {
2448      movp(dst, src);
2449    }
2450    negq(dst);
2451    if (shift == times_1) {
2452      sarq(dst, Immediate(kSmiShift));
2453      return SmiIndex(dst, times_1);
2454    }
2455    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
2456  }
2457}
2458
2459
2460void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2461  if (SmiValuesAre32Bits()) {
2462    DCHECK_EQ(0, kSmiShift % kBitsPerByte);
2463    addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2464  } else {
2465    DCHECK(SmiValuesAre31Bits());
2466    SmiToInteger32(kScratchRegister, src);
2467    addl(dst, kScratchRegister);
2468  }
2469}
2470
2471
2472void MacroAssembler::Push(Smi* source) {
2473  intptr_t smi = reinterpret_cast<intptr_t>(source);
2474  if (is_int32(smi)) {
2475    Push(Immediate(static_cast<int32_t>(smi)));
2476  } else {
2477    Register constant = GetSmiConstant(source);
2478    Push(constant);
2479  }
2480}
2481
2482
2483void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2484  DCHECK(!src.is(scratch));
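  // Added note: the raw 64-bit word in src is split into two halves, each
  // pushed as a smi, so a GC scanning the stack never sees the untagged
  // (possibly pointer-looking) bit pattern.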
2485  movp(scratch, src);
2486  // High bits.
2487  shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2488  shlp(src, Immediate(kSmiShift));
2489  Push(src);
2490  // Low bits.
2491  shlp(scratch, Immediate(kSmiShift));
2492  Push(scratch);
2493}
2494
2495
2496void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2497  DCHECK(!dst.is(scratch));
2498  Pop(scratch);
2499  // Low bits.
2500  shrp(scratch, Immediate(kSmiShift));
2501  Pop(dst);
2502  shrp(dst, Immediate(kSmiShift));
2503  // High bits.
2504  shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
2505  orp(dst, scratch);
2506}
2507
2508
2509void MacroAssembler::Test(const Operand& src, Smi* source) {
2510  if (SmiValuesAre32Bits()) {
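    // Added note: with 32-bit smis the payload lives in the upper half of the
    // word, so only those four bytes are tested against the untagged value.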
2511    testl(Operand(src, kIntSize), Immediate(source->value()));
2512  } else {
2513    DCHECK(SmiValuesAre31Bits());
2514    testl(src, Immediate(source));
2515  }
2516}
2517
2518
2519// ----------------------------------------------------------------------------
2520
2521
2522void MacroAssembler::JumpIfNotString(Register object,
2523                                     Register object_map,
2524                                     Label* not_string,
2525                                     Label::Distance near_jump) {
2526  Condition is_smi = CheckSmi(object);
2527  j(is_smi, not_string, near_jump);
2528  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2529  j(above_equal, not_string, near_jump);
2530}
2531
2532
2533void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
2534    Register first_object, Register second_object, Register scratch1,
2535    Register scratch2, Label* on_fail, Label::Distance near_jump) {
2536  // Check that both objects are not smis.
2537  Condition either_smi = CheckEitherSmi(first_object, second_object);
2538  j(either_smi, on_fail, near_jump);
2539
2540  // Load instance type for both strings.
2541  movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2542  movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2543  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2544  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2545
2546  // Check that both are flat one-byte strings.
2547  DCHECK(kNotStringTag != 0);
2548  const int kFlatOneByteStringMask =
2549      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2550  const int kFlatOneByteStringTag =
2551      kStringTag | kOneByteStringTag | kSeqStringTag;
2552
2553  andl(scratch1, Immediate(kFlatOneByteStringMask));
2554  andl(scratch2, Immediate(kFlatOneByteStringMask));
2555  // Interleave the bits to check both scratch1 and scratch2 in one test.
2556  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
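  // Added note: the DCHECK above guarantees that the masked bits of scratch1
  // and scratch2 << 3 occupy disjoint positions, so the lea-computed
  // scratch1 + scratch2 * 8 encodes both instance types and one cmpl can
  // check them together.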
2557  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2558  cmpl(scratch1,
2559       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2560  j(not_equal, on_fail, near_jump);
2561}
2562
2563
2564void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2565    Register instance_type, Register scratch, Label* failure,
2566    Label::Distance near_jump) {
2567  if (!scratch.is(instance_type)) {
2568    movl(scratch, instance_type);
2569  }
2570
2571  const int kFlatOneByteStringMask =
2572      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2573
2574  andl(scratch, Immediate(kFlatOneByteStringMask));
2575  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2576  j(not_equal, failure, near_jump);
2577}
2578
2579
2580void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2581    Register first_object_instance_type, Register second_object_instance_type,
2582    Register scratch1, Register scratch2, Label* on_fail,
2583    Label::Distance near_jump) {
2584  // Load instance type for both strings.
2585  movp(scratch1, first_object_instance_type);
2586  movp(scratch2, second_object_instance_type);
2587
2588  // Check that both are flat one-byte strings.
2589  DCHECK(kNotStringTag != 0);
2590  const int kFlatOneByteStringMask =
2591      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2592  const int kFlatOneByteStringTag =
2593      kStringTag | kOneByteStringTag | kSeqStringTag;
2594
2595  andl(scratch1, Immediate(kFlatOneByteStringMask));
2596  andl(scratch2, Immediate(kFlatOneByteStringMask));
2597  // Interleave the bits to check both scratch1 and scratch2 in one test.
2598  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2599  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2600  cmpl(scratch1,
2601       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
2602  j(not_equal, on_fail, near_jump);
2603}
2604
2605
2606template<class T>
2607static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2608                                      T operand_or_register,
2609                                      Label* not_unique_name,
2610                                      Label::Distance distance) {
2611  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2612  Label succeed;
2613  masm->testb(operand_or_register,
2614              Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2615  masm->j(zero, &succeed, Label::kNear);
2616  masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2617  masm->j(not_equal, not_unique_name, distance);
2618
2619  masm->bind(&succeed);
2620}
2621
2622
2623void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2624                                                     Label* not_unique_name,
2625                                                     Label::Distance distance) {
2626  JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2627}
2628
2629
2630void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
2631                                                     Label* not_unique_name,
2632                                                     Label::Distance distance) {
2633  JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2634}
2635
2636
2637void MacroAssembler::Move(Register dst, Register src) {
2638  if (!dst.is(src)) {
2639    movp(dst, src);
2640  }
2641}
2642
2643
2644void MacroAssembler::Move(Register dst, Handle<Object> source) {
2645  AllowDeferredHandleDereference smi_check;
2646  if (source->IsSmi()) {
2647    Move(dst, Smi::cast(*source));
2648  } else {
2649    MoveHeapObject(dst, source);
2650  }
2651}
2652
2653
2654void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2655  AllowDeferredHandleDereference smi_check;
2656  if (source->IsSmi()) {
2657    Move(dst, Smi::cast(*source));
2658  } else {
2659    MoveHeapObject(kScratchRegister, source);
2660    movp(dst, kScratchRegister);
2661  }
2662}
2663
2664
2665void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2666  if (src == 0) {
2667    Xorpd(dst, dst);
2668  } else {
2669    unsigned pop = base::bits::CountPopulation32(src);
2670    DCHECK_NE(0u, pop);
2671    if (pop == 32) {
2672      Pcmpeqd(dst, dst);
2673    } else {
2674      movl(kScratchRegister, Immediate(src));
2675      Movq(dst, kScratchRegister);
2676    }
2677  }
2678}
2679
2680
2681void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2682  if (src == 0) {
2683    Xorpd(dst, dst);
2684  } else {
2685    unsigned nlz = base::bits::CountLeadingZeros64(src);
2686    unsigned ntz = base::bits::CountTrailingZeros64(src);
2687    unsigned pop = base::bits::CountPopulation64(src);
2688    DCHECK_NE(0u, pop);
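    // Added examples: pop + ntz == 64 means a block of ones followed only by
    // trailing zeros (e.g. 0xFFFFFFFFFFFF0000), producible as all-ones shifted
    // left; pop + nlz == 64 is the mirrored case (e.g. 0x0000FFFFFFFFFFFF),
    // producible as all-ones shifted right.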
2689    if (pop == 64) {
2690      Pcmpeqd(dst, dst);
2691    } else if (pop + ntz == 64) {
2692      Pcmpeqd(dst, dst);
2693      Psllq(dst, ntz);
2694    } else if (pop + nlz == 64) {
2695      Pcmpeqd(dst, dst);
2696      Psrlq(dst, nlz);
2697    } else {
2698      uint32_t lower = static_cast<uint32_t>(src);
2699      uint32_t upper = static_cast<uint32_t>(src >> 32);
2700      if (upper == 0) {
2701        Move(dst, lower);
2702      } else {
2703        movq(kScratchRegister, src);
2704        Movq(dst, kScratchRegister);
2705      }
2706    }
2707  }
2708}
2709
2710
2711void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
2712  if (CpuFeatures::IsSupported(AVX)) {
2713    CpuFeatureScope scope(this, AVX);
2714    vmovaps(dst, src);
2715  } else {
2716    movaps(dst, src);
2717  }
2718}
2719
2720
2721void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
2722  if (CpuFeatures::IsSupported(AVX)) {
2723    CpuFeatureScope scope(this, AVX);
2724    vmovapd(dst, src);
2725  } else {
2726    movapd(dst, src);
2727  }
2728}
2729
2730
2731void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
2732  if (CpuFeatures::IsSupported(AVX)) {
2733    CpuFeatureScope scope(this, AVX);
2734    vmovsd(dst, dst, src);
2735  } else {
2736    movsd(dst, src);
2737  }
2738}
2739
2740
2741void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
2742  if (CpuFeatures::IsSupported(AVX)) {
2743    CpuFeatureScope scope(this, AVX);
2744    vmovsd(dst, src);
2745  } else {
2746    movsd(dst, src);
2747  }
2748}
2749
2750
2751void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
2752  if (CpuFeatures::IsSupported(AVX)) {
2753    CpuFeatureScope scope(this, AVX);
2754    vmovsd(dst, src);
2755  } else {
2756    movsd(dst, src);
2757  }
2758}
2759
2760
2761void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
2762  if (CpuFeatures::IsSupported(AVX)) {
2763    CpuFeatureScope scope(this, AVX);
2764    vmovss(dst, dst, src);
2765  } else {
2766    movss(dst, src);
2767  }
2768}
2769
2770
2771void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
2772  if (CpuFeatures::IsSupported(AVX)) {
2773    CpuFeatureScope scope(this, AVX);
2774    vmovss(dst, src);
2775  } else {
2776    movss(dst, src);
2777  }
2778}
2779
2780
2781void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
2782  if (CpuFeatures::IsSupported(AVX)) {
2783    CpuFeatureScope scope(this, AVX);
2784    vmovss(dst, src);
2785  } else {
2786    movss(dst, src);
2787  }
2788}
2789
2790
2791void MacroAssembler::Movd(XMMRegister dst, Register src) {
2792  if (CpuFeatures::IsSupported(AVX)) {
2793    CpuFeatureScope scope(this, AVX);
2794    vmovd(dst, src);
2795  } else {
2796    movd(dst, src);
2797  }
2798}
2799
2800
2801void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
2802  if (CpuFeatures::IsSupported(AVX)) {
2803    CpuFeatureScope scope(this, AVX);
2804    vmovd(dst, src);
2805  } else {
2806    movd(dst, src);
2807  }
2808}
2809
2810
2811void MacroAssembler::Movd(Register dst, XMMRegister src) {
2812  if (CpuFeatures::IsSupported(AVX)) {
2813    CpuFeatureScope scope(this, AVX);
2814    vmovd(dst, src);
2815  } else {
2816    movd(dst, src);
2817  }
2818}
2819
2820
2821void MacroAssembler::Movq(XMMRegister dst, Register src) {
2822  if (CpuFeatures::IsSupported(AVX)) {
2823    CpuFeatureScope scope(this, AVX);
2824    vmovq(dst, src);
2825  } else {
2826    movq(dst, src);
2827  }
2828}
2829
2830
2831void MacroAssembler::Movq(Register dst, XMMRegister src) {
2832  if (CpuFeatures::IsSupported(AVX)) {
2833    CpuFeatureScope scope(this, AVX);
2834    vmovq(dst, src);
2835  } else {
2836    movq(dst, src);
2837  }
2838}
2839
2840
2841void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
2842  if (CpuFeatures::IsSupported(AVX)) {
2843    CpuFeatureScope scope(this, AVX);
2844    vmovmskpd(dst, src);
2845  } else {
2846    movmskpd(dst, src);
2847  }
2848}
2849
2850
2851void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
2852                             RoundingMode mode) {
2853  if (CpuFeatures::IsSupported(AVX)) {
2854    CpuFeatureScope scope(this, AVX);
2855    vroundss(dst, dst, src, mode);
2856  } else {
2857    roundss(dst, src, mode);
2858  }
2859}
2860
2861
2862void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
2863                             RoundingMode mode) {
2864  if (CpuFeatures::IsSupported(AVX)) {
2865    CpuFeatureScope scope(this, AVX);
2866    vroundsd(dst, dst, src, mode);
2867  } else {
2868    roundsd(dst, src, mode);
2869  }
2870}
2871
2872
2873void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
2874  if (CpuFeatures::IsSupported(AVX)) {
2875    CpuFeatureScope scope(this, AVX);
2876    vsqrtsd(dst, dst, src);
2877  } else {
2878    sqrtsd(dst, src);
2879  }
2880}
2881
2882
2883void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
2884  if (CpuFeatures::IsSupported(AVX)) {
2885    CpuFeatureScope scope(this, AVX);
2886    vsqrtsd(dst, dst, src);
2887  } else {
2888    sqrtsd(dst, src);
2889  }
2890}
2891
2892
2893void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
2894  if (CpuFeatures::IsSupported(AVX)) {
2895    CpuFeatureScope scope(this, AVX);
2896    vucomiss(src1, src2);
2897  } else {
2898    ucomiss(src1, src2);
2899  }
2900}
2901
2902
2903void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
2904  if (CpuFeatures::IsSupported(AVX)) {
2905    CpuFeatureScope scope(this, AVX);
2906    vucomiss(src1, src2);
2907  } else {
2908    ucomiss(src1, src2);
2909  }
2910}
2911
2912
2913void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
2914  if (CpuFeatures::IsSupported(AVX)) {
2915    CpuFeatureScope scope(this, AVX);
2916    vucomisd(src1, src2);
2917  } else {
2918    ucomisd(src1, src2);
2919  }
2920}
2921
2922
2923void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
2924  if (CpuFeatures::IsSupported(AVX)) {
2925    CpuFeatureScope scope(this, AVX);
2926    vucomisd(src1, src2);
2927  } else {
2928    ucomisd(src1, src2);
2929  }
2930}
2931
2932
2933void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2934  AllowDeferredHandleDereference smi_check;
2935  if (source->IsSmi()) {
2936    Cmp(dst, Smi::cast(*source));
2937  } else {
2938    MoveHeapObject(kScratchRegister, source);
2939    cmpp(dst, kScratchRegister);
2940  }
2941}
2942
2943
2944void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2945  AllowDeferredHandleDereference smi_check;
2946  if (source->IsSmi()) {
2947    Cmp(dst, Smi::cast(*source));
2948  } else {
2949    MoveHeapObject(kScratchRegister, source);
2950    cmpp(dst, kScratchRegister);
2951  }
2952}
2953
2954
2955void MacroAssembler::Push(Handle<Object> source) {
2956  AllowDeferredHandleDereference smi_check;
2957  if (source->IsSmi()) {
2958    Push(Smi::cast(*source));
2959  } else {
2960    MoveHeapObject(kScratchRegister, source);
2961    Push(kScratchRegister);
2962  }
2963}
2964
2965
2966void MacroAssembler::MoveHeapObject(Register result,
2967                                    Handle<Object> object) {
2968  AllowDeferredHandleDereference using_raw_address;
2969  DCHECK(object->IsHeapObject());
2970  if (isolate()->heap()->InNewSpace(*object)) {
2971    Handle<Cell> cell = isolate()->factory()->NewCell(object);
2972    Move(result, cell, RelocInfo::CELL);
2973    movp(result, Operand(result, 0));
2974  } else {
2975    Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2976  }
2977}
2978
2979
2980void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2981  if (dst.is(rax)) {
2982    AllowDeferredHandleDereference embedding_raw_address;
2983    load_rax(cell.location(), RelocInfo::CELL);
2984  } else {
2985    Move(dst, cell, RelocInfo::CELL);
2986    movp(dst, Operand(dst, 0));
2987  }
2988}
2989
2990
2991void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2992                                  Register scratch) {
2993  Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
2994  cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
2995}
2996
2997
2998void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2999  Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
3000  movp(value, FieldOperand(value, WeakCell::kValueOffset));
3001}
3002
3003
3004void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3005                                   Label* miss) {
3006  GetWeakValue(value, cell);
3007  JumpIfSmi(value, miss);
3008}
3009
3010
3011void MacroAssembler::Drop(int stack_elements) {
3012  if (stack_elements > 0) {
3013    addp(rsp, Immediate(stack_elements * kPointerSize));
3014  }
3015}
3016
3017
3018void MacroAssembler::DropUnderReturnAddress(int stack_elements,
3019                                            Register scratch) {
3020  DCHECK(stack_elements > 0);
3021  if (kPointerSize == kInt64Size && stack_elements == 1) {
3022    popq(MemOperand(rsp, 0));
3023    return;
3024  }
3025
3026  PopReturnAddressTo(scratch);
3027  Drop(stack_elements);
3028  PushReturnAddressFrom(scratch);
3029}
3030
3031
3032void MacroAssembler::Push(Register src) {
3033  if (kPointerSize == kInt64Size) {
3034    pushq(src);
3035  } else {
3036    // x32 uses 64-bit push for rbp in the prologue.
3037    DCHECK(src.code() != rbp.code());
3038    leal(rsp, Operand(rsp, -4));
3039    movp(Operand(rsp, 0), src);
3040  }
3041}
3042
3043
3044void MacroAssembler::Push(const Operand& src) {
3045  if (kPointerSize == kInt64Size) {
3046    pushq(src);
3047  } else {
3048    movp(kScratchRegister, src);
3049    leal(rsp, Operand(rsp, -4));
3050    movp(Operand(rsp, 0), kScratchRegister);
3051  }
3052}
3053
3054
3055void MacroAssembler::PushQuad(const Operand& src) {
3056  if (kPointerSize == kInt64Size) {
3057    pushq(src);
3058  } else {
3059    movp(kScratchRegister, src);
3060    pushq(kScratchRegister);
3061  }
3062}
3063
3064
3065void MacroAssembler::Push(Immediate value) {
3066  if (kPointerSize == kInt64Size) {
3067    pushq(value);
3068  } else {
3069    leal(rsp, Operand(rsp, -4));
3070    movp(Operand(rsp, 0), value);
3071  }
3072}
3073
3074
3075void MacroAssembler::PushImm32(int32_t imm32) {
3076  if (kPointerSize == kInt64Size) {
3077    pushq_imm32(imm32);
3078  } else {
3079    leal(rsp, Operand(rsp, -4));
3080    movp(Operand(rsp, 0), Immediate(imm32));
3081  }
3082}
3083
3084
3085void MacroAssembler::Pop(Register dst) {
3086  if (kPointerSize == kInt64Size) {
3087    popq(dst);
3088  } else {
3089    // x32 uses 64-bit pop for rbp in the epilogue.
3090    DCHECK(dst.code() != rbp.code());
3091    movp(dst, Operand(rsp, 0));
3092    leal(rsp, Operand(rsp, 4));
3093  }
3094}
3095
3096
3097void MacroAssembler::Pop(const Operand& dst) {
3098  if (kPointerSize == kInt64Size) {
3099    popq(dst);
3100  } else {
3101    Register scratch = dst.AddressUsesRegister(kScratchRegister)
3102        ? kRootRegister : kScratchRegister;
3103    movp(scratch, Operand(rsp, 0));
3104    movp(dst, scratch);
3105    leal(rsp, Operand(rsp, 4));
3106    if (scratch.is(kRootRegister)) {
3107      // Restore kRootRegister.
3108      InitializeRootRegister();
3109    }
3110  }
3111}
3112
3113
3114void MacroAssembler::PopQuad(const Operand& dst) {
3115  if (kPointerSize == kInt64Size) {
3116    popq(dst);
3117  } else {
3118    popq(kScratchRegister);
3119    movp(dst, kScratchRegister);
3120  }
3121}
3122
3123
3124void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
3125                                                        Register base,
3126                                                        int offset) {
3127  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
3128         offset <= SharedFunctionInfo::kSize &&
3129         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3130  if (kPointerSize == kInt64Size) {
3131    movsxlq(dst, FieldOperand(base, offset));
3132  } else {
3133    movp(dst, FieldOperand(base, offset));
3134    SmiToInteger32(dst, dst);
3135  }
3136}
3137
3138
3139void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
3140                                                           int offset,
3141                                                           int bits) {
3142  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
3143         offset <= SharedFunctionInfo::kSize &&
3144         (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
3145  if (kPointerSize == kInt32Size) {
3146    // On x32, this field is represented as a smi.
3147    bits += kSmiShift;
3148  }
3149  int byte_offset = bits / kBitsPerByte;
3150  int bit_in_byte = bits & (kBitsPerByte - 1);
3151  testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
3152}
3153
3154
3155void MacroAssembler::Jump(ExternalReference ext) {
3156  LoadAddress(kScratchRegister, ext);
3157  jmp(kScratchRegister);
3158}
3159
3160
3161void MacroAssembler::Jump(const Operand& op) {
3162  if (kPointerSize == kInt64Size) {
3163    jmp(op);
3164  } else {
3165    movp(kScratchRegister, op);
3166    jmp(kScratchRegister);
3167  }
3168}
3169
3170
3171void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
3172  Move(kScratchRegister, destination, rmode);
3173  jmp(kScratchRegister);
3174}
3175
3176
3177void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
3178  // TODO(X64): Inline this
3179  jmp(code_object, rmode);
3180}
3181
3182
3183int MacroAssembler::CallSize(ExternalReference ext) {
3184  // Opcode for call kScratchRegister (r10) is: REX.B FF D2 (three bytes).
3185  return LoadAddressSize(ext) +
3186         Assembler::kCallScratchRegisterInstructionLength;
3187}
3188
3189
3190void MacroAssembler::Call(ExternalReference ext) {
3191#ifdef DEBUG
3192  int end_position = pc_offset() + CallSize(ext);
3193#endif
3194  LoadAddress(kScratchRegister, ext);
3195  call(kScratchRegister);
3196#ifdef DEBUG
3197  CHECK_EQ(end_position, pc_offset());
3198#endif
3199}
3200
3201
3202void MacroAssembler::Call(const Operand& op) {
3203  if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
3204    call(op);
3205  } else {
3206    movp(kScratchRegister, op);
3207    call(kScratchRegister);
3208  }
3209}
3210
3211
3212void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
3213#ifdef DEBUG
3214  int end_position = pc_offset() + CallSize(destination);
3215#endif
3216  Move(kScratchRegister, destination, rmode);
3217  call(kScratchRegister);
3218#ifdef DEBUG
3219  CHECK_EQ(pc_offset(), end_position);
3220#endif
3221}
3222
3223
3224void MacroAssembler::Call(Handle<Code> code_object,
3225                          RelocInfo::Mode rmode,
3226                          TypeFeedbackId ast_id) {
3227#ifdef DEBUG
3228  int end_position = pc_offset() + CallSize(code_object);
3229#endif
3230  DCHECK(RelocInfo::IsCodeTarget(rmode) ||
3231      rmode == RelocInfo::CODE_AGE_SEQUENCE);
3232  call(code_object, rmode, ast_id);
3233#ifdef DEBUG
3234  CHECK_EQ(end_position, pc_offset());
3235#endif
3236}
3237
3238
3239void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
3240  if (imm8 == 0) {
3241    Movd(dst, src);
3242    return;
3243  }
3244  DCHECK_EQ(1, imm8);
3245  if (CpuFeatures::IsSupported(SSE4_1)) {
3246    CpuFeatureScope sse_scope(this, SSE4_1);
3247    pextrd(dst, src, imm8);
3248    return;
3249  }
3250  movq(dst, src);
3251  shrq(dst, Immediate(32));
3252}
3253
3254
3255void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
3256  if (CpuFeatures::IsSupported(SSE4_1)) {
3257    CpuFeatureScope sse_scope(this, SSE4_1);
3258    pinsrd(dst, src, imm8);
3259    return;
3260  }
3261  Movd(xmm0, src);
3262  if (imm8 == 1) {
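    // Added note: punpckldq interleaves the low dwords of dst and xmm0, which
    // keeps lane 0 of dst and places src into lane 1.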
3263    punpckldq(dst, xmm0);
3264  } else {
3265    DCHECK_EQ(0, imm8);
3266    Movss(dst, xmm0);
3267  }
3268}
3269
3270
3271void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
3272  DCHECK(imm8 == 0 || imm8 == 1);
3273  if (CpuFeatures::IsSupported(SSE4_1)) {
3274    CpuFeatureScope sse_scope(this, SSE4_1);
3275    pinsrd(dst, src, imm8);
3276    return;
3277  }
3278  Movd(xmm0, src);
3279  if (imm8 == 1) {
3280    punpckldq(dst, xmm0);
3281  } else {
3282    DCHECK_EQ(0, imm8);
3283    Movss(dst, xmm0);
3284  }
3285}
3286
3287
3288void MacroAssembler::Lzcntl(Register dst, Register src) {
3289  if (CpuFeatures::IsSupported(LZCNT)) {
3290    CpuFeatureScope scope(this, LZCNT);
3291    lzcntl(dst, src);
3292    return;
3293  }
3294  Label not_zero_src;
3295  bsrl(dst, src);
3296  j(not_zero, &not_zero_src, Label::kNear);
3297  Set(dst, 63);  // 63^31 == 32
3298  bind(&not_zero_src);
3299  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
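  // Added example: src == 1 -> bsrl yields 0 and 0 ^ 31 == 31 leading zeros;
  // src == 0 -> bsrl sets ZF and leaves dst undefined, so dst is preset to 63
  // and 63 ^ 31 == 32, matching lzcnt's defined result for zero.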
3300}
3301
3302
3303void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
3304  if (CpuFeatures::IsSupported(LZCNT)) {
3305    CpuFeatureScope scope(this, LZCNT);
3306    lzcntl(dst, src);
3307    return;
3308  }
3309  Label not_zero_src;
3310  bsrl(dst, src);
3311  j(not_zero, &not_zero_src, Label::kNear);
3312  Set(dst, 63);  // 63^31 == 32
3313  bind(&not_zero_src);
3314  xorl(dst, Immediate(31));  // for x in [0..31], 31^x == 31 - x
3315}
3316
3317
3318void MacroAssembler::Lzcntq(Register dst, Register src) {
3319  if (CpuFeatures::IsSupported(LZCNT)) {
3320    CpuFeatureScope scope(this, LZCNT);
3321    lzcntq(dst, src);
3322    return;
3323  }
3324  Label not_zero_src;
3325  bsrq(dst, src);
3326  j(not_zero, &not_zero_src, Label::kNear);
3327  Set(dst, 127);  // 127^63 == 64
3328  bind(&not_zero_src);
3329  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
3330}
3331
3332
3333void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
3334  if (CpuFeatures::IsSupported(LZCNT)) {
3335    CpuFeatureScope scope(this, LZCNT);
3336    lzcntq(dst, src);
3337    return;
3338  }
3339  Label not_zero_src;
3340  bsrq(dst, src);
3341  j(not_zero, &not_zero_src, Label::kNear);
3342  Set(dst, 127);  // 127^63 == 64
3343  bind(&not_zero_src);
3344  xorl(dst, Immediate(63));  // for x in [0..63], 63^x == 63 - x
3345}
3346
3347
3348void MacroAssembler::Tzcntq(Register dst, Register src) {
3349  if (CpuFeatures::IsSupported(BMI1)) {
3350    CpuFeatureScope scope(this, BMI1);
3351    tzcntq(dst, src);
3352    return;
3353  }
3354  Label not_zero_src;
3355  bsfq(dst, src);
3356  j(not_zero, &not_zero_src, Label::kNear);
3357  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
3358  Set(dst, 64);
3359  bind(&not_zero_src);
3360}
3361
3362
3363void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
3364  if (CpuFeatures::IsSupported(BMI1)) {
3365    CpuFeatureScope scope(this, BMI1);
3366    tzcntq(dst, src);
3367    return;
3368  }
3369  Label not_zero_src;
3370  bsfq(dst, src);
3371  j(not_zero, &not_zero_src, Label::kNear);
3372  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
3373  Set(dst, 64);
3374  bind(&not_zero_src);
3375}
3376
3377
3378void MacroAssembler::Tzcntl(Register dst, Register src) {
3379  if (CpuFeatures::IsSupported(BMI1)) {
3380    CpuFeatureScope scope(this, BMI1);
3381    tzcntl(dst, src);
3382    return;
3383  }
3384  Label not_zero_src;
3385  bsfl(dst, src);
3386  j(not_zero, &not_zero_src, Label::kNear);
3387  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
3388  bind(&not_zero_src);
3389}
3390
3391
3392void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
3393  if (CpuFeatures::IsSupported(BMI1)) {
3394    CpuFeatureScope scope(this, BMI1);
3395    tzcntl(dst, src);
3396    return;
3397  }
3398  Label not_zero_src;
3399  bsfl(dst, src);
3400  j(not_zero, &not_zero_src, Label::kNear);
3401  Set(dst, 32);  // The result of tzcnt is 32 if src = 0.
3402  bind(&not_zero_src);
3403}
3404
3405
3406void MacroAssembler::Popcntl(Register dst, Register src) {
3407  if (CpuFeatures::IsSupported(POPCNT)) {
3408    CpuFeatureScope scope(this, POPCNT);
3409    popcntl(dst, src);
3410    return;
3411  }
3412  UNREACHABLE();
3413}
3414
3415
3416void MacroAssembler::Popcntl(Register dst, const Operand& src) {
3417  if (CpuFeatures::IsSupported(POPCNT)) {
3418    CpuFeatureScope scope(this, POPCNT);
3419    popcntl(dst, src);
3420    return;
3421  }
3422  UNREACHABLE();
3423}
3424
3425
3426void MacroAssembler::Popcntq(Register dst, Register src) {
3427  if (CpuFeatures::IsSupported(POPCNT)) {
3428    CpuFeatureScope scope(this, POPCNT);
3429    popcntq(dst, src);
3430    return;
3431  }
3432  UNREACHABLE();
3433}
3434
3435
3436void MacroAssembler::Popcntq(Register dst, const Operand& src) {
3437  if (CpuFeatures::IsSupported(POPCNT)) {
3438    CpuFeatureScope scope(this, POPCNT);
3439    popcntq(dst, src);
3440    return;
3441  }
3442  UNREACHABLE();
3443}
3444
3445
3446void MacroAssembler::Pushad() {
3447  Push(rax);
3448  Push(rcx);
3449  Push(rdx);
3450  Push(rbx);
3451  // Not pushing rsp or rbp.
3452  Push(rsi);
3453  Push(rdi);
3454  Push(r8);
3455  Push(r9);
3456  // r10 is kScratchRegister.
3457  Push(r11);
3458  Push(r12);
3459  // r13 is kRootRegister.
3460  Push(r14);
3461  Push(r15);
3462  STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
3463  // Use lea for symmetry with Popad.
3464  int sp_delta =
3465      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3466  leap(rsp, Operand(rsp, -sp_delta));
3467}
3468
3469
3470void MacroAssembler::Popad() {
3471  // Popad must not change the flags, so use lea instead of addq.
3472  int sp_delta =
3473      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
3474  leap(rsp, Operand(rsp, sp_delta));
3475  Pop(r15);
3476  Pop(r14);
3477  Pop(r12);
3478  Pop(r11);
3479  Pop(r9);
3480  Pop(r8);
3481  Pop(rdi);
3482  Pop(rsi);
3483  Pop(rbx);
3484  Pop(rdx);
3485  Pop(rcx);
3486  Pop(rax);
3487}
3488
3489
3490void MacroAssembler::Dropad() {
3491  addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
3492}
3493
3494
3495// Order in which general registers are pushed by Pushad:
3496// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
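// Added note: entries of -1 mark registers that Pushad skips (rsp, rbp,
// r10 == kScratchRegister, r13 == kRootRegister); the other entries give each
// register's slot index in the safepoint frame.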
3497const int
3498MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
3499    0,
3500    1,
3501    2,
3502    3,
3503    -1,
3504    -1,
3505    4,
3506    5,
3507    6,
3508    7,
3509    -1,
3510    8,
3511    9,
3512    -1,
3513    10,
3514    11
3515};
3516
3517
3518void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
3519                                                  const Immediate& imm) {
3520  movp(SafepointRegisterSlot(dst), imm);
3521}
3522
3523
3524void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
3525  movp(SafepointRegisterSlot(dst), src);
3526}
3527
3528
3529void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
3530  movp(dst, SafepointRegisterSlot(src));
3531}
3532
3533
3534Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
3535  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
3536}
3537
3538
3539void MacroAssembler::PushStackHandler() {
3540  // Adjust this code if not the case.
3541  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
3542  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3543
3544  // Link the current handler as the next handler.
3545  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3546  Push(ExternalOperand(handler_address));
3547
3548  // Set this new handler as the current one.
3549  movp(ExternalOperand(handler_address), rsp);
3550}
3551
3552
3553void MacroAssembler::PopStackHandler() {
3554  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3555  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
3556  Pop(ExternalOperand(handler_address));
3557  addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
3558}
3559
3560
3561void MacroAssembler::Ret() {
3562  ret(0);
3563}
3564
3565
3566void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
3567  if (is_uint16(bytes_dropped)) {
3568    ret(bytes_dropped);
3569  } else {
3570    PopReturnAddressTo(scratch);
3571    addp(rsp, Immediate(bytes_dropped));
3572    PushReturnAddressFrom(scratch);
3573    ret(0);
3574  }
3575}
3576
3577
3578void MacroAssembler::FCmp() {
3579  fucomip();
3580  fstp(0);
3581}
3582
3583
3584void MacroAssembler::CmpObjectType(Register heap_object,
3585                                   InstanceType type,
3586                                   Register map) {
3587  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3588  CmpInstanceType(map, type);
3589}
3590
3591
3592void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3593  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3594       Immediate(static_cast<int8_t>(type)));
3595}
3596
3597
3598void MacroAssembler::CheckFastElements(Register map,
3599                                       Label* fail,
3600                                       Label::Distance distance) {
3601  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3602  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3603  STATIC_ASSERT(FAST_ELEMENTS == 2);
3604  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3605  cmpb(FieldOperand(map, Map::kBitField2Offset),
3606       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3607  j(above, fail, distance);
3608}
3609
3610
3611void MacroAssembler::CheckFastObjectElements(Register map,
3612                                             Label* fail,
3613                                             Label::Distance distance) {
3614  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3615  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3616  STATIC_ASSERT(FAST_ELEMENTS == 2);
3617  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3618  cmpb(FieldOperand(map, Map::kBitField2Offset),
3619       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3620  j(below_equal, fail, distance);
3621  cmpb(FieldOperand(map, Map::kBitField2Offset),
3622       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3623  j(above, fail, distance);
3624}
3625
3626
3627void MacroAssembler::CheckFastSmiElements(Register map,
3628                                          Label* fail,
3629                                          Label::Distance distance) {
3630  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3631  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3632  cmpb(FieldOperand(map, Map::kBitField2Offset),
3633       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3634  j(above, fail, distance);
3635}
3636
3637
3638void MacroAssembler::StoreNumberToDoubleElements(
3639    Register maybe_number,
3640    Register elements,
3641    Register index,
3642    XMMRegister xmm_scratch,
3643    Label* fail,
3644    int elements_offset) {
3645  Label smi_value, done;
3646
3647  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3648
3649  CheckMap(maybe_number,
3650           isolate()->factory()->heap_number_map(),
3651           fail,
3652           DONT_DO_SMI_CHECK);
3653
3654  // Double value, turn potential sNaN into qNaN.
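  // Multiplying by 1.0 leaves ordinary doubles unchanged but quiets a
  // signaling NaN, since any arithmetic on an sNaN yields a qNaN.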
3655  Move(xmm_scratch, 1.0);
3656  mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3657  jmp(&done, Label::kNear);
3658
3659  bind(&smi_value);
3660  // Value is a smi. Convert to a double and store.
3661  // Preserve original value.
3662  SmiToInteger32(kScratchRegister, maybe_number);
3663  Cvtlsi2sd(xmm_scratch, kScratchRegister);
3664  bind(&done);
3665  Movsd(FieldOperand(elements, index, times_8,
3666                     FixedDoubleArray::kHeaderSize - elements_offset),
3667        xmm_scratch);
3668}
3669
3670
3671void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3672  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3673}
3674
3675
3676void MacroAssembler::CheckMap(Register obj,
3677                              Handle<Map> map,
3678                              Label* fail,
3679                              SmiCheckType smi_check_type) {
3680  if (smi_check_type == DO_SMI_CHECK) {
3681    JumpIfSmi(obj, fail);
3682  }
3683
3684  CompareMap(obj, map);
3685  j(not_equal, fail);
3686}
3687
3688
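// Clamps the value in |reg| to the uint8 range: if any bit outside the low
// byte is set, negative inputs become 0 and inputs above 255 become 255
// (equivalent to reg = reg < 0 ? 0 : reg > 255 ? 255 : reg).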
3689void MacroAssembler::ClampUint8(Register reg) {
3690  Label done;
3691  testl(reg, Immediate(0xFFFFFF00));
3692  j(zero, &done, Label::kNear);
3693  setcc(negative, reg);  // 1 if negative, 0 if positive.
3694  decb(reg);  // 0 if negative, 255 if positive.
3695  bind(&done);
3696}
3697
3698
3699void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3700                                        XMMRegister temp_xmm_reg,
3701                                        Register result_reg) {
3702  Label done;
3703  Label conv_failure;
3704  Xorpd(temp_xmm_reg, temp_xmm_reg);
3705  Cvtsd2si(result_reg, input_reg);
3706  testl(result_reg, Immediate(0xFFFFFF00));
3707  j(zero, &done, Label::kNear);
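  // Cvtsd2si yields INT32_MIN (0x80000000) for NaN and out-of-range inputs;
  // result_reg - 1 overflows only for that value, so the overflow check below
  // detects a failed conversion.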
3708  cmpl(result_reg, Immediate(1));
3709  j(overflow, &conv_failure, Label::kNear);
3710  movl(result_reg, Immediate(0));
3711  setcc(sign, result_reg);
3712  subl(result_reg, Immediate(1));
3713  andl(result_reg, Immediate(255));
3714  jmp(&done, Label::kNear);
3715  bind(&conv_failure);
3716  Set(result_reg, 0);
3717  Ucomisd(input_reg, temp_xmm_reg);
3718  j(below, &done, Label::kNear);
3719  Set(result_reg, 255);
3720  bind(&done);
3721}
3722
3723
3724void MacroAssembler::LoadUint32(XMMRegister dst,
3725                                Register src) {
3726  if (FLAG_debug_code) {
3727    cmpq(src, Immediate(0xffffffff));
3728    Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3729  }
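  // With the upper 32 bits clear, the signed 64-bit conversion below is exact
  // for the whole uint32 range, which a 32-bit signed conversion is not.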
3730  Cvtqsi2sd(dst, src);
3731}
3732
3733
3734void MacroAssembler::SlowTruncateToI(Register result_reg,
3735                                     Register input_reg,
3736                                     int offset) {
3737  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
3738  call(stub.GetCode(), RelocInfo::CODE_TARGET);
3739}
3740
3741
3742void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3743                                           Register input_reg) {
3744  Label done;
3745  Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3746  Cvttsd2siq(result_reg, xmm0);
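  // Cvttsd2siq produces 0x8000000000000000 when the value cannot be
  // represented as an int64; only that value makes result_reg - 1 overflow,
  // so the cmpq/j(no_overflow) pair below detects conversion failure.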
3747  cmpq(result_reg, Immediate(1));
3748  j(no_overflow, &done, Label::kNear);
3749
3750  // Slow case.
3751  if (input_reg.is(result_reg)) {
3752    subp(rsp, Immediate(kDoubleSize));
3753    Movsd(MemOperand(rsp, 0), xmm0);
3754    SlowTruncateToI(result_reg, rsp, 0);
3755    addp(rsp, Immediate(kDoubleSize));
3756  } else {
3757    SlowTruncateToI(result_reg, input_reg);
3758  }
3759
3760  bind(&done);
3761  // Keep our invariant that the upper 32 bits are zero.
3762  movl(result_reg, result_reg);
3763}
3764
3765
3766void MacroAssembler::TruncateDoubleToI(Register result_reg,
3767                                       XMMRegister input_reg) {
3768  Label done;
3769  Cvttsd2siq(result_reg, input_reg);
3770  cmpq(result_reg, Immediate(1));
3771  j(no_overflow, &done, Label::kNear);
3772
3773  subp(rsp, Immediate(kDoubleSize));
3774  Movsd(MemOperand(rsp, 0), input_reg);
3775  SlowTruncateToI(result_reg, rsp, 0);
3776  addp(rsp, Immediate(kDoubleSize));
3777
3778  bind(&done);
3779  // Keep our invariant that the upper 32 bits are zero.
3780  movl(result_reg, result_reg);
3781}
3782
3783
3784void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
3785                               XMMRegister scratch,
3786                               MinusZeroMode minus_zero_mode,
3787                               Label* lost_precision, Label* is_nan,
3788                               Label* minus_zero, Label::Distance dst) {
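  // Convert to int32, convert back and compare: if the round trip does not
  // reproduce the input, the double was out of range or had a fractional part
  // (lost_precision); an unordered result (PF set) means the input was NaN.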
3789  Cvttsd2si(result_reg, input_reg);
3790  Cvtlsi2sd(xmm0, result_reg);
3791  Ucomisd(xmm0, input_reg);
3792  j(not_equal, lost_precision, dst);
3793  j(parity_even, is_nan, dst);  // NaN.
3794  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3795    Label done;
3796    // The integer converted back is equal to the original. We
3797    // only have to test if we got -0 as an input.
3798    testl(result_reg, result_reg);
3799    j(not_zero, &done, Label::kNear);
3800    Movmskpd(result_reg, input_reg);
3801    // Bit 0 contains the sign of the double in input_reg.
3802    // If input was positive, we are ok and return 0, otherwise
3803    // jump to minus_zero.
3804    andl(result_reg, Immediate(1));
3805    j(not_zero, minus_zero, dst);
3806    bind(&done);
3807  }
3808}
3809
3810
3811void MacroAssembler::LoadInstanceDescriptors(Register map,
3812                                             Register descriptors) {
3813  movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3814}
3815
3816
3817void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3818  movl(dst, FieldOperand(map, Map::kBitField3Offset));
3819  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3820}
3821
3822
3823void MacroAssembler::EnumLength(Register dst, Register map) {
3824  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
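  // EnumLength occupies the low bits of bit field 3 (shift 0, asserted
  // above), so masking alone extracts it and no shift is needed.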
3825  movl(dst, FieldOperand(map, Map::kBitField3Offset));
3826  andl(dst, Immediate(Map::EnumLengthBits::kMask));
3827  Integer32ToSmi(dst, dst);
3828}
3829
3830
3831void MacroAssembler::LoadAccessor(Register dst, Register holder,
3832                                  int accessor_index,
3833                                  AccessorComponent accessor) {
3834  movp(dst, FieldOperand(holder, HeapObject::kMapOffset));
3835  LoadInstanceDescriptors(dst, dst);
3836  movp(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3837  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
3838                                           : AccessorPair::kSetterOffset;
3839  movp(dst, FieldOperand(dst, offset));
3840}
3841
3842
3843void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3844                                     Register scratch2, Handle<WeakCell> cell,
3845                                     Handle<Code> success,
3846                                     SmiCheckType smi_check_type) {
3847  Label fail;
3848  if (smi_check_type == DO_SMI_CHECK) {
3849    JumpIfSmi(obj, &fail);
3850  }
3851  movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
3852  CmpWeakValue(scratch1, cell, scratch2);
3853  j(equal, success, RelocInfo::CODE_TARGET);
3854  bind(&fail);
3855}
3856
3857
3858void MacroAssembler::AssertNumber(Register object) {
3859  if (emit_debug_code()) {
3860    Label ok;
3861    Condition is_smi = CheckSmi(object);
3862    j(is_smi, &ok, Label::kNear);
3863    Cmp(FieldOperand(object, HeapObject::kMapOffset),
3864        isolate()->factory()->heap_number_map());
3865    Check(equal, kOperandIsNotANumber);
3866    bind(&ok);
3867  }
3868}
3869
3870
3871void MacroAssembler::AssertNotSmi(Register object) {
3872  if (emit_debug_code()) {
3873    Condition is_smi = CheckSmi(object);
3874    Check(NegateCondition(is_smi), kOperandIsASmi);
3875  }
3876}
3877
3878
3879void MacroAssembler::AssertSmi(Register object) {
3880  if (emit_debug_code()) {
3881    Condition is_smi = CheckSmi(object);
3882    Check(is_smi, kOperandIsNotASmi);
3883  }
3884}
3885
3886
3887void MacroAssembler::AssertSmi(const Operand& object) {
3888  if (emit_debug_code()) {
3889    Condition is_smi = CheckSmi(object);
3890    Check(is_smi, kOperandIsNotASmi);
3891  }
3892}
3893
3894
3895void MacroAssembler::AssertZeroExtended(Register int32_register) {
3896  if (emit_debug_code()) {
3897    DCHECK(!int32_register.is(kScratchRegister));
3898    movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3899    cmpq(kScratchRegister, int32_register);
3900    Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3901  }
3902}
3903
3904
3905void MacroAssembler::AssertString(Register object) {
3906  if (emit_debug_code()) {
3907    testb(object, Immediate(kSmiTagMask));
3908    Check(not_equal, kOperandIsASmiAndNotAString);
3909    Push(object);
3910    movp(object, FieldOperand(object, HeapObject::kMapOffset));
3911    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3912    Pop(object);
3913    Check(below, kOperandIsNotAString);
3914  }
3915}
3916
3917
3918void MacroAssembler::AssertName(Register object) {
3919  if (emit_debug_code()) {
3920    testb(object, Immediate(kSmiTagMask));
3921    Check(not_equal, kOperandIsASmiAndNotAName);
3922    Push(object);
3923    movp(object, FieldOperand(object, HeapObject::kMapOffset));
3924    CmpInstanceType(object, LAST_NAME_TYPE);
3925    Pop(object);
3926    Check(below_equal, kOperandIsNotAName);
3927  }
3928}
3929
3930
3931void MacroAssembler::AssertFunction(Register object) {
3932  if (emit_debug_code()) {
3933    testb(object, Immediate(kSmiTagMask));
3934    Check(not_equal, kOperandIsASmiAndNotAFunction);
3935    Push(object);
3936    CmpObjectType(object, JS_FUNCTION_TYPE, object);
3937    Pop(object);
3938    Check(equal, kOperandIsNotAFunction);
3939  }
3940}
3941
3942
3943void MacroAssembler::AssertBoundFunction(Register object) {
3944  if (emit_debug_code()) {
3945    testb(object, Immediate(kSmiTagMask));
3946    Check(not_equal, kOperandIsASmiAndNotABoundFunction);
3947    Push(object);
3948    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
3949    Pop(object);
3950    Check(equal, kOperandIsNotABoundFunction);
3951  }
3952}
3953
3954
3955void MacroAssembler::AssertReceiver(Register object) {
3956  if (emit_debug_code()) {
3957    testb(object, Immediate(kSmiTagMask));
3958    Check(not_equal, kOperandIsASmiAndNotAReceiver);
3959    Push(object);
3960    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3961    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
3962    Pop(object);
3963    Check(above_equal, kOperandIsNotAReceiver);
3964  }
3965}
3966
3967
3968void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3969  if (emit_debug_code()) {
3970    Label done_checking;
3971    AssertNotSmi(object);
3972    Cmp(object, isolate()->factory()->undefined_value());
3973    j(equal, &done_checking);
3974    Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3975    Assert(equal, kExpectedUndefinedOrCell);
3976    bind(&done_checking);
3977  }
3978}
3979
3980
3981void MacroAssembler::AssertRootValue(Register src,
3982                                     Heap::RootListIndex root_value_index,
3983                                     BailoutReason reason) {
3984  if (emit_debug_code()) {
3985    DCHECK(!src.is(kScratchRegister));
3986    LoadRoot(kScratchRegister, root_value_index);
3987    cmpp(src, kScratchRegister);
3988    Check(equal, reason);
3989  }
3990}
3991
3992
3993
3994Condition MacroAssembler::IsObjectStringType(Register heap_object,
3995                                             Register map,
3996                                             Register instance_type) {
3997  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3998  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3999  STATIC_ASSERT(kNotStringTag != 0);
4000  testb(instance_type, Immediate(kIsNotStringMask));
4001  return zero;
4002}
4003
4004
4005Condition MacroAssembler::IsObjectNameType(Register heap_object,
4006                                           Register map,
4007                                           Register instance_type) {
4008  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
4009  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
4010  cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
4011  return below_equal;
4012}
4013
4014
4015void MacroAssembler::GetMapConstructor(Register result, Register map,
4016                                       Register temp) {
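  // Map::kConstructorOrBackPointerOffset holds either the constructor or a
  // back pointer (which is itself a map); follow back pointers until a
  // non-map value is reached.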
4017  Label done, loop;
4018  movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
4019  bind(&loop);
4020  JumpIfSmi(result, &done, Label::kNear);
4021  CmpObjectType(result, MAP_TYPE, temp);
4022  j(not_equal, &done, Label::kNear);
4023  movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
4024  jmp(&loop);
4025  bind(&done);
4026}
4027
4028
4029void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
4030                                             Label* miss) {
4031  // Get the prototype or initial map from the function.
4032  movp(result,
4033       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4034
4035  // If the prototype or initial map is the hole, don't return it and
4036  // simply miss the cache instead. This will allow us to allocate a
4037  // prototype object on-demand in the runtime system.
4038  CompareRoot(result, Heap::kTheHoleValueRootIndex);
4039  j(equal, miss);
4040
4041  // If the function does not have an initial map, we're done.
4042  Label done;
4043  CmpObjectType(result, MAP_TYPE, kScratchRegister);
4044  j(not_equal, &done, Label::kNear);
4045
4046  // Get the prototype from the initial map.
4047  movp(result, FieldOperand(result, Map::kPrototypeOffset));
4048
4049  // All done.
4050  bind(&done);
4051}
4052
4053
4054void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
4055  if (FLAG_native_code_counters && counter->Enabled()) {
4056    Operand counter_operand = ExternalOperand(ExternalReference(counter));
4057    movl(counter_operand, Immediate(value));
4058  }
4059}
4060
4061
4062void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
4063  DCHECK(value > 0);
4064  if (FLAG_native_code_counters && counter->Enabled()) {
4065    Operand counter_operand = ExternalOperand(ExternalReference(counter));
4066    if (value == 1) {
4067      incl(counter_operand);
4068    } else {
4069      addl(counter_operand, Immediate(value));
4070    }
4071  }
4072}
4073
4074
4075void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
4076  DCHECK(value > 0);
4077  if (FLAG_native_code_counters && counter->Enabled()) {
4078    Operand counter_operand = ExternalOperand(ExternalReference(counter));
4079    if (value == 1) {
4080      decl(counter_operand);
4081    } else {
4082      subl(counter_operand, Immediate(value));
4083    }
4084  }
4085}
4086
4087
4088void MacroAssembler::DebugBreak() {
4089  Set(rax, 0);  // No arguments.
4090  LoadAddress(rbx,
4091              ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
4092  CEntryStub ces(isolate(), 1);
4093  DCHECK(AllowThisStubCall(&ces));
4094  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
4095}
4096
4097
4098void MacroAssembler::InvokeFunction(Register function,
4099                                    Register new_target,
4100                                    const ParameterCount& actual,
4101                                    InvokeFlag flag,
4102                                    const CallWrapper& call_wrapper) {
4103  movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
4104  LoadSharedFunctionInfoSpecialField(
4105      rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
4106
4107  ParameterCount expected(rbx);
4108  InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
4109}
4110
4111
4112void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4113                                    const ParameterCount& expected,
4114                                    const ParameterCount& actual,
4115                                    InvokeFlag flag,
4116                                    const CallWrapper& call_wrapper) {
4117  Move(rdi, function);
4118  InvokeFunction(rdi, no_reg, expected, actual, flag, call_wrapper);
4119}
4120
4121
4122void MacroAssembler::InvokeFunction(Register function,
4123                                    Register new_target,
4124                                    const ParameterCount& expected,
4125                                    const ParameterCount& actual,
4126                                    InvokeFlag flag,
4127                                    const CallWrapper& call_wrapper) {
4128  DCHECK(function.is(rdi));
4129  movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
4130  InvokeFunctionCode(rdi, new_target, expected, actual, flag, call_wrapper);
4131}
4132
4133
4134void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4135                                        const ParameterCount& expected,
4136                                        const ParameterCount& actual,
4137                                        InvokeFlag flag,
4138                                        const CallWrapper& call_wrapper) {
4139  // You can't call a function without a valid frame.
4140  DCHECK(flag == JUMP_FUNCTION || has_frame());
4141  DCHECK(function.is(rdi));
4142  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
4143
4144  if (call_wrapper.NeedsDebugStepCheck()) {
4145    FloodFunctionIfStepping(function, new_target, expected, actual);
4146  }
4147
4148  // Clear the new.target register if not given.
4149  if (!new_target.is_valid()) {
4150    LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
4151  }
4152
4153  Label done;
4154  bool definitely_mismatches = false;
4155  InvokePrologue(expected,
4156                 actual,
4157                 &done,
4158                 &definitely_mismatches,
4159                 flag,
4160                 Label::kNear,
4161                 call_wrapper);
4162  if (!definitely_mismatches) {
4163    // We call indirectly through the code field in the function to
4164    // allow recompilation to take effect without changing any of the
4165    // call sites.
4166    Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
4167    if (flag == CALL_FUNCTION) {
4168      call_wrapper.BeforeCall(CallSize(code));
4169      call(code);
4170      call_wrapper.AfterCall();
4171    } else {
4172      DCHECK(flag == JUMP_FUNCTION);
4173      jmp(code);
4174    }
4175    bind(&done);
4176  }
4177}
4178
4179
4180void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4181                                    const ParameterCount& actual,
4182                                    Label* done,
4183                                    bool* definitely_mismatches,
4184                                    InvokeFlag flag,
4185                                    Label::Distance near_jump,
4186                                    const CallWrapper& call_wrapper) {
4187  bool definitely_matches = false;
4188  *definitely_mismatches = false;
4189  Label invoke;
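  // Three cases follow: both argument counts are immediates (decided
  // statically), expected in a register with an immediate actual, or both in
  // registers; any mismatch falls through to the arguments adaptor below.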
4190  if (expected.is_immediate()) {
4191    DCHECK(actual.is_immediate());
4192    Set(rax, actual.immediate());
4193    if (expected.immediate() == actual.immediate()) {
4194      definitely_matches = true;
4195    } else {
4196      if (expected.immediate() ==
4197              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
4198        // Don't worry about adapting arguments for built-ins that
4199        // don't want that done. Skip the adaptation code by making it look
4200        // like we have a match between expected and actual number of
4201        // arguments.
4202        definitely_matches = true;
4203      } else {
4204        *definitely_mismatches = true;
4205        Set(rbx, expected.immediate());
4206      }
4207    }
4208  } else {
4209    if (actual.is_immediate()) {
4210      // Expected is in register, actual is immediate. This is the
4211      // case when we invoke function values without going through the
4212      // IC mechanism.
4213      Set(rax, actual.immediate());
4214      cmpp(expected.reg(), Immediate(actual.immediate()));
4215      j(equal, &invoke, Label::kNear);
4216      DCHECK(expected.reg().is(rbx));
4217    } else if (!expected.reg().is(actual.reg())) {
4218      // Both expected and actual are in (different) registers. This
4219      // is the case when we invoke functions using call and apply.
4220      cmpp(expected.reg(), actual.reg());
4221      j(equal, &invoke, Label::kNear);
4222      DCHECK(actual.reg().is(rax));
4223      DCHECK(expected.reg().is(rbx));
4224    } else {
4225      Move(rax, actual.reg());
4226    }
4227  }
4228
4229  if (!definitely_matches) {
4230    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
4231    if (flag == CALL_FUNCTION) {
4232      call_wrapper.BeforeCall(CallSize(adaptor));
4233      Call(adaptor, RelocInfo::CODE_TARGET);
4234      call_wrapper.AfterCall();
4235      if (!*definitely_mismatches) {
4236        jmp(done, near_jump);
4237      }
4238    } else {
4239      Jump(adaptor, RelocInfo::CODE_TARGET);
4240    }
4241    bind(&invoke);
4242  }
4243}
4244
4245
4246void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
4247                                             const ParameterCount& expected,
4248                                             const ParameterCount& actual) {
4249  Label skip_flooding;
4250  ExternalReference step_in_enabled =
4251      ExternalReference::debug_step_in_enabled_address(isolate());
4252  Operand step_in_enabled_operand = ExternalOperand(step_in_enabled);
4253  cmpb(step_in_enabled_operand, Immediate(0));
4254  j(equal, &skip_flooding);
4255  {
4256    FrameScope frame(this,
4257                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4258    if (expected.is_reg()) {
4259      Integer32ToSmi(expected.reg(), expected.reg());
4260      Push(expected.reg());
4261    }
4262    if (actual.is_reg()) {
4263      Integer32ToSmi(actual.reg(), actual.reg());
4264      Push(actual.reg());
4265    }
4266    if (new_target.is_valid()) {
4267      Push(new_target);
4268    }
4269    Push(fun);
4270    Push(fun);
4271    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
4272    Pop(fun);
4273    if (new_target.is_valid()) {
4274      Pop(new_target);
4275    }
4276    if (actual.is_reg()) {
4277      Pop(actual.reg());
4278      SmiToInteger64(actual.reg(), actual.reg());
4279    }
4280    if (expected.is_reg()) {
4281      Pop(expected.reg());
4282      SmiToInteger64(expected.reg(), expected.reg());
4283    }
4284  }
4285  bind(&skip_flooding);
4286}
4287
4288
4289void MacroAssembler::StubPrologue() {
4290  pushq(rbp);  // Caller's frame pointer.
4291  movp(rbp, rsp);
4292  Push(rsi);  // Callee's context.
4293  Push(Smi::FromInt(StackFrame::STUB));
4294}
4295
4296
4297void MacroAssembler::Prologue(bool code_pre_aging) {
4298  PredictableCodeSizeScope predictable_code_size_scope(this,
4299      kNoCodeAgeSequenceLength);
4300  if (code_pre_aging) {
4301    // Pre-age the code.
4302    Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
4303         RelocInfo::CODE_AGE_SEQUENCE);
4304    Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
4305  } else {
4306    pushq(rbp);  // Caller's frame pointer.
4307    movp(rbp, rsp);
4308    Push(rsi);  // Callee's context.
4309    Push(rdi);  // Callee's JS function.
4310  }
4311}
4312
4313
4314void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
4315  movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
4316  movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
4317  movp(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
4318}
4319
4320
4321void MacroAssembler::EnterFrame(StackFrame::Type type,
4322                                bool load_constant_pool_pointer_reg) {
4323  // Out-of-line constant pool not implemented on x64.
4324  UNREACHABLE();
4325}
4326
4327
4328void MacroAssembler::EnterFrame(StackFrame::Type type) {
4329  pushq(rbp);
4330  movp(rbp, rsp);
4331  Push(rsi);  // Context.
4332  Push(Smi::FromInt(type));
4333  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4334  Push(kScratchRegister);
4335  if (emit_debug_code()) {
4336    Move(kScratchRegister,
4337         isolate()->factory()->undefined_value(),
4338         RelocInfo::EMBEDDED_OBJECT);
4339    cmpp(Operand(rsp, 0), kScratchRegister);
4340    Check(not_equal, kCodeObjectNotProperlyPatched);
4341  }
4342}
4343
4344
4345void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4346  if (emit_debug_code()) {
4347    Move(kScratchRegister, Smi::FromInt(type));
4348    cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
4349    Check(equal, kStackFrameTypesMustMatch);
4350  }
4351  movp(rsp, rbp);
4352  popq(rbp);
4353}
4354
4355
4356void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
4357  // Set up the frame structure on the stack.
4358  // All constants are relative to the frame pointer of the exit frame.
4359  DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
4360         kFPOnStackSize + kPCOnStackSize);
4361  DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
4362  DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
4363  pushq(rbp);
4364  movp(rbp, rsp);
4365
4366  // Reserve room for entry stack pointer and push the code object.
4367  DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
4368  Push(Immediate(0));  // Saved entry sp, patched before call.
4369  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
4370  Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
4371
4372  // Save the frame pointer and the context in top.
4373  if (save_rax) {
4374    movp(r14, rax);  // Backup rax in callee-save register.
4375  }
4376
4377  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
4378  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
4379  Store(ExternalReference(Isolate::kCFunctionAddress, isolate()), rbx);
4380}
4381
4382
4383void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
4384                                            bool save_doubles) {
4385#ifdef _WIN64
4386  const int kShadowSpace = 4;
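  // The Windows x64 calling convention requires the caller to reserve four
  // register-sized slots of shadow space for the callee.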
4387  arg_stack_space += kShadowSpace;
4388#endif
4389  // Optionally save all XMM registers.
4390  if (save_doubles) {
4391    int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
4392                arg_stack_space * kRegisterSize;
4393    subp(rsp, Immediate(space));
4394    int offset = -2 * kPointerSize;
4395    const RegisterConfiguration* config =
4396        RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
4397    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4398      DoubleRegister reg =
4399          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
4400      Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
4401    }
4402  } else if (arg_stack_space > 0) {
4403    subp(rsp, Immediate(arg_stack_space * kRegisterSize));
4404  }
4405
4406  // Get the required frame alignment for the OS.
4407  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
4408  if (kFrameAlignment > 0) {
4409    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
4410    DCHECK(is_int8(kFrameAlignment));
4411    andp(rsp, Immediate(-kFrameAlignment));
4412  }
4413
4414  // Patch the saved entry sp.
4415  movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
4416}
4417
4418
4419void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
4420  EnterExitFramePrologue(true);
4421
4422  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
4423  // so it must be retained across the C-call.
4424  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
4425  leap(r15, Operand(rbp, r14, times_pointer_size, offset));
4426
4427  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
4428}
4429
4430
4431void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
4432  EnterExitFramePrologue(false);
4433  EnterExitFrameEpilogue(arg_stack_space, false);
4434}
4435
4436
4437void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
4438  // Registers:
4439  // r15 : argv
4440  if (save_doubles) {
4441    int offset = -2 * kPointerSize;
4442    const RegisterConfiguration* config =
4443        RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
4444    for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4445      DoubleRegister reg =
4446          DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
4447      Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
4448    }
4449  }
4450
4451  if (pop_arguments) {
4452    // Get the return address from the stack and restore the frame pointer.
4453    movp(rcx, Operand(rbp, kFPOnStackSize));
4454    movp(rbp, Operand(rbp, 0 * kPointerSize));
4455
4456    // Drop everything up to and including the arguments and the receiver
4457    // from the caller stack.
4458    leap(rsp, Operand(r15, 1 * kPointerSize));
4459
4460    PushReturnAddressFrom(rcx);
4461  } else {
4462    // Otherwise just leave the exit frame.
4463    leave();
4464  }
4465
4466  LeaveExitFrameEpilogue(true);
4467}
4468
4469
4470void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
4471  movp(rsp, rbp);
4472  popq(rbp);
4473
4474  LeaveExitFrameEpilogue(restore_context);
4475}
4476
4477
4478void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
4479  // Restore current context from top and clear it in debug mode.
4480  ExternalReference context_address(Isolate::kContextAddress, isolate());
4481  Operand context_operand = ExternalOperand(context_address);
4482  if (restore_context) {
4483    movp(rsi, context_operand);
4484  }
4485#ifdef DEBUG
4486  movp(context_operand, Immediate(0));
4487#endif
4488
4489  // Clear the top frame.
4490  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
4491                                       isolate());
4492  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
4493  movp(c_entry_fp_operand, Immediate(0));
4494}
4495
4496
4497void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4498                                            Register scratch,
4499                                            Label* miss) {
4500  Label same_contexts;
4501
4502  DCHECK(!holder_reg.is(scratch));
4503  DCHECK(!scratch.is(kScratchRegister));
4504  // Load current lexical context from the stack frame.
4505  movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
4506
4507  // When generating debug code, make sure the lexical context is set.
4508  if (emit_debug_code()) {
4509    cmpp(scratch, Immediate(0));
4510    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
4511  }
4512  // Load the native context of the current context.
4513  movp(scratch, ContextOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
4514
4515  // Check the context is a native context.
4516  if (emit_debug_code()) {
4517    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
4518        isolate()->factory()->native_context_map());
4519    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4520  }
4521
4522  // Check if both contexts are the same.
4523  cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4524  j(equal, &same_contexts);
4525
4526  // Compare security tokens.
4527  // Check that the security token in the calling global object is
4528  // compatible with the security token in the receiving global
4529  // object.
4530
4531  // Check the context is a native context.
4532  if (emit_debug_code()) {
4533    // Preserve original value of holder_reg.
4534    Push(holder_reg);
4535    movp(holder_reg,
4536         FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4537    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
4538    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
4539
4540    // Read the first word and compare to native_context_map().
4541    movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
4542    CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
4543    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
4544    Pop(holder_reg);
4545  }
4546
4547  movp(kScratchRegister,
4548       FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
4549  int token_offset =
4550      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
4551  movp(scratch, FieldOperand(scratch, token_offset));
4552  cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
4553  j(not_equal, miss);
4554
4555  bind(&same_contexts);
4556}
4557
4558
4559// Compute the hash code from the untagged key.  This must be kept in sync with
4560// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4561// code-stubs-hydrogen.cc
4562void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
4563  // First of all we assign the hash seed to scratch.
4564  LoadRoot(scratch, Heap::kHashSeedRootIndex);
4565  SmiToInteger32(scratch, scratch);
4566
4567  // Xor original key with a seed.
4568  xorl(r0, scratch);
4569
4570  // Compute the hash code from the untagged key.  This must be kept in sync
4571  // with ComputeIntegerHash in utils.h.
4572  //
4573  // hash = ~hash + (hash << 15);
4574  movl(scratch, r0);
4575  notl(r0);
4576  shll(scratch, Immediate(15));
4577  addl(r0, scratch);
4578  // hash = hash ^ (hash >> 12);
4579  movl(scratch, r0);
4580  shrl(scratch, Immediate(12));
4581  xorl(r0, scratch);
4582  // hash = hash + (hash << 2);
4583  leal(r0, Operand(r0, r0, times_4, 0));
4584  // hash = hash ^ (hash >> 4);
4585  movl(scratch, r0);
4586  shrl(scratch, Immediate(4));
4587  xorl(r0, scratch);
4588  // hash = hash * 2057;
4589  imull(r0, r0, Immediate(2057));
4590  // hash = hash ^ (hash >> 16);
4591  movl(scratch, r0);
4592  shrl(scratch, Immediate(16));
4593  xorl(r0, scratch);
4594  andl(r0, Immediate(0x3fffffff));
4595}
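
// For reference, the hash computed above expressed in C++ (a sketch that
// mirrors the steps emitted by GetNumberHash):
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;
//   hash = hash ^ (hash >> 16);
//   hash = hash & 0x3fffffff;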
4596
4597
4598
4599void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4600                                              Register elements,
4601                                              Register key,
4602                                              Register r0,
4603                                              Register r1,
4604                                              Register r2,
4605                                              Register result) {
4606  // Register use:
4607  //
4608  // elements - holds the slow-case elements of the receiver on entry.
4609  //            Unchanged unless 'result' is the same register.
4610  //
4611  // key      - holds the smi key on entry.
4612  //            Unchanged unless 'result' is the same register.
4613  //
4614  // Scratch registers:
4615  //
4616  // r0 - holds the untagged key on entry and holds the hash once computed.
4617  //
4618  // r1 - used to hold the capacity mask of the dictionary
4619  //
4620  // r2 - used for the index into the dictionary.
4621  //
4622  // result - holds the result on exit if the load succeeded.
4623  //          Allowed to be the same as 'elements' or 'key'.
4624  //          Unchanged on bailout so 'elements' or 'key' can be used
4625  //          in further computation.
4626
4627  Label done;
4628
4629  GetNumberHash(r0, r1);
4630
4631  // Compute capacity mask.
4632  SmiToInteger32(r1, FieldOperand(elements,
4633                                  SeededNumberDictionary::kCapacityOffset));
4634  decl(r1);
4635
4636  // Generate an unrolled loop that performs a few probes before giving up.
4637  for (int i = 0; i < kNumberDictionaryProbes; i++) {
4638    // Use r2 for index calculations and keep the hash intact in r0.
4639    movp(r2, r0);
4640    // Compute the masked index: (hash + i + i * i) & mask.
4641    if (i > 0) {
4642      addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4643    }
4644    andp(r2, r1);
4645
4646    // Scale the index by multiplying by the entry size.
4647    DCHECK(SeededNumberDictionary::kEntrySize == 3);
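    // An entry is three pointers wide (key, value, details), so the masked
    // index is scaled by three here; the value and details fields are then
    // read one and two pointers past the start of the entry further below.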
4648    leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
4649
4650    // Check if the key matches.
4651    cmpp(key, FieldOperand(elements,
4652                           r2,
4653                           times_pointer_size,
4654                           SeededNumberDictionary::kElementsStartOffset));
4655    if (i != (kNumberDictionaryProbes - 1)) {
4656      j(equal, &done);
4657    } else {
4658      j(not_equal, miss);
4659    }
4660  }
4661
4662  bind(&done);
4663  // Check that the value is a field property.
4664  const int kDetailsOffset =
4665      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4666  DCHECK_EQ(DATA, 0);
4667  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4668       Smi::FromInt(PropertyDetails::TypeField::kMask));
4669  j(not_zero, miss);
4670
4671  // Get the value at the masked, scaled index.
4672  const int kValueOffset =
4673      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4674  movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4675}
4676
4677
4678void MacroAssembler::LoadAllocationTopHelper(Register result,
4679                                             Register scratch,
4680                                             AllocationFlags flags) {
4681  ExternalReference allocation_top =
4682      AllocationUtils::GetAllocationTopReference(isolate(), flags);
4683
4684  // Just return if allocation top is already known.
4685  if ((flags & RESULT_CONTAINS_TOP) != 0) {
4686    // No use of scratch if allocation top is provided.
4687    DCHECK(!scratch.is_valid());
4688#ifdef DEBUG
4689    // Assert that result actually contains top on entry.
4690    Operand top_operand = ExternalOperand(allocation_top);
4691    cmpp(result, top_operand);
4692    Check(equal, kUnexpectedAllocationTop);
4693#endif
4694    return;
4695  }
4696
4697  // Move address of new object to result. Use scratch register if available,
4698  // and keep address in scratch until call to UpdateAllocationTopHelper.
4699  if (scratch.is_valid()) {
4700    LoadAddress(scratch, allocation_top);
4701    movp(result, Operand(scratch, 0));
4702  } else {
4703    Load(result, allocation_top);
4704  }
4705}
4706
4707
4708void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
4709                                                 Register scratch,
4710                                                 Label* gc_required,
4711                                                 AllocationFlags flags) {
4712  if (kPointerSize == kDoubleSize) {
4713    if (FLAG_debug_code) {
4714      testl(result, Immediate(kDoubleAlignmentMask));
4715      Check(zero, kAllocationIsNotDoubleAligned);
4716    }
4717  } else {
4718    // Align the next allocation. Storing the filler map without checking top
4719    // is safe in new-space because the limit of the heap is aligned there.
4720    DCHECK(kPointerSize * 2 == kDoubleSize);
4721    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
4722    // Make sure scratch is not clobbered by this function as it might be
4723    // used in UpdateAllocationTopHelper later.
4724    DCHECK(!scratch.is(kScratchRegister));
4725    Label aligned;
4726    testl(result, Immediate(kDoubleAlignmentMask));
4727    j(zero, &aligned, Label::kNear);
4728    if ((flags & PRETENURE) != 0) {
4729      ExternalReference allocation_limit =
4730          AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4731      cmpp(result, ExternalOperand(allocation_limit));
4732      j(above_equal, gc_required);
4733    }
4734    LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
4735    movp(Operand(result, 0), kScratchRegister);
4736    addp(result, Immediate(kDoubleSize / 2));
4737    bind(&aligned);
4738  }
4739}
4740
4741
4742void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4743                                               Register scratch,
4744                                               AllocationFlags flags) {
4745  if (emit_debug_code()) {
4746    testp(result_end, Immediate(kObjectAlignmentMask));
4747    Check(zero, kUnalignedAllocationInNewSpace);
4748  }
4749
4750  ExternalReference allocation_top =
4751      AllocationUtils::GetAllocationTopReference(isolate(), flags);
4752
4753  // Update new top.
4754  if (scratch.is_valid()) {
4755    // Scratch already contains address of allocation top.
4756    movp(Operand(scratch, 0), result_end);
4757  } else {
4758    Store(allocation_top, result_end);
4759  }
4760}
4761
4762
4763void MacroAssembler::Allocate(int object_size,
4764                              Register result,
4765                              Register result_end,
4766                              Register scratch,
4767                              Label* gc_required,
4768                              AllocationFlags flags) {
4769  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4770  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
4771  if (!FLAG_inline_new) {
4772    if (emit_debug_code()) {
4773      // Trash the registers to simulate an allocation failure.
4774      movl(result, Immediate(0x7091));
4775      if (result_end.is_valid()) {
4776        movl(result_end, Immediate(0x7191));
4777      }
4778      if (scratch.is_valid()) {
4779        movl(scratch, Immediate(0x7291));
4780      }
4781    }
4782    jmp(gc_required);
4783    return;
4784  }
4785  DCHECK(!result.is(result_end));
4786
4787  // Load address of new object into result.
4788  LoadAllocationTopHelper(result, scratch, flags);
4789
4790  if ((flags & DOUBLE_ALIGNMENT) != 0) {
4791    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4792  }
4793
4794  // Calculate new top and bail out if new space is exhausted.
4795  ExternalReference allocation_limit =
4796      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4797
4798  Register top_reg = result_end.is_valid() ? result_end : result;
4799
4800  if (!top_reg.is(result)) {
4801    movp(top_reg, result);
4802  }
4803  addp(top_reg, Immediate(object_size));
4804  j(carry, gc_required);
4805  Operand limit_operand = ExternalOperand(allocation_limit);
4806  cmpp(top_reg, limit_operand);
4807  j(above, gc_required);
4808
4809  // Update allocation top.
4810  UpdateAllocationTopHelper(top_reg, scratch, flags);
4811
4812  bool tag_result = (flags & TAG_OBJECT) != 0;
4813  if (top_reg.is(result)) {
4814    if (tag_result) {
4815      subp(result, Immediate(object_size - kHeapObjectTag));
4816    } else {
4817      subp(result, Immediate(object_size));
4818    }
4819  } else if (tag_result) {
4820    // Tag the result if requested.
4821    DCHECK(kHeapObjectTag == 1);
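    // With a tag of 1, tagging the result is a single increment.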
4822    incp(result);
4823  }
4824}
4825
4826
4827void MacroAssembler::Allocate(int header_size,
4828                              ScaleFactor element_size,
4829                              Register element_count,
4830                              Register result,
4831                              Register result_end,
4832                              Register scratch,
4833                              Label* gc_required,
4834                              AllocationFlags flags) {
4835  DCHECK((flags & SIZE_IN_WORDS) == 0);
4836  leap(result_end, Operand(element_count, element_size, header_size));
4837  Allocate(result_end, result, result_end, scratch, gc_required, flags);
4838}
4839
4840
4841void MacroAssembler::Allocate(Register object_size,
4842                              Register result,
4843                              Register result_end,
4844                              Register scratch,
4845                              Label* gc_required,
4846                              AllocationFlags flags) {
4847  DCHECK((flags & SIZE_IN_WORDS) == 0);
4848  if (!FLAG_inline_new) {
4849    if (emit_debug_code()) {
4850      // Trash the registers to simulate an allocation failure.
4851      movl(result, Immediate(0x7091));
4852      movl(result_end, Immediate(0x7191));
4853      if (scratch.is_valid()) {
4854        movl(scratch, Immediate(0x7291));
4855      }
4856      // object_size is left unchanged by this function.
4857    }
4858    jmp(gc_required);
4859    return;
4860  }
4861  DCHECK(!result.is(result_end));
4862
4863  // Load address of new object into result.
4864  LoadAllocationTopHelper(result, scratch, flags);
4865
4866  if ((flags & DOUBLE_ALIGNMENT) != 0) {
4867    MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
4868  }
4869
4870  // Calculate new top and bail out if new space is exhausted.
4871  ExternalReference allocation_limit =
4872      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4873  if (!object_size.is(result_end)) {
4874    movp(result_end, object_size);
4875  }
4876  addp(result_end, result);
4877  j(carry, gc_required);
4878  Operand limit_operand = ExternalOperand(allocation_limit);
4879  cmpp(result_end, limit_operand);
4880  j(above, gc_required);
4881
4882  // Update allocation top.
4883  UpdateAllocationTopHelper(result_end, scratch, flags);
4884
4885  // Tag the result if requested.
4886  if ((flags & TAG_OBJECT) != 0) {
4887    addp(result, Immediate(kHeapObjectTag));
4888  }
4889}
4890
4891
4892void MacroAssembler::AllocateHeapNumber(Register result,
4893                                        Register scratch,
4894                                        Label* gc_required,
4895                                        MutableMode mode) {
4896  // Allocate heap number in new space.
4897  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4898
4899  Heap::RootListIndex map_index = mode == MUTABLE
4900      ? Heap::kMutableHeapNumberMapRootIndex
4901      : Heap::kHeapNumberMapRootIndex;
4902
4903  // Set the map.
4904  LoadRoot(kScratchRegister, map_index);
4905  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4906}
4907
4908
4909void MacroAssembler::AllocateTwoByteString(Register result,
4910                                           Register length,
4911                                           Register scratch1,
4912                                           Register scratch2,
4913                                           Register scratch3,
4914                                           Label* gc_required) {
4915  // Calculate the number of bytes needed for the characters in the string while
4916  // observing object alignment.
4917  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4918                               kObjectAlignmentMask;
4919  DCHECK(kShortSize == 2);
4920  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
4921  leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4922                kHeaderAlignment));
4923  andp(scratch1, Immediate(~kObjectAlignmentMask));
4924  if (kHeaderAlignment > 0) {
4925    subp(scratch1, Immediate(kHeaderAlignment));
4926  }
4927
4928  // Allocate two byte string in new space.
4929  Allocate(SeqTwoByteString::kHeaderSize,
4930           times_1,
4931           scratch1,
4932           result,
4933           scratch2,
4934           scratch3,
4935           gc_required,
4936           TAG_OBJECT);
4937
4938  // Set the map, length and hash field.
4939  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4940  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4941  Integer32ToSmi(scratch1, length);
4942  movp(FieldOperand(result, String::kLengthOffset), scratch1);
4943  movp(FieldOperand(result, String::kHashFieldOffset),
4944       Immediate(String::kEmptyHashField));
4945}
4946
4947
4948void MacroAssembler::AllocateOneByteString(Register result, Register length,
4949                                           Register scratch1, Register scratch2,
4950                                           Register scratch3,
4951                                           Label* gc_required) {
4952  // Calculate the number of bytes needed for the characters in the string while
4953  // observing object alignment.
4954  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4955                               kObjectAlignmentMask;
4956  movl(scratch1, length);
4957  DCHECK(kCharSize == 1);
4958  addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4959  andp(scratch1, Immediate(~kObjectAlignmentMask));
4960  if (kHeaderAlignment > 0) {
4961    subp(scratch1, Immediate(kHeaderAlignment));
4962  }
4963
4964  // Allocate one-byte string in new space.
4965  Allocate(SeqOneByteString::kHeaderSize,
4966           times_1,
4967           scratch1,
4968           result,
4969           scratch2,
4970           scratch3,
4971           gc_required,
4972           TAG_OBJECT);
4973
4974  // Set the map, length and hash field.
4975  LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
4976  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4977  Integer32ToSmi(scratch1, length);
4978  movp(FieldOperand(result, String::kLengthOffset), scratch1);
4979  movp(FieldOperand(result, String::kHashFieldOffset),
4980       Immediate(String::kEmptyHashField));
4981}
4982
4983
4984void MacroAssembler::AllocateTwoByteConsString(Register result,
4985                                        Register scratch1,
4986                                        Register scratch2,
4987                                        Label* gc_required) {
4988  // Allocate a two-byte cons string in new space.
4989  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4990           TAG_OBJECT);
4991
4992  // Set the map. The other fields are left uninitialized.
4993  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4994  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4995}
4996
4997
4998void MacroAssembler::AllocateOneByteConsString(Register result,
4999                                               Register scratch1,
5000                                               Register scratch2,
5001                                               Label* gc_required) {
5002  Allocate(ConsString::kSize,
5003           result,
5004           scratch1,
5005           scratch2,
5006           gc_required,
5007           TAG_OBJECT);
5008
5009  // Set the map. The other fields are left uninitialized.
5010  LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
5011  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
5012}
5013
5014
5015void MacroAssembler::AllocateTwoByteSlicedString(Register result,
5016                                          Register scratch1,
5017                                          Register scratch2,
5018                                          Label* gc_required) {
5019  // Allocate a two-byte sliced string in new space.
5020  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
5021           TAG_OBJECT);
5022
5023  // Set the map. The other fields are left uninitialized.
5024  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
5025  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
5026}
5027
5028
5029void MacroAssembler::AllocateOneByteSlicedString(Register result,
5030                                                 Register scratch1,
5031                                                 Register scratch2,
5032                                                 Label* gc_required) {
5033  // Allocate a one-byte sliced string in new space.
5034  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
5035           TAG_OBJECT);
5036
5037  // Set the map. The other fields are left uninitialized.
5038  LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
5039  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
5040}
5041
5042
5043void MacroAssembler::AllocateJSValue(Register result, Register constructor,
5044                                     Register value, Register scratch,
5045                                     Label* gc_required) {
5046  DCHECK(!result.is(constructor));
5047  DCHECK(!result.is(scratch));
5048  DCHECK(!result.is(value));
5049
5050  // Allocate JSValue in new space.
5051  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
5052
5053  // Initialize the JSValue.
5054  LoadGlobalFunctionInitialMap(constructor, scratch);
5055  movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
5056  LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
5057  movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
5058  movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
5059  movp(FieldOperand(result, JSValue::kValueOffset), value);
5060  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
5061}
5062
5063
5064// Copy memory, byte-by-byte, from source to destination.  Not optimized for
5065// long or aligned copies.  The contents of scratch and length are destroyed.
5066// Destination is incremented by length; source, length and scratch are
5067// clobbered.
5068// A simpler loop is faster on small copies, but slower on large ones.
5069// The cld() instruction must have been emitted, to clear the direction flag,
5070// before calling this function.
5071void MacroAssembler::CopyBytes(Register destination,
5072                               Register source,
5073                               Register length,
5074                               int min_length,
5075                               Register scratch) {
5076  DCHECK(min_length >= 0);
5077  if (emit_debug_code()) {
5078    cmpl(length, Immediate(min_length));
5079    Assert(greater_equal, kInvalidMinLength);
5080  }
5081  Label short_loop, len8, len16, len24, done, short_string;
5082
5083  const int kLongStringLimit = 4 * kPointerSize;
5084  if (min_length <= kLongStringLimit) {
5085    cmpl(length, Immediate(kPointerSize));
5086    j(below, &short_string, Label::kNear);
5087  }
5088
5089  DCHECK(source.is(rsi));
5090  DCHECK(destination.is(rdi));
5091  DCHECK(length.is(rcx));
5092
5093  if (min_length <= kLongStringLimit) {
5094    cmpl(length, Immediate(2 * kPointerSize));
5095    j(below_equal, &len8, Label::kNear);
5096    cmpl(length, Immediate(3 * kPointerSize));
5097    j(below_equal, &len16, Label::kNear);
5098    cmpl(length, Immediate(4 * kPointerSize));
5099    j(below_equal, &len24, Label::kNear);
5100  }
5101
5102  // Because source is 8-byte aligned in our uses of this function,
5103  // we keep source aligned for the rep movs operation by copying the odd bytes
5104  // at the end of the ranges.
5105  movp(scratch, length);
5106  shrl(length, Immediate(kPointerSizeLog2));
5107  repmovsp();
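  // Copy the remaining length % kPointerSize bytes with one pointer-sized
  // move that overlaps the region just copied, hence the -kPointerSize
  // displacement below.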
5108  // Move remaining bytes of length.
5109  andl(scratch, Immediate(kPointerSize - 1));
5110  movp(length, Operand(source, scratch, times_1, -kPointerSize));
5111  movp(Operand(destination, scratch, times_1, -kPointerSize), length);
5112  addp(destination, scratch);
5113
5114  if (min_length <= kLongStringLimit) {
5115    jmp(&done, Label::kNear);
5116    bind(&len24);
5117    movp(scratch, Operand(source, 2 * kPointerSize));
5118    movp(Operand(destination, 2 * kPointerSize), scratch);
5119    bind(&len16);
5120    movp(scratch, Operand(source, kPointerSize));
5121    movp(Operand(destination, kPointerSize), scratch);
5122    bind(&len8);
5123    movp(scratch, Operand(source, 0));
5124    movp(Operand(destination, 0), scratch);
5125  // Copy the trailing bytes via a word move ending at source + length.
5126    movp(scratch, Operand(source, length, times_1, -kPointerSize));
5127    movp(Operand(destination, length, times_1, -kPointerSize), scratch);
5128    addp(destination, length);
5129    jmp(&done, Label::kNear);
5130
5131    bind(&short_string);
5132    if (min_length == 0) {
5133      testl(length, length);
5134      j(zero, &done, Label::kNear);
5135    }
5136
5137    bind(&short_loop);
5138    movb(scratch, Operand(source, 0));
5139    movb(Operand(destination, 0), scratch);
5140    incp(source);
5141    incp(destination);
5142    decl(length);
5143    j(not_zero, &short_loop, Label::kNear);
5144  }
5145
5146  bind(&done);
5147}
5148
5149
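// InitializeFieldsWithFiller stores |filler| into every pointer-sized slot in
// [current_address, end_address); roughly (an illustrative sketch):
//   for (p = current_address; p < end_address; p += kPointerSize) *p = filler;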
5150void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
5151                                                Register end_address,
5152                                                Register filler) {
5153  Label loop, entry;
5154  jmp(&entry, Label::kNear);
5155  bind(&loop);
5156  movp(Operand(current_address, 0), filler);
5157  addp(current_address, Immediate(kPointerSize));
5158  bind(&entry);
5159  cmpp(current_address, end_address);
5160  j(below, &loop, Label::kNear);
5161}
5162
5163
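// LoadContext walks |context_chain_length| previous-context links starting
// from the current context in rsi; e.g. a chain length of 2 is roughly
// dst = rsi->previous()->previous() (illustrative accessors, not real calls).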
5164void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5165  if (context_chain_length > 0) {
5166    // Move up the chain of contexts to the context containing the slot.
5167    movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5168    for (int i = 1; i < context_chain_length; i++) {
5169      movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5170    }
5171  } else {
5172    // Slot is in the current function context.  Move it into the
5173    // destination register in case we store into it (the write barrier
5174    // cannot be allowed to destroy the context in rsi).
5175    movp(dst, rsi);
5176  }
5177
5178  // We should not have found a 'with' context by walking the context
5179  // chain (i.e., the static scope chain and runtime context chain do
5180  // not agree).  A variable occurring in such a scope should have
5181  // slot type LOOKUP and not CONTEXT.
5182  if (emit_debug_code()) {
5183    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
5184                Heap::kWithContextMapRootIndex);
5185    Check(not_equal, kVariableResolvedToWithContext);
5186  }
5187}
5188
5189
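// For example (an illustrative use, not taken from a particular caller): with
// expected_kind == FAST_SMI_ELEMENTS and transitioned_kind == FAST_ELEMENTS,
// this replaces map_in_out with the native context's FAST_ELEMENTS array map
// when map_in_out currently holds the FAST_SMI_ELEMENTS array map, and jumps
// to no_map_match otherwise.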
5190void MacroAssembler::LoadTransitionedArrayMapConditional(
5191    ElementsKind expected_kind,
5192    ElementsKind transitioned_kind,
5193    Register map_in_out,
5194    Register scratch,
5195    Label* no_map_match) {
5196  DCHECK(IsFastElementsKind(expected_kind));
5197  DCHECK(IsFastElementsKind(transitioned_kind));
5198
5199  // Check that the function's map is the same as the expected cached map.
5200  movp(scratch, NativeContextOperand());
5201  cmpp(map_in_out,
5202       ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
5203  j(not_equal, no_map_match);
5204
5205  // Use the transitioned cached map.
5206  movp(map_in_out,
5207       ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
5208}
5209
5210
5211#ifdef _WIN64
5212static const int kRegisterPassedArguments = 4;
5213#else
5214static const int kRegisterPassedArguments = 6;
5215#endif
5216
5217
5218void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5219  movp(dst, NativeContextOperand());
5220  movp(dst, ContextOperand(dst, index));
5221}
5222
5223
5224void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5225                                                  Register map) {
5226  // Load the initial map.  The global functions all have initial maps.
5227  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5228  if (emit_debug_code()) {
5229    Label ok, fail;
5230    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
5231    jmp(&ok);
5232    bind(&fail);
5233    Abort(kGlobalFunctionsMustHaveInitialMap);
5234    bind(&ok);
5235  }
5236}
5237
5238
5239int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
5240  // On Windows 64 stack slots are reserved by the caller for all arguments
5241  // including the ones passed in registers, and space is always allocated for
5242  // the four register arguments even if the function takes fewer than four
5243  // arguments.
5244  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
5245  // and the caller does not reserve stack slots for them.
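  // For example: with 3 arguments this returns 4 slots on Windows (the shadow
  // space minimum) and 0 on Linux/Mac; with 8 arguments it returns 8 and 2
  // respectively.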
5246  DCHECK(num_arguments >= 0);
5247#ifdef _WIN64
5248  const int kMinimumStackSlots = kRegisterPassedArguments;
5249  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
5250  return num_arguments;
5251#else
5252  if (num_arguments < kRegisterPassedArguments) return 0;
5253  return num_arguments - kRegisterPassedArguments;
5254#endif
5255}
5256
5257
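// EmitSeqStringSetCharCheck verifies, aborting on failure, that |string| is a
// non-Smi sequential string whose representation/encoding bits equal
// |encoding_mask| and that the untagged |index| lies within
// [0, string->length()).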
5258void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5259                                               Register index,
5260                                               Register value,
5261                                               uint32_t encoding_mask) {
5262  Label is_object;
5263  JumpIfNotSmi(string, &is_object);
5264  Abort(kNonObject);
5265  bind(&is_object);
5266
5267  Push(value);
5268  movp(value, FieldOperand(string, HeapObject::kMapOffset));
5269  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
5270
5271  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
5272  cmpp(value, Immediate(encoding_mask));
5273  Pop(value);
5274  Check(equal, kUnexpectedStringType);
5275
5276  // The index is assumed to be untagged coming in; tag it to compare with the
5277  // string length without using a temp register. It is restored at the end of
5278  // this function.
5279  Integer32ToSmi(index, index);
5280  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
5281  Check(less, kIndexIsTooLarge);
5282
5283  SmiCompare(index, Smi::FromInt(0));
5284  Check(greater_equal, kIndexIsNegative);
5285
5286  // Restore the index
5287  SmiToInteger32(index, index);
5288}
5289
5290
5291void MacroAssembler::PrepareCallCFunction(int num_arguments) {
5292  int frame_alignment = base::OS::ActivationFrameAlignment();
5293  DCHECK(frame_alignment != 0);
5294  DCHECK(num_arguments >= 0);
5295
5296  // Make stack end at alignment and allocate space for arguments and old rsp.
5297  movp(kScratchRegister, rsp);
5298  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5299  int argument_slots_on_stack =
5300      ArgumentStackSlotsForCFunctionCall(num_arguments);
5301  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
5302  andp(rsp, Immediate(-frame_alignment));
5303  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
5304}
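// After PrepareCallCFunction the stack is aligned and laid out roughly as
// follows (an illustrative sketch):
//   [rsp + n * kRegisterSize]            : saved original rsp
//   [rsp .. rsp + (n - 1) * kRegisterSize] : n argument slots for the C call
// where n == ArgumentStackSlotsForCFunctionCall(num_arguments).  CallCFunction
// below restores rsp from the saved slot after the call returns.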
5305
5306
5307void MacroAssembler::CallCFunction(ExternalReference function,
5308                                   int num_arguments) {
5309  LoadAddress(rax, function);
5310  CallCFunction(rax, num_arguments);
5311}
5312
5313
5314void MacroAssembler::CallCFunction(Register function, int num_arguments) {
5315  DCHECK(has_frame());
5316  // Check stack alignment.
5317  if (emit_debug_code()) {
5318    CheckStackAlignment();
5319  }
5320
5321  call(function);
5322  DCHECK(base::OS::ActivationFrameAlignment() != 0);
5323  DCHECK(num_arguments >= 0);
5324  int argument_slots_on_stack =
5325      ArgumentStackSlotsForCFunctionCall(num_arguments);
5326  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
5327}
5328
5329
5330#ifdef DEBUG
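// Returns true if any two of the valid (i.e. actually passed) registers alias
// each other: the number of valid arguments is compared against the population
// count of their combined RegList bits.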
5331bool AreAliased(Register reg1,
5332                Register reg2,
5333                Register reg3,
5334                Register reg4,
5335                Register reg5,
5336                Register reg6,
5337                Register reg7,
5338                Register reg8) {
5339  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
5340      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
5341      reg7.is_valid() + reg8.is_valid();
5342
5343  RegList regs = 0;
5344  if (reg1.is_valid()) regs |= reg1.bit();
5345  if (reg2.is_valid()) regs |= reg2.bit();
5346  if (reg3.is_valid()) regs |= reg3.bit();
5347  if (reg4.is_valid()) regs |= reg4.bit();
5348  if (reg5.is_valid()) regs |= reg5.bit();
5349  if (reg6.is_valid()) regs |= reg6.bit();
5350  if (reg7.is_valid()) regs |= reg7.bit();
5351  if (reg8.is_valid()) regs |= reg8.bit();
5352  int n_of_non_aliasing_regs = NumRegs(regs);
5353
5354  return n_of_valid_regs != n_of_non_aliasing_regs;
5355}
5356#endif
5357
5358
5359CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
5360    : address_(address),
5361      size_(size),
5362      masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
5363  // Create a new macro assembler pointing to the address of the code to patch.
5364// The size is adjusted with kGap in order for the assembler to generate size
5365  // bytes of instructions without failing with buffer size constraints.
5366  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5367}
5368
5369
5370CodePatcher::~CodePatcher() {
5371  // Indicate that code has changed.
5372  Assembler::FlushICache(masm_.isolate(), address_, size_);
5373
5374  // Check that the code was patched as expected.
5375  DCHECK(masm_.pc_ == address_ + size_);
5376  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5377}
5378
5379
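// CheckPageFlag tests flag bits in the MemoryChunk header of the page holding
// |object|; roughly (illustrative pseudo-code, not real accessors):
//   chunk = object & ~Page::kPageAlignmentMask;
//   if ((chunk->flags & mask) satisfies cc) goto condition_met;
// A byte-sized test is emitted when the mask fits in a single byte.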
5380void MacroAssembler::CheckPageFlag(
5381    Register object,
5382    Register scratch,
5383    int mask,
5384    Condition cc,
5385    Label* condition_met,
5386    Label::Distance condition_met_distance) {
5387  DCHECK(cc == zero || cc == not_zero);
5388  if (scratch.is(object)) {
5389    andp(scratch, Immediate(~Page::kPageAlignmentMask));
5390  } else {
5391    movp(scratch, Immediate(~Page::kPageAlignmentMask));
5392    andp(scratch, object);
5393  }
5394  if (mask < (1 << kBitsPerByte)) {
5395    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
5396          Immediate(static_cast<uint8_t>(mask)));
5397  } else {
5398    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
5399  }
5400  j(cc, condition_met, condition_met_distance);
5401}
5402
5403
5404void MacroAssembler::JumpIfBlack(Register object,
5405                                 Register bitmap_scratch,
5406                                 Register mask_scratch,
5407                                 Label* on_black,
5408                                 Label::Distance on_black_distance) {
5409  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
5410
5411  GetMarkBits(object, bitmap_scratch, mask_scratch);
5412
5413  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
5414  // The mask_scratch register contains a 1 at the position of the first bit
5415  // and a 1 at the position of the second bit. All other positions are zero.
5416  movp(rcx, mask_scratch);
5417  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
5418  cmpp(mask_scratch, rcx);
5419  j(equal, on_black, on_black_distance);
5420}
5421
5422
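// GetMarkBits leaves bitmap_reg pointing at the marking-bitmap cell for
// addr_reg's page (the MemoryChunk header offset is added at the use sites)
// and mask_reg holding a two-bit "11" mask shifted to the object's position in
// that cell.  Roughly, with illustrative names:
//   word_index  = (addr & Page::kPageAlignmentMask) >> kPointerSizeLog2;
//   cell_offset = (word_index >> Bitmap::kBitsPerCellLog2) * Bitmap::kBytesPerCell;
//   mask        = 3 << (word_index & (Bitmap::kBitsPerCell - 1));
// rcx is used as the implicit scratch register holding the shift count.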
5423void MacroAssembler::GetMarkBits(Register addr_reg,
5424                                 Register bitmap_reg,
5425                                 Register mask_reg) {
5426  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
5427  movp(bitmap_reg, addr_reg);
5428  // Sign-extended 32-bit immediate.
5429  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
5430  movp(rcx, addr_reg);
5431  int shift =
5432      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
5433  shrl(rcx, Immediate(shift));
5434  andp(rcx,
5435       Immediate((Page::kPageAlignmentMask >> shift) &
5436                 ~(Bitmap::kBytesPerCell - 1)));
5437
5438  addp(bitmap_reg, rcx);
5439  movp(rcx, addr_reg);
5440  shrl(rcx, Immediate(kPointerSizeLog2));
5441  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
5442  movl(mask_reg, Immediate(3));
5443  shlp_cl(mask_reg);
5444}
5445
5446
5447void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
5448                                 Register mask_scratch, Label* value_is_white,
5449                                 Label::Distance distance) {
5450  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
5451  GetMarkBits(value, bitmap_scratch, mask_scratch);
5452
5453  // If the value is black or grey we don't need to do anything.
5454  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5455  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
5456  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
5457  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5458
5459  // Since both black and grey have a 1 in the first position and white does
5460  // not have a 1 there, we only need to check one bit.
5461  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
5462  j(zero, value_is_white, distance);
5463}
5464
5465
5466void MacroAssembler::CheckEnumCache(Label* call_runtime) {
5467  Label next, start;
5468  Register empty_fixed_array_value = r8;
5469  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5470  movp(rcx, rax);
5471
5472  // Check if the enum length field is properly initialized, indicating that
5473  // there is an enum cache.
5474  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5475
5476  EnumLength(rdx, rbx);
5477  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
5478  j(equal, call_runtime);
5479
5480  jmp(&start);
5481
5482  bind(&next);
5483
5484  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5485
5486  // For all objects but the receiver, check that the cache is empty.
5487  EnumLength(rdx, rbx);
5488  Cmp(rdx, Smi::FromInt(0));
5489  j(not_equal, call_runtime);
5490
5491  bind(&start);
5492
5493  // Check that there are no elements. Register rcx contains the current JS
5494  // object we've reached through the prototype chain.
5495  Label no_elements;
5496  cmpp(empty_fixed_array_value,
5497       FieldOperand(rcx, JSObject::kElementsOffset));
5498  j(equal, &no_elements);
5499
5500  // Second chance, the object may be using the empty slow element dictionary.
5501  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
5502  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
5503  j(not_equal, call_runtime);
5504
5505  bind(&no_elements);
5506  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
5507  CompareRoot(rcx, Heap::kNullValueRootIndex);
5508  j(not_equal, &next);
5509}
5510
5511
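// Looks for an AllocationMemento directly following the JSArray in new space;
// roughly (illustrative pseudo-code):
//   memento_end = receiver + JSArray::kSize + AllocationMemento::kSize;
//   if (memento_end < new_space_start || memento_end > new_space_top)
//     goto no_memento_found;
//   compare the map word at (memento_end - AllocationMemento::kSize) against
//   the allocation memento map; callers branch on the resulting flags.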
5512void MacroAssembler::TestJSArrayForAllocationMemento(
5513    Register receiver_reg,
5514    Register scratch_reg,
5515    Label* no_memento_found) {
5516  ExternalReference new_space_start =
5517      ExternalReference::new_space_start(isolate());
5518  ExternalReference new_space_allocation_top =
5519      ExternalReference::new_space_allocation_top_address(isolate());
5520
5521  leap(scratch_reg, Operand(receiver_reg,
5522      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5523  Move(kScratchRegister, new_space_start);
5524  cmpp(scratch_reg, kScratchRegister);
5525  j(less, no_memento_found);
5526  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
5527  j(greater, no_memento_found);
5528  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
5529              Heap::kAllocationMementoMapRootIndex);
5530}
5531
5532
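// Walks the prototype chain of |object| and jumps to |found| if any prototype
// is not an ordinary JSObject (e.g. a proxy or value wrapper) or uses
// dictionary-mode (slow) elements; roughly (illustrative pseudo-code, the
// accessors below are not real calls):
//   for (p = object->map()->prototype(); p != null; p = p->map()->prototype())
//     if (!p->IsOrdinaryJSObject() ||
//         p->map()->elements_kind() == DICTIONARY_ELEMENTS) goto found;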
5533void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5534    Register object,
5535    Register scratch0,
5536    Register scratch1,
5537    Label* found) {
5538  DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5539  DCHECK(!scratch1.is(scratch0));
5540  Register current = scratch0;
5541  Label loop_again, end;
5542
5543  movp(current, object);
5544  movp(current, FieldOperand(current, HeapObject::kMapOffset));
5545  movp(current, FieldOperand(current, Map::kPrototypeOffset));
5546  CompareRoot(current, Heap::kNullValueRootIndex);
5547  j(equal, &end);
5548
5549  // Loop based on the map going up the prototype chain.
5550  bind(&loop_again);
5551  movp(current, FieldOperand(current, HeapObject::kMapOffset));
5552  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
5553  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
5554  CmpInstanceType(current, JS_OBJECT_TYPE);
5555  j(below, found);
5556  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5557  DecodeField<Map::ElementsKindBits>(scratch1);
5558  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5559  j(equal, found);
5560  movp(current, FieldOperand(current, Map::kPrototypeOffset));
5561  CompareRoot(current, Heap::kNullValueRootIndex);
5562  j(not_equal, &loop_again);
5563
5564  bind(&end);
5565}
5566
5567
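// TruncatingDiv computes dividend / divisor (truncated toward zero) without a
// div instruction, by multiplying with a precomputed "magic number".  A rough
// scalar sketch of the sequence below (illustrative, 32-bit semantics):
//   q = (int32_t)(((int64_t)dividend * (int32_t)mag.multiplier) >> 32);
//   if (divisor > 0 && (int32_t)mag.multiplier < 0) q += dividend;
//   if (divisor < 0 && (int32_t)mag.multiplier > 0) q -= dividend;
//   q >>= mag.shift;
//   q += (uint32_t)dividend >> 31;  // adjust for negative dividends
// The quotient is left in rdx; rax is clobbered.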
5568void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5569  DCHECK(!dividend.is(rax));
5570  DCHECK(!dividend.is(rdx));
5571  base::MagicNumbersForDivision<uint32_t> mag =
5572      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
5573  movl(rax, Immediate(mag.multiplier));
5574  imull(dividend);
5575  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5576  if (divisor > 0 && neg) addl(rdx, dividend);
5577  if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
5578  if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
5579  movl(rax, dividend);
5580  shrl(rax, Immediate(31));
5581  addl(rdx, rax);
5582}
5583
5584
5585}  // namespace internal
5586}  // namespace v8
5587
5588#endif  // V8_TARGET_ARCH_X64
5589