1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_IA32
6
7#include "src/base/bits.h"
8#include "src/base/division-by-constant.h"
9#include "src/bootstrapper.h"
10#include "src/codegen.h"
11#include "src/debug/debug.h"
12#include "src/ia32/frames-ia32.h"
13#include "src/ia32/macro-assembler-ia32.h"
14#include "src/runtime/runtime.h"
15
16namespace v8 {
17namespace internal {
18
19// -------------------------------------------------------------------------
20// MacroAssembler implementation.
21
22MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
23                               CodeObjectRequired create_code_object)
24    : Assembler(arg_isolate, buffer, size),
25      generating_stub_(false),
26      has_frame_(false) {
27  if (create_code_object == CodeObjectRequired::kYes) {
28    code_object_ =
29        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
30  }
31}
32
33
34void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
35  DCHECK(!r.IsDouble());
36  if (r.IsInteger8()) {
37    movsx_b(dst, src);
38  } else if (r.IsUInteger8()) {
39    movzx_b(dst, src);
40  } else if (r.IsInteger16()) {
41    movsx_w(dst, src);
42  } else if (r.IsUInteger16()) {
43    movzx_w(dst, src);
44  } else {
45    mov(dst, src);
46  }
47}
48
49
50void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
51  DCHECK(!r.IsDouble());
52  if (r.IsInteger8() || r.IsUInteger8()) {
53    mov_b(dst, src);
54  } else if (r.IsInteger16() || r.IsUInteger16()) {
55    mov_w(dst, src);
56  } else {
57    if (r.IsHeapObject()) {
58      AssertNotSmi(src);
59    } else if (r.IsSmi()) {
60      AssertSmi(src);
61    }
62    mov(dst, src);
63  }
64}
65
66
67void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
68  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
69    mov(destination, isolate()->heap()->root_handle(index));
70    return;
71  }
72  ExternalReference roots_array_start =
73      ExternalReference::roots_array_start(isolate());
74  mov(destination, Immediate(index));
75  mov(destination, Operand::StaticArray(destination,
76                                        times_pointer_size,
77                                        roots_array_start));
78}
79
80
81void MacroAssembler::StoreRoot(Register source,
82                               Register scratch,
83                               Heap::RootListIndex index) {
84  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
85  ExternalReference roots_array_start =
86      ExternalReference::roots_array_start(isolate());
87  mov(scratch, Immediate(index));
88  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
89      source);
90}
91
92
93void MacroAssembler::CompareRoot(Register with,
94                                 Register scratch,
95                                 Heap::RootListIndex index) {
96  ExternalReference roots_array_start =
97      ExternalReference::roots_array_start(isolate());
98  mov(scratch, Immediate(index));
99  cmp(with, Operand::StaticArray(scratch,
100                                times_pointer_size,
101                                roots_array_start));
102}
103
104
105void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
106  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
107  cmp(with, isolate()->heap()->root_handle(index));
108}
109
110
111void MacroAssembler::CompareRoot(const Operand& with,
112                                 Heap::RootListIndex index) {
113  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
114  cmp(with, isolate()->heap()->root_handle(index));
115}
116
117
118void MacroAssembler::PushRoot(Heap::RootListIndex index) {
119  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
120  Push(isolate()->heap()->root_handle(index));
121}
122
123#define REG(Name) \
124  { Register::kCode_##Name }
125
126static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
127
128#undef REG
129
130static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
131
132void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
133                                     Register exclusion1, Register exclusion2,
134                                     Register exclusion3) {
135  // We don't allow a GC during a store buffer overflow so there is no need to
136  // store the registers in any particular way, but we do have to store and
137  // restore them.
138  for (int i = 0; i < kNumberOfSavedRegs; i++) {
139    Register reg = saved_regs[i];
140    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
141      push(reg);
142    }
143  }
144  if (fp_mode == kSaveFPRegs) {
145    sub(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
146    // Save all XMM registers except XMM0.
147    for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
148      XMMRegister reg = XMMRegister::from_code(i);
149      movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
150    }
151  }
152}
153
154void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
155                                    Register exclusion2, Register exclusion3) {
156  if (fp_mode == kSaveFPRegs) {
157    // Restore all XMM registers except XMM0.
158    for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
159      XMMRegister reg = XMMRegister::from_code(i);
160      movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
161    }
162    add(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
163  }
164
165  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
166    Register reg = saved_regs[i];
167    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
168      pop(reg);
169    }
170  }
171}
172
173void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
174                                Label* condition_met,
175                                Label::Distance distance) {
176  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc,
177                condition_met, distance);
178}
179
180
181void MacroAssembler::RememberedSetHelper(
182    Register object,  // Only used for debug checks.
183    Register addr,
184    Register scratch,
185    SaveFPRegsMode save_fp,
186    MacroAssembler::RememberedSetFinalAction and_then) {
187  Label done;
188  if (emit_debug_code()) {
189    Label ok;
190    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
191    int3();
192    bind(&ok);
193  }
194  // Load store buffer top.
195  ExternalReference store_buffer =
196      ExternalReference::store_buffer_top(isolate());
197  mov(scratch, Operand::StaticVariable(store_buffer));
198  // Store pointer to buffer.
199  mov(Operand(scratch, 0), addr);
200  // Increment buffer top.
201  add(scratch, Immediate(kPointerSize));
202  // Write back new top of buffer.
203  mov(Operand::StaticVariable(store_buffer), scratch);
204  // Call stub on end of buffer.
205  // Check for end of buffer.
206  test(scratch, Immediate(StoreBuffer::kStoreBufferMask));
207  if (and_then == kReturnAtEnd) {
208    Label buffer_overflowed;
209    j(equal, &buffer_overflowed, Label::kNear);
210    ret(0);
211    bind(&buffer_overflowed);
212  } else {
213    DCHECK(and_then == kFallThroughAtEnd);
214    j(not_equal, &done, Label::kNear);
215  }
216  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
217  CallStub(&store_buffer_overflow);
218  if (and_then == kReturnAtEnd) {
219    ret(0);
220  } else {
221    DCHECK(and_then == kFallThroughAtEnd);
222    bind(&done);
223  }
224}
225
226
227void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
228                                        XMMRegister scratch_reg,
229                                        Register result_reg) {
230  Label done;
231  Label conv_failure;
232  xorps(scratch_reg, scratch_reg);
233  cvtsd2si(result_reg, input_reg);
234  test(result_reg, Immediate(0xFFFFFF00));
235  j(zero, &done, Label::kNear);
236  cmp(result_reg, Immediate(0x1));
237  j(overflow, &conv_failure, Label::kNear);
238  mov(result_reg, Immediate(0));
239  setcc(sign, result_reg);
240  sub(result_reg, Immediate(1));
241  and_(result_reg, Immediate(255));
242  jmp(&done, Label::kNear);
243  bind(&conv_failure);
244  Move(result_reg, Immediate(0));
245  ucomisd(input_reg, scratch_reg);
246  j(below, &done, Label::kNear);
247  Move(result_reg, Immediate(255));
248  bind(&done);
249}
250
251
252void MacroAssembler::ClampUint8(Register reg) {
253  Label done;
254  test(reg, Immediate(0xFFFFFF00));
255  j(zero, &done, Label::kNear);
256  setcc(negative, reg);  // 1 if negative, 0 if positive.
257  dec_b(reg);  // 0 if negative, 255 if positive.
258  bind(&done);
259}
260
261
262void MacroAssembler::SlowTruncateToI(Register result_reg,
263                                     Register input_reg,
264                                     int offset) {
265  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
266  call(stub.GetCode(), RelocInfo::CODE_TARGET);
267}
268
269
270void MacroAssembler::TruncateDoubleToI(Register result_reg,
271                                       XMMRegister input_reg) {
272  Label done;
273  cvttsd2si(result_reg, Operand(input_reg));
274  cmp(result_reg, 0x1);
275  j(no_overflow, &done, Label::kNear);
276
277  sub(esp, Immediate(kDoubleSize));
278  movsd(MemOperand(esp, 0), input_reg);
279  SlowTruncateToI(result_reg, esp, 0);
280  add(esp, Immediate(kDoubleSize));
281  bind(&done);
282}
283
284
285void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
286                               XMMRegister scratch,
287                               MinusZeroMode minus_zero_mode,
288                               Label* lost_precision, Label* is_nan,
289                               Label* minus_zero, Label::Distance dst) {
290  DCHECK(!input_reg.is(scratch));
291  cvttsd2si(result_reg, Operand(input_reg));
292  Cvtsi2sd(scratch, Operand(result_reg));
293  ucomisd(scratch, input_reg);
294  j(not_equal, lost_precision, dst);
295  j(parity_even, is_nan, dst);
296  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
297    Label done;
298    // The integer converted back is equal to the original. We
299    // only have to test if we got -0 as an input.
300    test(result_reg, Operand(result_reg));
301    j(not_zero, &done, Label::kNear);
302    movmskpd(result_reg, input_reg);
303    // Bit 0 contains the sign of the double in input_reg.
304    // If input was positive, we are ok and return 0, otherwise
305    // jump to minus_zero.
306    and_(result_reg, 1);
307    j(not_zero, minus_zero, dst);
308    bind(&done);
309  }
310}
311
312
313void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
314                                           Register input_reg) {
315  Label done, slow_case;
316
317  if (CpuFeatures::IsSupported(SSE3)) {
318    CpuFeatureScope scope(this, SSE3);
319    Label convert;
320    // Use more powerful conversion when sse3 is available.
321    // Load x87 register with heap number.
322    fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
323    // Get exponent alone and check for too-big exponent.
324    mov(result_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
325    and_(result_reg, HeapNumber::kExponentMask);
326    const uint32_t kTooBigExponent =
327        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
328    cmp(Operand(result_reg), Immediate(kTooBigExponent));
329    j(greater_equal, &slow_case, Label::kNear);
330
331    // Reserve space for 64 bit answer.
332    sub(Operand(esp), Immediate(kDoubleSize));
333    // Do conversion, which cannot fail because we checked the exponent.
334    fisttp_d(Operand(esp, 0));
335    mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
336    add(Operand(esp), Immediate(kDoubleSize));
337    jmp(&done, Label::kNear);
338
339    // Slow case.
340    bind(&slow_case);
341    if (input_reg.is(result_reg)) {
342      // Input is clobbered. Restore number from fpu stack
343      sub(Operand(esp), Immediate(kDoubleSize));
344      fstp_d(Operand(esp, 0));
345      SlowTruncateToI(result_reg, esp, 0);
346      add(esp, Immediate(kDoubleSize));
347    } else {
348      fstp(0);
349      SlowTruncateToI(result_reg, input_reg);
350    }
351  } else {
352    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
353    cvttsd2si(result_reg, Operand(xmm0));
354    cmp(result_reg, 0x1);
355    j(no_overflow, &done, Label::kNear);
356    // Check if the input was 0x8000000 (kMinInt).
357    // If no, then we got an overflow and we deoptimize.
358    ExternalReference min_int = ExternalReference::address_of_min_int();
359    ucomisd(xmm0, Operand::StaticVariable(min_int));
360    j(not_equal, &slow_case, Label::kNear);
361    j(parity_even, &slow_case, Label::kNear);  // NaN.
362    jmp(&done, Label::kNear);
363
364    // Slow case.
365    bind(&slow_case);
366    if (input_reg.is(result_reg)) {
367      // Input is clobbered. Restore number from double scratch.
368      sub(esp, Immediate(kDoubleSize));
369      movsd(MemOperand(esp, 0), xmm0);
370      SlowTruncateToI(result_reg, esp, 0);
371      add(esp, Immediate(kDoubleSize));
372    } else {
373      SlowTruncateToI(result_reg, input_reg);
374    }
375  }
376  bind(&done);
377}
378
379
380void MacroAssembler::LoadUint32(XMMRegister dst, const Operand& src) {
381  Label done;
382  cmp(src, Immediate(0));
383  ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
384  Cvtsi2sd(dst, src);
385  j(not_sign, &done, Label::kNear);
386  addsd(dst, Operand::StaticVariable(uint32_bias));
387  bind(&done);
388}
389
390
391void MacroAssembler::RecordWriteArray(
392    Register object,
393    Register value,
394    Register index,
395    SaveFPRegsMode save_fp,
396    RememberedSetAction remembered_set_action,
397    SmiCheck smi_check,
398    PointersToHereCheck pointers_to_here_check_for_value) {
399  // First, check if a write barrier is even needed. The tests below
400  // catch stores of Smis.
401  Label done;
402
403  // Skip barrier if writing a smi.
404  if (smi_check == INLINE_SMI_CHECK) {
405    DCHECK_EQ(0, kSmiTag);
406    test(value, Immediate(kSmiTagMask));
407    j(zero, &done);
408  }
409
410  // Array access: calculate the destination address in the same manner as
411  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
412  // into an array of words.
413  Register dst = index;
414  lea(dst, Operand(object, index, times_half_pointer_size,
415                   FixedArray::kHeaderSize - kHeapObjectTag));
416
417  RecordWrite(object, dst, value, save_fp, remembered_set_action,
418              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
419
420  bind(&done);
421
422  // Clobber clobbered input registers when running with the debug-code flag
423  // turned on to provoke errors.
424  if (emit_debug_code()) {
425    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
426    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
427  }
428}
429
430
431void MacroAssembler::RecordWriteField(
432    Register object,
433    int offset,
434    Register value,
435    Register dst,
436    SaveFPRegsMode save_fp,
437    RememberedSetAction remembered_set_action,
438    SmiCheck smi_check,
439    PointersToHereCheck pointers_to_here_check_for_value) {
440  // First, check if a write barrier is even needed. The tests below
441  // catch stores of Smis.
442  Label done;
443
444  // Skip barrier if writing a smi.
445  if (smi_check == INLINE_SMI_CHECK) {
446    JumpIfSmi(value, &done, Label::kNear);
447  }
448
449  // Although the object register is tagged, the offset is relative to the start
450  // of the object, so so offset must be a multiple of kPointerSize.
451  DCHECK(IsAligned(offset, kPointerSize));
452
453  lea(dst, FieldOperand(object, offset));
454  if (emit_debug_code()) {
455    Label ok;
456    test_b(dst, Immediate((1 << kPointerSizeLog2) - 1));
457    j(zero, &ok, Label::kNear);
458    int3();
459    bind(&ok);
460  }
461
462  RecordWrite(object, dst, value, save_fp, remembered_set_action,
463              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
464
465  bind(&done);
466
467  // Clobber clobbered input registers when running with the debug-code flag
468  // turned on to provoke errors.
469  if (emit_debug_code()) {
470    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
471    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
472  }
473}
474
475
476void MacroAssembler::RecordWriteForMap(
477    Register object,
478    Handle<Map> map,
479    Register scratch1,
480    Register scratch2,
481    SaveFPRegsMode save_fp) {
482  Label done;
483
484  Register address = scratch1;
485  Register value = scratch2;
486  if (emit_debug_code()) {
487    Label ok;
488    lea(address, FieldOperand(object, HeapObject::kMapOffset));
489    test_b(address, Immediate((1 << kPointerSizeLog2) - 1));
490    j(zero, &ok, Label::kNear);
491    int3();
492    bind(&ok);
493  }
494
495  DCHECK(!object.is(value));
496  DCHECK(!object.is(address));
497  DCHECK(!value.is(address));
498  AssertNotSmi(object);
499
500  if (!FLAG_incremental_marking) {
501    return;
502  }
503
504  // Compute the address.
505  lea(address, FieldOperand(object, HeapObject::kMapOffset));
506
507  // A single check of the map's pages interesting flag suffices, since it is
508  // only set during incremental collection, and then it's also guaranteed that
509  // the from object's page's interesting flag is also set.  This optimization
510  // relies on the fact that maps can never be in new space.
511  DCHECK(!isolate()->heap()->InNewSpace(*map));
512  CheckPageFlagForMap(map,
513                      MemoryChunk::kPointersToHereAreInterestingMask,
514                      zero,
515                      &done,
516                      Label::kNear);
517
518  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
519                       save_fp);
520  CallStub(&stub);
521
522  bind(&done);
523
524  // Count number of write barriers in generated code.
525  isolate()->counters()->write_barriers_static()->Increment();
526  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
527
528  // Clobber clobbered input registers when running with the debug-code flag
529  // turned on to provoke errors.
530  if (emit_debug_code()) {
531    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
532    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
533    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
534  }
535}
536
537
538void MacroAssembler::RecordWrite(
539    Register object,
540    Register address,
541    Register value,
542    SaveFPRegsMode fp_mode,
543    RememberedSetAction remembered_set_action,
544    SmiCheck smi_check,
545    PointersToHereCheck pointers_to_here_check_for_value) {
546  DCHECK(!object.is(value));
547  DCHECK(!object.is(address));
548  DCHECK(!value.is(address));
549  AssertNotSmi(object);
550
551  if (remembered_set_action == OMIT_REMEMBERED_SET &&
552      !FLAG_incremental_marking) {
553    return;
554  }
555
556  if (emit_debug_code()) {
557    Label ok;
558    cmp(value, Operand(address, 0));
559    j(equal, &ok, Label::kNear);
560    int3();
561    bind(&ok);
562  }
563
564  // First, check if a write barrier is even needed. The tests below
565  // catch stores of Smis and stores into young gen.
566  Label done;
567
568  if (smi_check == INLINE_SMI_CHECK) {
569    // Skip barrier if writing a smi.
570    JumpIfSmi(value, &done, Label::kNear);
571  }
572
573  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
574    CheckPageFlag(value,
575                  value,  // Used as scratch.
576                  MemoryChunk::kPointersToHereAreInterestingMask,
577                  zero,
578                  &done,
579                  Label::kNear);
580  }
581  CheckPageFlag(object,
582                value,  // Used as scratch.
583                MemoryChunk::kPointersFromHereAreInterestingMask,
584                zero,
585                &done,
586                Label::kNear);
587
588  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
589                       fp_mode);
590  CallStub(&stub);
591
592  bind(&done);
593
594  // Count number of write barriers in generated code.
595  isolate()->counters()->write_barriers_static()->Increment();
596  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
597
598  // Clobber clobbered registers when running with the debug-code flag
599  // turned on to provoke errors.
600  if (emit_debug_code()) {
601    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
602    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
603  }
604}
605
606void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
607                                               Register code_entry,
608                                               Register scratch) {
609  const int offset = JSFunction::kCodeEntryOffset;
610
611  // Since a code entry (value) is always in old space, we don't need to update
612  // remembered set. If incremental marking is off, there is nothing for us to
613  // do.
614  if (!FLAG_incremental_marking) return;
615
616  DCHECK(!js_function.is(code_entry));
617  DCHECK(!js_function.is(scratch));
618  DCHECK(!code_entry.is(scratch));
619  AssertNotSmi(js_function);
620
621  if (emit_debug_code()) {
622    Label ok;
623    lea(scratch, FieldOperand(js_function, offset));
624    cmp(code_entry, Operand(scratch, 0));
625    j(equal, &ok, Label::kNear);
626    int3();
627    bind(&ok);
628  }
629
630  // First, check if a write barrier is even needed. The tests below
631  // catch stores of Smis and stores into young gen.
632  Label done;
633
634  CheckPageFlag(code_entry, scratch,
635                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
636                Label::kNear);
637  CheckPageFlag(js_function, scratch,
638                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
639                Label::kNear);
640
641  // Save input registers.
642  push(js_function);
643  push(code_entry);
644
645  const Register dst = scratch;
646  lea(dst, FieldOperand(js_function, offset));
647
648  // Save caller-saved registers.
649  PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
650
651  int argument_count = 3;
652  PrepareCallCFunction(argument_count, code_entry);
653  mov(Operand(esp, 0 * kPointerSize), js_function);
654  mov(Operand(esp, 1 * kPointerSize), dst);  // Slot.
655  mov(Operand(esp, 2 * kPointerSize),
656      Immediate(ExternalReference::isolate_address(isolate())));
657
658  {
659    AllowExternalCallThatCantCauseGC scope(this);
660    CallCFunction(
661        ExternalReference::incremental_marking_record_write_code_entry_function(
662            isolate()),
663        argument_count);
664  }
665
666  // Restore caller-saved registers.
667  PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
668
669  // Restore input registers.
670  pop(code_entry);
671  pop(js_function);
672
673  bind(&done);
674}
675
676void MacroAssembler::MaybeDropFrames() {
677  // Check whether we need to drop frames to restart a function on the stack.
678  ExternalReference restart_fp =
679      ExternalReference::debug_restart_fp_address(isolate());
680  mov(ebx, Operand::StaticVariable(restart_fp));
681  test(ebx, ebx);
682  j(not_zero, isolate()->builtins()->FrameDropperTrampoline(),
683    RelocInfo::CODE_TARGET);
684}
685
686void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
687  xorps(dst, dst);
688  cvtsi2sd(dst, src);
689}
690
691
692void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
693  Label msb_set_src;
694  Label jmp_return;
695  test(src, src);
696  j(sign, &msb_set_src, Label::kNear);
697  cvtsi2ss(dst, src);
698  jmp(&jmp_return, Label::kNear);
699  bind(&msb_set_src);
700  mov(tmp, src);
701  shr(src, 1);
702  // Recover the least significant bit to avoid rounding errors.
703  and_(tmp, Immediate(1));
704  or_(src, tmp);
705  cvtsi2ss(dst, src);
706  addss(dst, dst);
707  bind(&jmp_return);
708}
709
710void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
711  if (shift >= 32) {
712    mov(high, low);
713    shl(high, shift - 32);
714    xor_(low, low);
715  } else {
716    shld(high, low, shift);
717    shl(low, shift);
718  }
719}
720
721void MacroAssembler::ShlPair_cl(Register high, Register low) {
722  shld_cl(high, low);
723  shl_cl(low);
724  Label done;
725  test(ecx, Immediate(0x20));
726  j(equal, &done, Label::kNear);
727  mov(high, low);
728  xor_(low, low);
729  bind(&done);
730}
731
732void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
733  if (shift >= 32) {
734    mov(low, high);
735    shr(low, shift - 32);
736    xor_(high, high);
737  } else {
738    shrd(high, low, shift);
739    shr(high, shift);
740  }
741}
742
743void MacroAssembler::ShrPair_cl(Register high, Register low) {
744  shrd_cl(low, high);
745  shr_cl(high);
746  Label done;
747  test(ecx, Immediate(0x20));
748  j(equal, &done, Label::kNear);
749  mov(low, high);
750  xor_(high, high);
751  bind(&done);
752}
753
754void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
755  if (shift >= 32) {
756    mov(low, high);
757    sar(low, shift - 32);
758    sar(high, 31);
759  } else {
760    shrd(high, low, shift);
761    sar(high, shift);
762  }
763}
764
765void MacroAssembler::SarPair_cl(Register high, Register low) {
766  shrd_cl(low, high);
767  sar_cl(high);
768  Label done;
769  test(ecx, Immediate(0x20));
770  j(equal, &done, Label::kNear);
771  mov(low, high);
772  sar(high, 31);
773  bind(&done);
774}
775
776bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
777  static const int kMaxImmediateBits = 17;
778  if (!RelocInfo::IsNone(x.rmode_)) return false;
779  return !is_intn(x.x_, kMaxImmediateBits);
780}
781
782
783void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
784  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
785    Move(dst, Immediate(x.x_ ^ jit_cookie()));
786    xor_(dst, jit_cookie());
787  } else {
788    Move(dst, x);
789  }
790}
791
792
793void MacroAssembler::SafePush(const Immediate& x) {
794  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
795    push(Immediate(x.x_ ^ jit_cookie()));
796    xor_(Operand(esp, 0), Immediate(jit_cookie()));
797  } else {
798    push(x);
799  }
800}
801
802
803void MacroAssembler::CmpObjectType(Register heap_object,
804                                   InstanceType type,
805                                   Register map) {
806  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
807  CmpInstanceType(map, type);
808}
809
810
811void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
812  cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
813}
814
815void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
816  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
817}
818
819
820void MacroAssembler::CheckMap(Register obj,
821                              Handle<Map> map,
822                              Label* fail,
823                              SmiCheckType smi_check_type) {
824  if (smi_check_type == DO_SMI_CHECK) {
825    JumpIfSmi(obj, fail);
826  }
827
828  CompareMap(obj, map);
829  j(not_equal, fail);
830}
831
832
833void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
834                                     Register scratch2, Handle<WeakCell> cell,
835                                     Handle<Code> success,
836                                     SmiCheckType smi_check_type) {
837  Label fail;
838  if (smi_check_type == DO_SMI_CHECK) {
839    JumpIfSmi(obj, &fail);
840  }
841  mov(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
842  CmpWeakValue(scratch1, cell, scratch2);
843  j(equal, success);
844
845  bind(&fail);
846}
847
848
849Condition MacroAssembler::IsObjectStringType(Register heap_object,
850                                             Register map,
851                                             Register instance_type) {
852  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
853  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
854  STATIC_ASSERT(kNotStringTag != 0);
855  test(instance_type, Immediate(kIsNotStringMask));
856  return zero;
857}
858
859
860Condition MacroAssembler::IsObjectNameType(Register heap_object,
861                                           Register map,
862                                           Register instance_type) {
863  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
864  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
865  cmpb(instance_type, Immediate(LAST_NAME_TYPE));
866  return below_equal;
867}
868
869
870void MacroAssembler::FCmp() {
871  fucomip();
872  fstp(0);
873}
874
875
876void MacroAssembler::AssertNumber(Register object) {
877  if (emit_debug_code()) {
878    Label ok;
879    JumpIfSmi(object, &ok);
880    cmp(FieldOperand(object, HeapObject::kMapOffset),
881        isolate()->factory()->heap_number_map());
882    Check(equal, kOperandNotANumber);
883    bind(&ok);
884  }
885}
886
887void MacroAssembler::AssertNotNumber(Register object) {
888  if (emit_debug_code()) {
889    test(object, Immediate(kSmiTagMask));
890    Check(not_equal, kOperandIsANumber);
891    cmp(FieldOperand(object, HeapObject::kMapOffset),
892        isolate()->factory()->heap_number_map());
893    Check(not_equal, kOperandIsANumber);
894  }
895}
896
897void MacroAssembler::AssertSmi(Register object) {
898  if (emit_debug_code()) {
899    test(object, Immediate(kSmiTagMask));
900    Check(equal, kOperandIsNotASmi);
901  }
902}
903
904
905void MacroAssembler::AssertString(Register object) {
906  if (emit_debug_code()) {
907    test(object, Immediate(kSmiTagMask));
908    Check(not_equal, kOperandIsASmiAndNotAString);
909    push(object);
910    mov(object, FieldOperand(object, HeapObject::kMapOffset));
911    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
912    pop(object);
913    Check(below, kOperandIsNotAString);
914  }
915}
916
917
918void MacroAssembler::AssertName(Register object) {
919  if (emit_debug_code()) {
920    test(object, Immediate(kSmiTagMask));
921    Check(not_equal, kOperandIsASmiAndNotAName);
922    push(object);
923    mov(object, FieldOperand(object, HeapObject::kMapOffset));
924    CmpInstanceType(object, LAST_NAME_TYPE);
925    pop(object);
926    Check(below_equal, kOperandIsNotAName);
927  }
928}
929
930
931void MacroAssembler::AssertFunction(Register object) {
932  if (emit_debug_code()) {
933    test(object, Immediate(kSmiTagMask));
934    Check(not_equal, kOperandIsASmiAndNotAFunction);
935    Push(object);
936    CmpObjectType(object, JS_FUNCTION_TYPE, object);
937    Pop(object);
938    Check(equal, kOperandIsNotAFunction);
939  }
940}
941
942
943void MacroAssembler::AssertBoundFunction(Register object) {
944  if (emit_debug_code()) {
945    test(object, Immediate(kSmiTagMask));
946    Check(not_equal, kOperandIsASmiAndNotABoundFunction);
947    Push(object);
948    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
949    Pop(object);
950    Check(equal, kOperandIsNotABoundFunction);
951  }
952}
953
954void MacroAssembler::AssertGeneratorObject(Register object) {
955  if (emit_debug_code()) {
956    test(object, Immediate(kSmiTagMask));
957    Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
958    Push(object);
959    CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
960    Pop(object);
961    Check(equal, kOperandIsNotAGeneratorObject);
962  }
963}
964
965void MacroAssembler::AssertReceiver(Register object) {
966  if (emit_debug_code()) {
967    test(object, Immediate(kSmiTagMask));
968    Check(not_equal, kOperandIsASmiAndNotAReceiver);
969    Push(object);
970    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
971    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
972    Pop(object);
973    Check(above_equal, kOperandIsNotAReceiver);
974  }
975}
976
977
978void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
979  if (emit_debug_code()) {
980    Label done_checking;
981    AssertNotSmi(object);
982    cmp(object, isolate()->factory()->undefined_value());
983    j(equal, &done_checking);
984    cmp(FieldOperand(object, 0),
985        Immediate(isolate()->factory()->allocation_site_map()));
986    Assert(equal, kExpectedUndefinedOrCell);
987    bind(&done_checking);
988  }
989}
990
991
992void MacroAssembler::AssertNotSmi(Register object) {
993  if (emit_debug_code()) {
994    test(object, Immediate(kSmiTagMask));
995    Check(not_equal, kOperandIsASmi);
996  }
997}
998
999void MacroAssembler::StubPrologue(StackFrame::Type type) {
1000  push(ebp);  // Caller's frame pointer.
1001  mov(ebp, esp);
1002  push(Immediate(StackFrame::TypeToMarker(type)));
1003}
1004
1005void MacroAssembler::Prologue(bool code_pre_aging) {
1006  PredictableCodeSizeScope predictible_code_size_scope(this,
1007      kNoCodeAgeSequenceLength);
1008  if (code_pre_aging) {
1009      // Pre-age the code.
1010    call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
1011        RelocInfo::CODE_AGE_SEQUENCE);
1012    Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
1013  } else {
1014    push(ebp);  // Caller's frame pointer.
1015    mov(ebp, esp);
1016    push(esi);  // Callee's context.
1017    push(edi);  // Callee's JS function.
1018  }
1019}
1020
1021void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
1022  mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
1023  mov(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
1024  mov(vector, FieldOperand(vector, Cell::kValueOffset));
1025}
1026
1027
1028void MacroAssembler::EnterFrame(StackFrame::Type type,
1029                                bool load_constant_pool_pointer_reg) {
1030  // Out-of-line constant pool not implemented on ia32.
1031  UNREACHABLE();
1032}
1033
1034
1035void MacroAssembler::EnterFrame(StackFrame::Type type) {
1036  push(ebp);
1037  mov(ebp, esp);
1038  push(Immediate(StackFrame::TypeToMarker(type)));
1039  if (type == StackFrame::INTERNAL) {
1040    push(Immediate(CodeObject()));
1041  }
1042  if (emit_debug_code()) {
1043    cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
1044    Check(not_equal, kCodeObjectNotProperlyPatched);
1045  }
1046}
1047
1048
1049void MacroAssembler::LeaveFrame(StackFrame::Type type) {
1050  if (emit_debug_code()) {
1051    cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
1052        Immediate(StackFrame::TypeToMarker(type)));
1053    Check(equal, kStackFrameTypesMustMatch);
1054  }
1055  leave();
1056}
1057
1058void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
1059                                       Register argc) {
1060  Push(ebp);
1061  Move(ebp, esp);
1062  Push(context);
1063  Push(target);
1064  Push(argc);
1065}
1066
1067void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
1068                                       Register argc) {
1069  Pop(argc);
1070  Pop(target);
1071  Pop(context);
1072  leave();
1073}
1074
1075void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
1076  DCHECK(frame_type == StackFrame::EXIT ||
1077         frame_type == StackFrame::BUILTIN_EXIT);
1078
1079  // Set up the frame structure on the stack.
1080  DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1081  DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1082  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1083  push(ebp);
1084  mov(ebp, esp);
1085
1086  // Reserve room for entry stack pointer and push the code object.
1087  push(Immediate(StackFrame::TypeToMarker(frame_type)));
1088  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
1089  push(Immediate(0));  // Saved entry sp, patched before call.
1090  DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
1091  push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
1092
1093  // Save the frame pointer and the context in top.
1094  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
1095  ExternalReference context_address(Isolate::kContextAddress, isolate());
1096  ExternalReference c_function_address(Isolate::kCFunctionAddress, isolate());
1097  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
1098  mov(Operand::StaticVariable(context_address), esi);
1099  mov(Operand::StaticVariable(c_function_address), ebx);
1100}
1101
1102
1103void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
1104  // Optionally save all XMM registers.
1105  if (save_doubles) {
1106    int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
1107                argc * kPointerSize;
1108    sub(esp, Immediate(space));
1109    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
1110    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
1111      XMMRegister reg = XMMRegister::from_code(i);
1112      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
1113    }
1114  } else {
1115    sub(esp, Immediate(argc * kPointerSize));
1116  }
1117
1118  // Get the required frame alignment for the OS.
1119  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
1120  if (kFrameAlignment > 0) {
1121    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
1122    and_(esp, -kFrameAlignment);
1123  }
1124
1125  // Patch the saved entry sp.
1126  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
1127}
1128
1129void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
1130                                    StackFrame::Type frame_type) {
1131  EnterExitFramePrologue(frame_type);
1132
1133  // Set up argc and argv in callee-saved registers.
1134  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
1135  mov(edi, eax);
1136  lea(esi, Operand(ebp, eax, times_4, offset));
1137
1138  // Reserve space for argc, argv and isolate.
1139  EnterExitFrameEpilogue(argc, save_doubles);
1140}
1141
1142
1143void MacroAssembler::EnterApiExitFrame(int argc) {
1144  EnterExitFramePrologue(StackFrame::EXIT);
1145  EnterExitFrameEpilogue(argc, false);
1146}
1147
1148
1149void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
1150  // Optionally restore all XMM registers.
1151  if (save_doubles) {
1152    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
1153    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
1154      XMMRegister reg = XMMRegister::from_code(i);
1155      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
1156    }
1157  }
1158
1159  if (pop_arguments) {
1160    // Get the return address from the stack and restore the frame pointer.
1161    mov(ecx, Operand(ebp, 1 * kPointerSize));
1162    mov(ebp, Operand(ebp, 0 * kPointerSize));
1163
1164    // Pop the arguments and the receiver from the caller stack.
1165    lea(esp, Operand(esi, 1 * kPointerSize));
1166
1167    // Push the return address to get ready to return.
1168    push(ecx);
1169  } else {
1170    // Otherwise just leave the exit frame.
1171    leave();
1172  }
1173
1174  LeaveExitFrameEpilogue(true);
1175}
1176
1177
1178void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
1179  // Restore current context from top and clear it in debug mode.
1180  ExternalReference context_address(Isolate::kContextAddress, isolate());
1181  if (restore_context) {
1182    mov(esi, Operand::StaticVariable(context_address));
1183  }
1184#ifdef DEBUG
1185  mov(Operand::StaticVariable(context_address), Immediate(0));
1186#endif
1187
1188  // Clear the top frame.
1189  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
1190                                       isolate());
1191  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
1192}
1193
1194
1195void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
1196  mov(esp, ebp);
1197  pop(ebp);
1198
1199  LeaveExitFrameEpilogue(restore_context);
1200}
1201
1202
1203void MacroAssembler::PushStackHandler() {
1204  // Adjust this code if not the case.
1205  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
1206  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1207
1208  // Link the current handler as the next handler.
1209  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1210  push(Operand::StaticVariable(handler_address));
1211
1212  // Set this new handler as the current one.
1213  mov(Operand::StaticVariable(handler_address), esp);
1214}
1215
1216
1217void MacroAssembler::PopStackHandler() {
1218  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1219  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1220  pop(Operand::StaticVariable(handler_address));
1221  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1222}
1223
1224
1225// Compute the hash code from the untagged key.  This must be kept in sync with
1226// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1227// code-stub-hydrogen.cc
1228//
1229// Note: r0 will contain hash code
1230void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
1231  // Xor original key with a seed.
1232  if (serializer_enabled()) {
1233    ExternalReference roots_array_start =
1234        ExternalReference::roots_array_start(isolate());
1235    mov(scratch, Immediate(Heap::kHashSeedRootIndex));
1236    mov(scratch,
1237        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
1238    SmiUntag(scratch);
1239    xor_(r0, scratch);
1240  } else {
1241    int32_t seed = isolate()->heap()->HashSeed();
1242    xor_(r0, Immediate(seed));
1243  }
1244
1245  // hash = ~hash + (hash << 15);
1246  mov(scratch, r0);
1247  not_(r0);
1248  shl(scratch, 15);
1249  add(r0, scratch);
1250  // hash = hash ^ (hash >> 12);
1251  mov(scratch, r0);
1252  shr(scratch, 12);
1253  xor_(r0, scratch);
1254  // hash = hash + (hash << 2);
1255  lea(r0, Operand(r0, r0, times_4, 0));
1256  // hash = hash ^ (hash >> 4);
1257  mov(scratch, r0);
1258  shr(scratch, 4);
1259  xor_(r0, scratch);
1260  // hash = hash * 2057;
1261  imul(r0, r0, 2057);
1262  // hash = hash ^ (hash >> 16);
1263  mov(scratch, r0);
1264  shr(scratch, 16);
1265  xor_(r0, scratch);
1266  and_(r0, 0x3fffffff);
1267}
1268
1269void MacroAssembler::LoadAllocationTopHelper(Register result,
1270                                             Register scratch,
1271                                             AllocationFlags flags) {
1272  ExternalReference allocation_top =
1273      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1274
1275  // Just return if allocation top is already known.
1276  if ((flags & RESULT_CONTAINS_TOP) != 0) {
1277    // No use of scratch if allocation top is provided.
1278    DCHECK(scratch.is(no_reg));
1279#ifdef DEBUG
1280    // Assert that result actually contains top on entry.
1281    cmp(result, Operand::StaticVariable(allocation_top));
1282    Check(equal, kUnexpectedAllocationTop);
1283#endif
1284    return;
1285  }
1286
1287  // Move address of new object to result. Use scratch register if available.
1288  if (scratch.is(no_reg)) {
1289    mov(result, Operand::StaticVariable(allocation_top));
1290  } else {
1291    mov(scratch, Immediate(allocation_top));
1292    mov(result, Operand(scratch, 0));
1293  }
1294}
1295
1296
1297void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
1298                                               Register scratch,
1299                                               AllocationFlags flags) {
1300  if (emit_debug_code()) {
1301    test(result_end, Immediate(kObjectAlignmentMask));
1302    Check(zero, kUnalignedAllocationInNewSpace);
1303  }
1304
1305  ExternalReference allocation_top =
1306      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1307
1308  // Update new top. Use scratch if available.
1309  if (scratch.is(no_reg)) {
1310    mov(Operand::StaticVariable(allocation_top), result_end);
1311  } else {
1312    mov(Operand(scratch, 0), result_end);
1313  }
1314}
1315
1316
1317void MacroAssembler::Allocate(int object_size,
1318                              Register result,
1319                              Register result_end,
1320                              Register scratch,
1321                              Label* gc_required,
1322                              AllocationFlags flags) {
1323  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1324  DCHECK(object_size <= kMaxRegularHeapObjectSize);
1325  DCHECK((flags & ALLOCATION_FOLDED) == 0);
1326  if (!FLAG_inline_new) {
1327    if (emit_debug_code()) {
1328      // Trash the registers to simulate an allocation failure.
1329      mov(result, Immediate(0x7091));
1330      if (result_end.is_valid()) {
1331        mov(result_end, Immediate(0x7191));
1332      }
1333      if (scratch.is_valid()) {
1334        mov(scratch, Immediate(0x7291));
1335      }
1336    }
1337    jmp(gc_required);
1338    return;
1339  }
1340  DCHECK(!result.is(result_end));
1341
1342  // Load address of new object into result.
1343  LoadAllocationTopHelper(result, scratch, flags);
1344
1345  ExternalReference allocation_limit =
1346      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1347
1348  // Align the next allocation. Storing the filler map without checking top is
1349  // safe in new-space because the limit of the heap is aligned there.
1350  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1351    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1352    Label aligned;
1353    test(result, Immediate(kDoubleAlignmentMask));
1354    j(zero, &aligned, Label::kNear);
1355    if ((flags & PRETENURE) != 0) {
1356      cmp(result, Operand::StaticVariable(allocation_limit));
1357      j(above_equal, gc_required);
1358    }
1359    mov(Operand(result, 0),
1360        Immediate(isolate()->factory()->one_pointer_filler_map()));
1361    add(result, Immediate(kDoubleSize / 2));
1362    bind(&aligned);
1363  }
1364
1365  // Calculate new top and bail out if space is exhausted.
1366  Register top_reg = result_end.is_valid() ? result_end : result;
1367
1368  if (!top_reg.is(result)) {
1369    mov(top_reg, result);
1370  }
1371  add(top_reg, Immediate(object_size));
1372  cmp(top_reg, Operand::StaticVariable(allocation_limit));
1373  j(above, gc_required);
1374
1375  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1376    // The top pointer is not updated for allocation folding dominators.
1377    UpdateAllocationTopHelper(top_reg, scratch, flags);
1378  }
1379
1380  if (top_reg.is(result)) {
1381    sub(result, Immediate(object_size - kHeapObjectTag));
1382  } else {
1383    // Tag the result.
1384    DCHECK(kHeapObjectTag == 1);
1385    inc(result);
1386  }
1387}
1388
1389
1390void MacroAssembler::Allocate(int header_size,
1391                              ScaleFactor element_size,
1392                              Register element_count,
1393                              RegisterValueType element_count_type,
1394                              Register result,
1395                              Register result_end,
1396                              Register scratch,
1397                              Label* gc_required,
1398                              AllocationFlags flags) {
1399  DCHECK((flags & SIZE_IN_WORDS) == 0);
1400  DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
1401  DCHECK((flags & ALLOCATION_FOLDED) == 0);
1402  if (!FLAG_inline_new) {
1403    if (emit_debug_code()) {
1404      // Trash the registers to simulate an allocation failure.
1405      mov(result, Immediate(0x7091));
1406      mov(result_end, Immediate(0x7191));
1407      if (scratch.is_valid()) {
1408        mov(scratch, Immediate(0x7291));
1409      }
1410      // Register element_count is not modified by the function.
1411    }
1412    jmp(gc_required);
1413    return;
1414  }
1415  DCHECK(!result.is(result_end));
1416
1417  // Load address of new object into result.
1418  LoadAllocationTopHelper(result, scratch, flags);
1419
1420  ExternalReference allocation_limit =
1421      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1422
1423  // Align the next allocation. Storing the filler map without checking top is
1424  // safe in new-space because the limit of the heap is aligned there.
1425  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1426    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1427    Label aligned;
1428    test(result, Immediate(kDoubleAlignmentMask));
1429    j(zero, &aligned, Label::kNear);
1430    if ((flags & PRETENURE) != 0) {
1431      cmp(result, Operand::StaticVariable(allocation_limit));
1432      j(above_equal, gc_required);
1433    }
1434    mov(Operand(result, 0),
1435        Immediate(isolate()->factory()->one_pointer_filler_map()));
1436    add(result, Immediate(kDoubleSize / 2));
1437    bind(&aligned);
1438  }
1439
1440  // Calculate new top and bail out if space is exhausted.
1441  // We assume that element_count*element_size + header_size does not
1442  // overflow.
1443  if (element_count_type == REGISTER_VALUE_IS_SMI) {
1444    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
1445    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
1446    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
1447    DCHECK(element_size >= times_2);
1448    DCHECK(kSmiTagSize == 1);
1449    element_size = static_cast<ScaleFactor>(element_size - 1);
1450  } else {
1451    DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
1452  }
1453
1454  lea(result_end, Operand(element_count, element_size, header_size));
1455  add(result_end, result);
1456  cmp(result_end, Operand::StaticVariable(allocation_limit));
1457  j(above, gc_required);
1458
1459  // Tag result.
1460  DCHECK(kHeapObjectTag == 1);
1461  inc(result);
1462
1463  UpdateAllocationTopHelper(result_end, scratch, flags);
1464}
1465
1466
1467void MacroAssembler::Allocate(Register object_size,
1468                              Register result,
1469                              Register result_end,
1470                              Register scratch,
1471                              Label* gc_required,
1472                              AllocationFlags flags) {
1473  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1474  DCHECK((flags & ALLOCATION_FOLDED) == 0);
1475  if (!FLAG_inline_new) {
1476    if (emit_debug_code()) {
1477      // Trash the registers to simulate an allocation failure.
1478      mov(result, Immediate(0x7091));
1479      mov(result_end, Immediate(0x7191));
1480      if (scratch.is_valid()) {
1481        mov(scratch, Immediate(0x7291));
1482      }
1483      // object_size is left unchanged by this function.
1484    }
1485    jmp(gc_required);
1486    return;
1487  }
1488  DCHECK(!result.is(result_end));
1489
1490  // Load address of new object into result.
1491  LoadAllocationTopHelper(result, scratch, flags);
1492
1493  ExternalReference allocation_limit =
1494      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1495
1496  // Align the next allocation. Storing the filler map without checking top is
1497  // safe in new-space because the limit of the heap is aligned there.
1498  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1499    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1500    Label aligned;
1501    test(result, Immediate(kDoubleAlignmentMask));
1502    j(zero, &aligned, Label::kNear);
1503    if ((flags & PRETENURE) != 0) {
1504      cmp(result, Operand::StaticVariable(allocation_limit));
1505      j(above_equal, gc_required);
1506    }
1507    mov(Operand(result, 0),
1508        Immediate(isolate()->factory()->one_pointer_filler_map()));
1509    add(result, Immediate(kDoubleSize / 2));
1510    bind(&aligned);
1511  }
1512
1513  // Calculate new top and bail out if space is exhausted.
1514  if (!object_size.is(result_end)) {
1515    mov(result_end, object_size);
1516  }
1517  add(result_end, result);
1518  cmp(result_end, Operand::StaticVariable(allocation_limit));
1519  j(above, gc_required);
1520
1521  // Tag result.
1522  DCHECK(kHeapObjectTag == 1);
1523  inc(result);
1524
1525  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
1526    // The top pointer is not updated for allocation folding dominators.
1527    UpdateAllocationTopHelper(result_end, scratch, flags);
1528  }
1529}
1530
1531void MacroAssembler::FastAllocate(int object_size, Register result,
1532                                  Register result_end, AllocationFlags flags) {
1533  DCHECK(!result.is(result_end));
1534  // Load address of new object into result.
1535  LoadAllocationTopHelper(result, no_reg, flags);
1536
1537  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1538    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1539    Label aligned;
1540    test(result, Immediate(kDoubleAlignmentMask));
1541    j(zero, &aligned, Label::kNear);
1542    mov(Operand(result, 0),
1543        Immediate(isolate()->factory()->one_pointer_filler_map()));
1544    add(result, Immediate(kDoubleSize / 2));
1545    bind(&aligned);
1546  }
1547
1548  lea(result_end, Operand(result, object_size));
1549  UpdateAllocationTopHelper(result_end, no_reg, flags);
1550
1551  DCHECK(kHeapObjectTag == 1);
1552  inc(result);
1553}
1554
1555void MacroAssembler::FastAllocate(Register object_size, Register result,
1556                                  Register result_end, AllocationFlags flags) {
1557  DCHECK(!result.is(result_end));
1558  // Load address of new object into result.
1559  LoadAllocationTopHelper(result, no_reg, flags);
1560
1561  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1562    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
1563    Label aligned;
1564    test(result, Immediate(kDoubleAlignmentMask));
1565    j(zero, &aligned, Label::kNear);
1566    mov(Operand(result, 0),
1567        Immediate(isolate()->factory()->one_pointer_filler_map()));
1568    add(result, Immediate(kDoubleSize / 2));
1569    bind(&aligned);
1570  }
1571
1572  lea(result_end, Operand(result, object_size, times_1, 0));
1573  UpdateAllocationTopHelper(result_end, no_reg, flags);
1574
1575  DCHECK(kHeapObjectTag == 1);
1576  inc(result);
1577}
1578
1579
1580void MacroAssembler::AllocateHeapNumber(Register result,
1581                                        Register scratch1,
1582                                        Register scratch2,
1583                                        Label* gc_required,
1584                                        MutableMode mode) {
1585  // Allocate heap number in new space.
1586  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
1587           NO_ALLOCATION_FLAGS);
1588
1589  Handle<Map> map = mode == MUTABLE
1590      ? isolate()->factory()->mutable_heap_number_map()
1591      : isolate()->factory()->heap_number_map();
1592
1593  // Set the map.
1594  mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
1595}
1596
1597void MacroAssembler::AllocateJSValue(Register result, Register constructor,
1598                                     Register value, Register scratch,
1599                                     Label* gc_required) {
1600  DCHECK(!result.is(constructor));
1601  DCHECK(!result.is(scratch));
1602  DCHECK(!result.is(value));
1603
1604  // Allocate JSValue in new space.
1605  Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
1606           NO_ALLOCATION_FLAGS);
1607
1608  // Initialize the JSValue.
1609  LoadGlobalFunctionInitialMap(constructor, scratch);
1610  mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
1611  LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
1612  mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
1613  mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
1614  mov(FieldOperand(result, JSValue::kValueOffset), value);
1615  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
1616}
1617
1618void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
1619                                                Register end_address,
1620                                                Register filler) {
1621  Label loop, entry;
1622  jmp(&entry, Label::kNear);
1623  bind(&loop);
1624  mov(Operand(current_address, 0), filler);
1625  add(current_address, Immediate(kPointerSize));
1626  bind(&entry);
1627  cmp(current_address, end_address);
1628  j(below, &loop, Label::kNear);
1629}
1630
1631
1632void MacroAssembler::BooleanBitTest(Register object,
1633                                    int field_offset,
1634                                    int bit_index) {
1635  bit_index += kSmiTagSize + kSmiShiftSize;
1636  DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
1637  int byte_index = bit_index / kBitsPerByte;
1638  int byte_bit_index = bit_index & (kBitsPerByte - 1);
1639  test_b(FieldOperand(object, field_offset + byte_index),
1640         Immediate(1 << byte_bit_index));
1641}
1642
1643
1644
1645void MacroAssembler::NegativeZeroTest(Register result,
1646                                      Register op,
1647                                      Label* then_label) {
1648  Label ok;
1649  test(result, result);
1650  j(not_zero, &ok, Label::kNear);
1651  test(op, op);
1652  j(sign, then_label, Label::kNear);
1653  bind(&ok);
1654}
1655
1656
1657void MacroAssembler::NegativeZeroTest(Register result,
1658                                      Register op1,
1659                                      Register op2,
1660                                      Register scratch,
1661                                      Label* then_label) {
1662  Label ok;
1663  test(result, result);
1664  j(not_zero, &ok, Label::kNear);
1665  mov(scratch, op1);
1666  or_(scratch, op2);
1667  j(sign, then_label, Label::kNear);
1668  bind(&ok);
1669}
1670
1671
1672void MacroAssembler::GetMapConstructor(Register result, Register map,
1673                                       Register temp) {
1674  Label done, loop;
1675  mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
1676  bind(&loop);
1677  JumpIfSmi(result, &done, Label::kNear);
1678  CmpObjectType(result, MAP_TYPE, temp);
1679  j(not_equal, &done, Label::kNear);
1680  mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
1681  jmp(&loop);
1682  bind(&done);
1683}
1684
1685void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1686  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
1687  call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1688}
1689
1690
1691void MacroAssembler::TailCallStub(CodeStub* stub) {
1692  jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
1693}
1694
1695
1696void MacroAssembler::StubReturn(int argc) {
1697  DCHECK(argc >= 1 && generating_stub());
1698  ret((argc - 1) * kPointerSize);
1699}
1700
1701
1702bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
1703  return has_frame_ || !stub->SometimesSetsUpAFrame();
1704}
1705
1706void MacroAssembler::CallRuntime(const Runtime::Function* f,
1707                                 int num_arguments,
1708                                 SaveFPRegsMode save_doubles) {
1709  // If the expected number of arguments of the runtime function is
1710  // constant, we check that the actual number of arguments match the
1711  // expectation.
1712  CHECK(f->nargs < 0 || f->nargs == num_arguments);
1713
1714  // TODO(1236192): Most runtime routines don't need the number of
1715  // arguments passed in because it is constant. At some point we
1716  // should remove this need and make the runtime routine entry code
1717  // smarter.
1718  Move(eax, Immediate(num_arguments));
1719  mov(ebx, Immediate(ExternalReference(f, isolate())));
1720  CEntryStub ces(isolate(), 1, save_doubles);
1721  CallStub(&ces);
1722}
1723
1724
1725void MacroAssembler::CallExternalReference(ExternalReference ref,
1726                                           int num_arguments) {
1727  mov(eax, Immediate(num_arguments));
1728  mov(ebx, Immediate(ref));
1729
1730  CEntryStub stub(isolate(), 1);
1731  CallStub(&stub);
1732}
1733
1734
1735void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1736  // ----------- S t a t e -------------
1737  //  -- esp[0]                 : return address
1738  //  -- esp[8]                 : argument num_arguments - 1
1739  //  ...
1740  //  -- esp[8 * num_arguments] : argument 0 (receiver)
1741  //
1742  //  For runtime functions with variable arguments:
1743  //  -- eax                    : number of  arguments
1744  // -----------------------------------
1745
1746  const Runtime::Function* function = Runtime::FunctionForId(fid);
1747  DCHECK_EQ(1, function->result_size);
1748  if (function->nargs >= 0) {
1749    // TODO(1236192): Most runtime routines don't need the number of
1750    // arguments passed in because it is constant. At some point we
1751    // should remove this need and make the runtime routine entry code
1752    // smarter.
1753    mov(eax, Immediate(function->nargs));
1754  }
1755  JumpToExternalReference(ExternalReference(fid, isolate()));
1756}
1757
1758void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
1759                                             bool builtin_exit_frame) {
1760  // Set the entry point and jump to the C entry runtime stub.
1761  mov(ebx, Immediate(ext));
1762  CEntryStub ces(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
1763                 builtin_exit_frame);
1764  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
1765}
1766
1767void MacroAssembler::PrepareForTailCall(
1768    const ParameterCount& callee_args_count, Register caller_args_count_reg,
1769    Register scratch0, Register scratch1, ReturnAddressState ra_state,
1770    int number_of_temp_values_after_return_address) {
1771#if DEBUG
1772  if (callee_args_count.is_reg()) {
1773    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1774                       scratch1));
1775  } else {
1776    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1777  }
1778  DCHECK(ra_state != ReturnAddressState::kNotOnStack ||
1779         number_of_temp_values_after_return_address == 0);
1780#endif
1781
1782  // Calculate the destination address where we will put the return address
1783  // after we drop current frame.
1784  Register new_sp_reg = scratch0;
1785  if (callee_args_count.is_reg()) {
1786    sub(caller_args_count_reg, callee_args_count.reg());
1787    lea(new_sp_reg,
1788        Operand(ebp, caller_args_count_reg, times_pointer_size,
1789                StandardFrameConstants::kCallerPCOffset -
1790                    number_of_temp_values_after_return_address * kPointerSize));
1791  } else {
1792    lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
1793                            StandardFrameConstants::kCallerPCOffset -
1794                                (callee_args_count.immediate() +
1795                                 number_of_temp_values_after_return_address) *
1796                                    kPointerSize));
1797  }
1798
1799  if (FLAG_debug_code) {
1800    cmp(esp, new_sp_reg);
1801    Check(below, kStackAccessBelowStackPointer);
1802  }
1803
1804  // Copy return address from caller's frame to current frame's return address
1805  // to avoid its trashing and let the following loop copy it to the right
1806  // place.
1807  Register tmp_reg = scratch1;
1808  if (ra_state == ReturnAddressState::kOnStack) {
1809    mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
1810    mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
1811        tmp_reg);
1812  } else {
1813    DCHECK(ReturnAddressState::kNotOnStack == ra_state);
1814    DCHECK_EQ(0, number_of_temp_values_after_return_address);
1815    Push(Operand(ebp, StandardFrameConstants::kCallerPCOffset));
1816  }
1817
1818  // Restore caller's frame pointer now as it could be overwritten by
1819  // the copying loop.
1820  mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1821
1822  // +2 here is to copy both receiver and return address.
1823  Register count_reg = caller_args_count_reg;
1824  if (callee_args_count.is_reg()) {
1825    lea(count_reg, Operand(callee_args_count.reg(),
1826                           2 + number_of_temp_values_after_return_address));
1827  } else {
1828    mov(count_reg, Immediate(callee_args_count.immediate() + 2 +
1829                             number_of_temp_values_after_return_address));
1830    // TODO(ishell): Unroll copying loop for small immediate values.
1831  }
1832
1833  // Now copy callee arguments to the caller frame going backwards to avoid
1834  // callee arguments corruption (source and destination areas could overlap).
1835  Label loop, entry;
1836  jmp(&entry, Label::kNear);
1837  bind(&loop);
1838  dec(count_reg);
1839  mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
1840  mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
1841  bind(&entry);
1842  cmp(count_reg, Immediate(0));
1843  j(not_equal, &loop, Label::kNear);
1844
1845  // Leave current frame.
1846  mov(esp, new_sp_reg);
1847}
1848
1849void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1850                                    const ParameterCount& actual,
1851                                    Label* done,
1852                                    bool* definitely_mismatches,
1853                                    InvokeFlag flag,
1854                                    Label::Distance done_near,
1855                                    const CallWrapper& call_wrapper) {
1856  bool definitely_matches = false;
1857  *definitely_mismatches = false;
1858  Label invoke;
1859  if (expected.is_immediate()) {
1860    DCHECK(actual.is_immediate());
1861    mov(eax, actual.immediate());
1862    if (expected.immediate() == actual.immediate()) {
1863      definitely_matches = true;
1864    } else {
1865      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1866      if (expected.immediate() == sentinel) {
1867        // Don't worry about adapting arguments for builtins that
1868        // don't want that done. Skip adaption code by making it look
1869        // like we have a match between expected and actual number of
1870        // arguments.
1871        definitely_matches = true;
1872      } else {
1873        *definitely_mismatches = true;
1874        mov(ebx, expected.immediate());
1875      }
1876    }
1877  } else {
1878    if (actual.is_immediate()) {
1879      // Expected is in register, actual is immediate. This is the
1880      // case when we invoke function values without going through the
1881      // IC mechanism.
1882      mov(eax, actual.immediate());
1883      cmp(expected.reg(), actual.immediate());
1884      j(equal, &invoke);
1885      DCHECK(expected.reg().is(ebx));
1886    } else if (!expected.reg().is(actual.reg())) {
1887      // Both expected and actual are in (different) registers. This
1888      // is the case when we invoke functions using call and apply.
1889      cmp(expected.reg(), actual.reg());
1890      j(equal, &invoke);
1891      DCHECK(actual.reg().is(eax));
1892      DCHECK(expected.reg().is(ebx));
1893    } else {
1894      definitely_matches = true;
1895      Move(eax, actual.reg());
1896    }
1897  }
1898
1899  if (!definitely_matches) {
1900    Handle<Code> adaptor =
1901        isolate()->builtins()->ArgumentsAdaptorTrampoline();
1902    if (flag == CALL_FUNCTION) {
1903      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
1904      call(adaptor, RelocInfo::CODE_TARGET);
1905      call_wrapper.AfterCall();
1906      if (!*definitely_mismatches) {
1907        jmp(done, done_near);
1908      }
1909    } else {
1910      jmp(adaptor, RelocInfo::CODE_TARGET);
1911    }
1912    bind(&invoke);
1913  }
1914}
1915
1916void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1917                                    const ParameterCount& expected,
1918                                    const ParameterCount& actual) {
1919  Label skip_hook;
1920  ExternalReference debug_hook_active =
1921      ExternalReference::debug_hook_on_function_call_address(isolate());
1922  cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
1923  j(equal, &skip_hook);
1924  {
1925    FrameScope frame(this,
1926                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1927    if (expected.is_reg()) {
1928      SmiTag(expected.reg());
1929      Push(expected.reg());
1930    }
1931    if (actual.is_reg()) {
1932      SmiTag(actual.reg());
1933      Push(actual.reg());
1934    }
1935    if (new_target.is_valid()) {
1936      Push(new_target);
1937    }
1938    Push(fun);
1939    Push(fun);
1940    CallRuntime(Runtime::kDebugOnFunctionCall);
1941    Pop(fun);
1942    if (new_target.is_valid()) {
1943      Pop(new_target);
1944    }
1945    if (actual.is_reg()) {
1946      Pop(actual.reg());
1947      SmiUntag(actual.reg());
1948    }
1949    if (expected.is_reg()) {
1950      Pop(expected.reg());
1951      SmiUntag(expected.reg());
1952    }
1953  }
1954  bind(&skip_hook);
1955}
1956
1957
1958void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1959                                        const ParameterCount& expected,
1960                                        const ParameterCount& actual,
1961                                        InvokeFlag flag,
1962                                        const CallWrapper& call_wrapper) {
1963  // You can't call a function without a valid frame.
1964  DCHECK(flag == JUMP_FUNCTION || has_frame());
1965  DCHECK(function.is(edi));
1966  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
1967
1968  if (call_wrapper.NeedsDebugHookCheck()) {
1969    CheckDebugHook(function, new_target, expected, actual);
1970  }
1971
1972  // Clear the new.target register if not given.
1973  if (!new_target.is_valid()) {
1974    mov(edx, isolate()->factory()->undefined_value());
1975  }
1976
1977  Label done;
1978  bool definitely_mismatches = false;
1979  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1980                 Label::kNear, call_wrapper);
1981  if (!definitely_mismatches) {
1982    // We call indirectly through the code field in the function to
1983    // allow recompilation to take effect without changing any of the
1984    // call sites.
1985    Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
1986    if (flag == CALL_FUNCTION) {
1987      call_wrapper.BeforeCall(CallSize(code));
1988      call(code);
1989      call_wrapper.AfterCall();
1990    } else {
1991      DCHECK(flag == JUMP_FUNCTION);
1992      jmp(code);
1993    }
1994    bind(&done);
1995  }
1996}
1997
1998
1999void MacroAssembler::InvokeFunction(Register fun,
2000                                    Register new_target,
2001                                    const ParameterCount& actual,
2002                                    InvokeFlag flag,
2003                                    const CallWrapper& call_wrapper) {
2004  // You can't call a function without a valid frame.
2005  DCHECK(flag == JUMP_FUNCTION || has_frame());
2006
2007  DCHECK(fun.is(edi));
2008  mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2009  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2010  mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
2011  SmiUntag(ebx);
2012
2013  ParameterCount expected(ebx);
2014  InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
2015}
2016
2017
2018void MacroAssembler::InvokeFunction(Register fun,
2019                                    const ParameterCount& expected,
2020                                    const ParameterCount& actual,
2021                                    InvokeFlag flag,
2022                                    const CallWrapper& call_wrapper) {
2023  // You can't call a function without a valid frame.
2024  DCHECK(flag == JUMP_FUNCTION || has_frame());
2025
2026  DCHECK(fun.is(edi));
2027  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2028
2029  InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
2030}
2031
2032
2033void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2034                                    const ParameterCount& expected,
2035                                    const ParameterCount& actual,
2036                                    InvokeFlag flag,
2037                                    const CallWrapper& call_wrapper) {
2038  LoadHeapObject(edi, function);
2039  InvokeFunction(edi, expected, actual, flag, call_wrapper);
2040}
2041
2042
2043void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2044  if (context_chain_length > 0) {
2045    // Move up the chain of contexts to the context containing the slot.
2046    mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2047    for (int i = 1; i < context_chain_length; i++) {
2048      mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2049    }
2050  } else {
2051    // Slot is in the current function context.  Move it into the
2052    // destination register in case we store into it (the write barrier
2053    // cannot be allowed to destroy the context in esi).
2054    mov(dst, esi);
2055  }
2056
2057  // We should not have found a with context by walking the context chain
2058  // (i.e., the static scope chain and runtime context chain do not agree).
2059  // A variable occurring in such a scope should have slot type LOOKUP and
2060  // not CONTEXT.
2061  if (emit_debug_code()) {
2062    cmp(FieldOperand(dst, HeapObject::kMapOffset),
2063        isolate()->factory()->with_context_map());
2064    Check(not_equal, kVariableResolvedToWithContext);
2065  }
2066}
2067
2068
2069void MacroAssembler::LoadGlobalProxy(Register dst) {
2070  mov(dst, NativeContextOperand());
2071  mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
2072}
2073
2074void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2075  // Load the native context from the current context.
2076  mov(function, NativeContextOperand());
2077  // Load the function from the native context.
2078  mov(function, ContextOperand(function, index));
2079}
2080
2081
2082void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2083                                                  Register map) {
2084  // Load the initial map.  The global functions all have initial maps.
2085  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2086  if (emit_debug_code()) {
2087    Label ok, fail;
2088    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
2089    jmp(&ok);
2090    bind(&fail);
2091    Abort(kGlobalFunctionsMustHaveInitialMap);
2092    bind(&ok);
2093  }
2094}
2095
2096
2097// Store the value in register src in the safepoint register stack
2098// slot for register dst.
2099void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2100  mov(SafepointRegisterSlot(dst), src);
2101}
2102
2103
2104void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
2105  mov(SafepointRegisterSlot(dst), src);
2106}
2107
2108
2109void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2110  mov(dst, SafepointRegisterSlot(src));
2111}
2112
2113
2114Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2115  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2116}
2117
2118
2119int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
2120  // The registers are pushed starting with the lowest encoding,
2121  // which means that lowest encodings are furthest away from
2122  // the stack pointer.
2123  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
2124  return kNumSafepointRegisters - reg_code - 1;
2125}
2126
2127
2128void MacroAssembler::LoadHeapObject(Register result,
2129                                    Handle<HeapObject> object) {
2130  mov(result, object);
2131}
2132
2133
2134void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
2135  cmp(reg, object);
2136}
2137
2138void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { Push(object); }
2139
2140void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2141                                  Register scratch) {
2142  mov(scratch, cell);
2143  cmp(value, FieldOperand(scratch, WeakCell::kValueOffset));
2144}
2145
2146
2147void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2148  mov(value, cell);
2149  mov(value, FieldOperand(value, WeakCell::kValueOffset));
2150}
2151
2152
2153void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2154                                   Label* miss) {
2155  GetWeakValue(value, cell);
2156  JumpIfSmi(value, miss);
2157}
2158
2159
2160void MacroAssembler::Ret() {
2161  ret(0);
2162}
2163
2164
2165void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2166  if (is_uint16(bytes_dropped)) {
2167    ret(bytes_dropped);
2168  } else {
2169    pop(scratch);
2170    add(esp, Immediate(bytes_dropped));
2171    push(scratch);
2172    ret(0);
2173  }
2174}
2175
2176
2177void MacroAssembler::Drop(int stack_elements) {
2178  if (stack_elements > 0) {
2179    add(esp, Immediate(stack_elements * kPointerSize));
2180  }
2181}
2182
2183
2184void MacroAssembler::Move(Register dst, Register src) {
2185  if (!dst.is(src)) {
2186    mov(dst, src);
2187  }
2188}
2189
2190
2191void MacroAssembler::Move(Register dst, const Immediate& x) {
2192  if (x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
2193    xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
2194  } else {
2195    mov(dst, x);
2196  }
2197}
2198
2199
2200void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
2201  mov(dst, x);
2202}
2203
2204
2205void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
2206  if (src == 0) {
2207    pxor(dst, dst);
2208  } else {
2209    unsigned cnt = base::bits::CountPopulation32(src);
2210    unsigned nlz = base::bits::CountLeadingZeros32(src);
2211    unsigned ntz = base::bits::CountTrailingZeros32(src);
2212    if (nlz + cnt + ntz == 32) {
2213      pcmpeqd(dst, dst);
2214      if (ntz == 0) {
2215        psrld(dst, 32 - cnt);
2216      } else {
2217        pslld(dst, 32 - cnt);
2218        if (nlz != 0) psrld(dst, nlz);
2219      }
2220    } else {
2221      push(eax);
2222      mov(eax, Immediate(src));
2223      movd(dst, Operand(eax));
2224      pop(eax);
2225    }
2226  }
2227}
2228
2229
2230void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
2231  if (src == 0) {
2232    pxor(dst, dst);
2233  } else {
2234    uint32_t lower = static_cast<uint32_t>(src);
2235    uint32_t upper = static_cast<uint32_t>(src >> 32);
2236    unsigned cnt = base::bits::CountPopulation64(src);
2237    unsigned nlz = base::bits::CountLeadingZeros64(src);
2238    unsigned ntz = base::bits::CountTrailingZeros64(src);
2239    if (nlz + cnt + ntz == 64) {
2240      pcmpeqd(dst, dst);
2241      if (ntz == 0) {
2242        psrlq(dst, 64 - cnt);
2243      } else {
2244        psllq(dst, 64 - cnt);
2245        if (nlz != 0) psrlq(dst, nlz);
2246      }
2247    } else if (lower == 0) {
2248      Move(dst, upper);
2249      psllq(dst, 32);
2250    } else if (CpuFeatures::IsSupported(SSE4_1)) {
2251      CpuFeatureScope scope(this, SSE4_1);
2252      push(eax);
2253      Move(eax, Immediate(lower));
2254      movd(dst, Operand(eax));
2255      Move(eax, Immediate(upper));
2256      pinsrd(dst, Operand(eax), 1);
2257      pop(eax);
2258    } else {
2259      push(Immediate(upper));
2260      push(Immediate(lower));
2261      movsd(dst, Operand(esp, 0));
2262      add(esp, Immediate(kDoubleSize));
2263    }
2264  }
2265}
2266
2267
2268void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
2269  if (imm8 == 0) {
2270    movd(dst, src);
2271    return;
2272  }
2273  DCHECK_EQ(1, imm8);
2274  if (CpuFeatures::IsSupported(SSE4_1)) {
2275    CpuFeatureScope sse_scope(this, SSE4_1);
2276    pextrd(dst, src, imm8);
2277    return;
2278  }
2279  pshufd(xmm0, src, 1);
2280  movd(dst, xmm0);
2281}
2282
2283
2284void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
2285  DCHECK(imm8 == 0 || imm8 == 1);
2286  if (CpuFeatures::IsSupported(SSE4_1)) {
2287    CpuFeatureScope sse_scope(this, SSE4_1);
2288    pinsrd(dst, src, imm8);
2289    return;
2290  }
2291  movd(xmm0, src);
2292  if (imm8 == 1) {
2293    punpckldq(dst, xmm0);
2294  } else {
2295    DCHECK_EQ(0, imm8);
2296    psrlq(dst, 32);
2297    punpckldq(xmm0, dst);
2298    movaps(dst, xmm0);
2299  }
2300}
2301
2302
2303void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
2304  if (CpuFeatures::IsSupported(LZCNT)) {
2305    CpuFeatureScope scope(this, LZCNT);
2306    lzcnt(dst, src);
2307    return;
2308  }
2309  Label not_zero_src;
2310  bsr(dst, src);
2311  j(not_zero, &not_zero_src, Label::kNear);
2312  Move(dst, Immediate(63));  // 63^31 == 32
2313  bind(&not_zero_src);
2314  xor_(dst, Immediate(31));  // for x in [0..31], 31^x == 31-x.
2315}
2316
2317
2318void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
2319  if (CpuFeatures::IsSupported(BMI1)) {
2320    CpuFeatureScope scope(this, BMI1);
2321    tzcnt(dst, src);
2322    return;
2323  }
2324  Label not_zero_src;
2325  bsf(dst, src);
2326  j(not_zero, &not_zero_src, Label::kNear);
2327  Move(dst, Immediate(32));  // The result of tzcnt is 32 if src = 0.
2328  bind(&not_zero_src);
2329}
2330
2331
2332void MacroAssembler::Popcnt(Register dst, const Operand& src) {
2333  if (CpuFeatures::IsSupported(POPCNT)) {
2334    CpuFeatureScope scope(this, POPCNT);
2335    popcnt(dst, src);
2336    return;
2337  }
2338  UNREACHABLE();
2339}
2340
2341
2342void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2343  if (FLAG_native_code_counters && counter->Enabled()) {
2344    mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
2345  }
2346}
2347
2348
2349void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2350  DCHECK(value > 0);
2351  if (FLAG_native_code_counters && counter->Enabled()) {
2352    Operand operand = Operand::StaticVariable(ExternalReference(counter));
2353    if (value == 1) {
2354      inc(operand);
2355    } else {
2356      add(operand, Immediate(value));
2357    }
2358  }
2359}
2360
2361
2362void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2363  DCHECK(value > 0);
2364  if (FLAG_native_code_counters && counter->Enabled()) {
2365    Operand operand = Operand::StaticVariable(ExternalReference(counter));
2366    if (value == 1) {
2367      dec(operand);
2368    } else {
2369      sub(operand, Immediate(value));
2370    }
2371  }
2372}
2373
2374
2375void MacroAssembler::IncrementCounter(Condition cc,
2376                                      StatsCounter* counter,
2377                                      int value) {
2378  DCHECK(value > 0);
2379  if (FLAG_native_code_counters && counter->Enabled()) {
2380    Label skip;
2381    j(NegateCondition(cc), &skip);
2382    pushfd();
2383    IncrementCounter(counter, value);
2384    popfd();
2385    bind(&skip);
2386  }
2387}
2388
2389
2390void MacroAssembler::DecrementCounter(Condition cc,
2391                                      StatsCounter* counter,
2392                                      int value) {
2393  DCHECK(value > 0);
2394  if (FLAG_native_code_counters && counter->Enabled()) {
2395    Label skip;
2396    j(NegateCondition(cc), &skip);
2397    pushfd();
2398    DecrementCounter(counter, value);
2399    popfd();
2400    bind(&skip);
2401  }
2402}
2403
2404
2405void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
2406  if (emit_debug_code()) Check(cc, reason);
2407}
2408
2409
2410void MacroAssembler::AssertFastElements(Register elements) {
2411  if (emit_debug_code()) {
2412    Factory* factory = isolate()->factory();
2413    Label ok;
2414    cmp(FieldOperand(elements, HeapObject::kMapOffset),
2415        Immediate(factory->fixed_array_map()));
2416    j(equal, &ok);
2417    cmp(FieldOperand(elements, HeapObject::kMapOffset),
2418        Immediate(factory->fixed_double_array_map()));
2419    j(equal, &ok);
2420    cmp(FieldOperand(elements, HeapObject::kMapOffset),
2421        Immediate(factory->fixed_cow_array_map()));
2422    j(equal, &ok);
2423    Abort(kJSObjectWithFastElementsMapHasSlowElements);
2424    bind(&ok);
2425  }
2426}
2427
2428
2429void MacroAssembler::Check(Condition cc, BailoutReason reason) {
2430  Label L;
2431  j(cc, &L);
2432  Abort(reason);
2433  // will not return here
2434  bind(&L);
2435}
2436
2437
2438void MacroAssembler::CheckStackAlignment() {
2439  int frame_alignment = base::OS::ActivationFrameAlignment();
2440  int frame_alignment_mask = frame_alignment - 1;
2441  if (frame_alignment > kPointerSize) {
2442    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2443    Label alignment_as_expected;
2444    test(esp, Immediate(frame_alignment_mask));
2445    j(zero, &alignment_as_expected);
2446    // Abort if stack is not aligned.
2447    int3();
2448    bind(&alignment_as_expected);
2449  }
2450}
2451
2452
2453void MacroAssembler::Abort(BailoutReason reason) {
2454#ifdef DEBUG
2455  const char* msg = GetBailoutReason(reason);
2456  if (msg != NULL) {
2457    RecordComment("Abort message: ");
2458    RecordComment(msg);
2459  }
2460
2461  if (FLAG_trap_on_abort) {
2462    int3();
2463    return;
2464  }
2465#endif
2466
2467  // Check if Abort() has already been initialized.
2468  DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
2469
2470  Move(edx, Smi::FromInt(static_cast<int>(reason)));
2471
2472  // Disable stub call restrictions to always allow calls to abort.
2473  if (!has_frame_) {
2474    // We don't actually want to generate a pile of code for this, so just
2475    // claim there is a stack frame, without generating one.
2476    FrameScope scope(this, StackFrame::NONE);
2477    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2478  } else {
2479    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
2480  }
2481  // will not return here
2482  int3();
2483}
2484
2485
2486void MacroAssembler::LoadInstanceDescriptors(Register map,
2487                                             Register descriptors) {
2488  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
2489}
2490
2491
2492void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
2493  mov(dst, FieldOperand(map, Map::kBitField3Offset));
2494  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
2495}
2496
2497
2498void MacroAssembler::LoadAccessor(Register dst, Register holder,
2499                                  int accessor_index,
2500                                  AccessorComponent accessor) {
2501  mov(dst, FieldOperand(holder, HeapObject::kMapOffset));
2502  LoadInstanceDescriptors(dst, dst);
2503  mov(dst, FieldOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
2504  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
2505                                           : AccessorPair::kSetterOffset;
2506  mov(dst, FieldOperand(dst, offset));
2507}
2508
2509
2510void MacroAssembler::LoadPowerOf2(XMMRegister dst,
2511                                  Register scratch,
2512                                  int power) {
2513  DCHECK(is_uintn(power + HeapNumber::kExponentBias,
2514                  HeapNumber::kExponentBits));
2515  mov(scratch, Immediate(power + HeapNumber::kExponentBias));
2516  movd(dst, scratch);
2517  psllq(dst, HeapNumber::kMantissaBits);
2518}
2519
2520void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
2521                                                           Register object2,
2522                                                           Register scratch1,
2523                                                           Register scratch2,
2524                                                           Label* failure) {
2525  // Check that both objects are not smis.
2526  STATIC_ASSERT(kSmiTag == 0);
2527  mov(scratch1, object1);
2528  and_(scratch1, object2);
2529  JumpIfSmi(scratch1, failure);
2530
2531  // Load instance type for both strings.
2532  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
2533  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
2534  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2535  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2536
2537  // Check that both are flat one-byte strings.
2538  const int kFlatOneByteStringMask =
2539      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2540  const int kFlatOneByteStringTag =
2541      kStringTag | kOneByteStringTag | kSeqStringTag;
2542  // Interleave bits from both instance types and compare them in one check.
2543  const int kShift = 8;
2544  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
2545  and_(scratch1, kFlatOneByteStringMask);
2546  and_(scratch2, kFlatOneByteStringMask);
2547  shl(scratch2, kShift);
2548  or_(scratch1, scratch2);
2549  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
2550  j(not_equal, failure);
2551}
2552
2553
2554void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2555                                                     Label* not_unique_name,
2556                                                     Label::Distance distance) {
2557  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2558  Label succeed;
2559  test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2560  j(zero, &succeed);
2561  cmpb(operand, Immediate(SYMBOL_TYPE));
2562  j(not_equal, not_unique_name, distance);
2563
2564  bind(&succeed);
2565}
2566
2567
2568void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
2569                                               Register index,
2570                                               Register value,
2571                                               uint32_t encoding_mask) {
2572  Label is_object;
2573  JumpIfNotSmi(string, &is_object, Label::kNear);
2574  Abort(kNonObject);
2575  bind(&is_object);
2576
2577  push(value);
2578  mov(value, FieldOperand(string, HeapObject::kMapOffset));
2579  movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
2580
2581  and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
2582  cmp(value, Immediate(encoding_mask));
2583  pop(value);
2584  Check(equal, kUnexpectedStringType);
2585
2586  // The index is assumed to be untagged coming in, tag it to compare with the
2587  // string length without using a temp register, it is restored at the end of
2588  // this function.
2589  SmiTag(index);
2590  Check(no_overflow, kIndexIsTooLarge);
2591
2592  cmp(index, FieldOperand(string, String::kLengthOffset));
2593  Check(less, kIndexIsTooLarge);
2594
2595  cmp(index, Immediate(Smi::kZero));
2596  Check(greater_equal, kIndexIsNegative);
2597
2598  // Restore the index
2599  SmiUntag(index);
2600}
2601
2602
2603void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
2604  int frame_alignment = base::OS::ActivationFrameAlignment();
2605  if (frame_alignment != 0) {
2606    // Make stack end at alignment and make room for num_arguments words
2607    // and the original value of esp.
2608    mov(scratch, esp);
2609    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
2610    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2611    and_(esp, -frame_alignment);
2612    mov(Operand(esp, num_arguments * kPointerSize), scratch);
2613  } else {
2614    sub(esp, Immediate(num_arguments * kPointerSize));
2615  }
2616}
2617
2618
2619void MacroAssembler::CallCFunction(ExternalReference function,
2620                                   int num_arguments) {
2621  // Trashing eax is ok as it will be the return value.
2622  mov(eax, Immediate(function));
2623  CallCFunction(eax, num_arguments);
2624}
2625
2626
2627void MacroAssembler::CallCFunction(Register function,
2628                                   int num_arguments) {
2629  DCHECK(has_frame());
2630  // Check stack alignment.
2631  if (emit_debug_code()) {
2632    CheckStackAlignment();
2633  }
2634
2635  call(function);
2636  if (base::OS::ActivationFrameAlignment() != 0) {
2637    mov(esp, Operand(esp, num_arguments * kPointerSize));
2638  } else {
2639    add(esp, Immediate(num_arguments * kPointerSize));
2640  }
2641}
2642
2643
2644#ifdef DEBUG
2645bool AreAliased(Register reg1,
2646                Register reg2,
2647                Register reg3,
2648                Register reg4,
2649                Register reg5,
2650                Register reg6,
2651                Register reg7,
2652                Register reg8) {
2653  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
2654      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
2655      reg7.is_valid() + reg8.is_valid();
2656
2657  RegList regs = 0;
2658  if (reg1.is_valid()) regs |= reg1.bit();
2659  if (reg2.is_valid()) regs |= reg2.bit();
2660  if (reg3.is_valid()) regs |= reg3.bit();
2661  if (reg4.is_valid()) regs |= reg4.bit();
2662  if (reg5.is_valid()) regs |= reg5.bit();
2663  if (reg6.is_valid()) regs |= reg6.bit();
2664  if (reg7.is_valid()) regs |= reg7.bit();
2665  if (reg8.is_valid()) regs |= reg8.bit();
2666  int n_of_non_aliasing_regs = NumRegs(regs);
2667
2668  return n_of_valid_regs != n_of_non_aliasing_regs;
2669}
2670#endif
2671
2672
2673CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
2674    : address_(address),
2675      size_(size),
2676      masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
2677  // Create a new macro assembler pointing to the address of the code to patch.
2678  // The size is adjusted with kGap on order for the assembler to generate size
2679  // bytes of instructions without failing with buffer size constraints.
2680  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2681}
2682
2683
2684CodePatcher::~CodePatcher() {
2685  // Indicate that code has changed.
2686  Assembler::FlushICache(masm_.isolate(), address_, size_);
2687
2688  // Check that the code was patched as expected.
2689  DCHECK(masm_.pc_ == address_ + size_);
2690  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2691}
2692
2693
2694void MacroAssembler::CheckPageFlag(
2695    Register object,
2696    Register scratch,
2697    int mask,
2698    Condition cc,
2699    Label* condition_met,
2700    Label::Distance condition_met_distance) {
2701  DCHECK(cc == zero || cc == not_zero);
2702  if (scratch.is(object)) {
2703    and_(scratch, Immediate(~Page::kPageAlignmentMask));
2704  } else {
2705    mov(scratch, Immediate(~Page::kPageAlignmentMask));
2706    and_(scratch, object);
2707  }
2708  if (mask < (1 << kBitsPerByte)) {
2709    test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
2710  } else {
2711    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
2712  }
2713  j(cc, condition_met, condition_met_distance);
2714}
2715
2716
2717void MacroAssembler::CheckPageFlagForMap(
2718    Handle<Map> map,
2719    int mask,
2720    Condition cc,
2721    Label* condition_met,
2722    Label::Distance condition_met_distance) {
2723  DCHECK(cc == zero || cc == not_zero);
2724  Page* page = Page::FromAddress(map->address());
2725  DCHECK(!serializer_enabled());  // Serializer cannot match page_flags.
2726  ExternalReference reference(ExternalReference::page_flags(page));
2727  // The inlined static address check of the page's flags relies
2728  // on maps never being compacted.
2729  DCHECK(!isolate()->heap()->mark_compact_collector()->
2730         IsOnEvacuationCandidate(*map));
2731  if (mask < (1 << kBitsPerByte)) {
2732    test_b(Operand::StaticVariable(reference), Immediate(mask));
2733  } else {
2734    test(Operand::StaticVariable(reference), Immediate(mask));
2735  }
2736  j(cc, condition_met, condition_met_distance);
2737}
2738
2739
2740void MacroAssembler::JumpIfBlack(Register object,
2741                                 Register scratch0,
2742                                 Register scratch1,
2743                                 Label* on_black,
2744                                 Label::Distance on_black_near) {
2745  HasColor(object, scratch0, scratch1, on_black, on_black_near, 1,
2746           1);  // kBlackBitPattern.
2747  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
2748}
2749
2750
2751void MacroAssembler::HasColor(Register object,
2752                              Register bitmap_scratch,
2753                              Register mask_scratch,
2754                              Label* has_color,
2755                              Label::Distance has_color_distance,
2756                              int first_bit,
2757                              int second_bit) {
2758  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
2759
2760  GetMarkBits(object, bitmap_scratch, mask_scratch);
2761
2762  Label other_color, word_boundary;
2763  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2764  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
2765  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
2766  j(zero, &word_boundary, Label::kNear);
2767  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2768  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
2769  jmp(&other_color, Label::kNear);
2770
2771  bind(&word_boundary);
2772  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize),
2773         Immediate(1));
2774
2775  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
2776  bind(&other_color);
2777}
2778
2779
2780void MacroAssembler::GetMarkBits(Register addr_reg,
2781                                 Register bitmap_reg,
2782                                 Register mask_reg) {
2783  DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
2784  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
2785  and_(bitmap_reg, addr_reg);
2786  mov(ecx, addr_reg);
2787  int shift =
2788      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
2789  shr(ecx, shift);
2790  and_(ecx,
2791       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
2792
2793  add(bitmap_reg, ecx);
2794  mov(ecx, addr_reg);
2795  shr(ecx, kPointerSizeLog2);
2796  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
2797  mov(mask_reg, Immediate(1));
2798  shl_cl(mask_reg);
2799}
2800
2801
2802void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
2803                                 Register mask_scratch, Label* value_is_white,
2804                                 Label::Distance distance) {
2805  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
2806  GetMarkBits(value, bitmap_scratch, mask_scratch);
2807
2808  // If the value is black or grey we don't need to do anything.
2809  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
2810  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
2811  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
2812  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
2813
2814  // Since both black and grey have a 1 in the first position and white does
2815  // not have a 1 there we only need to check one bit.
2816  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
2817  j(zero, value_is_white, Label::kNear);
2818}
2819
2820
2821void MacroAssembler::EnumLength(Register dst, Register map) {
2822  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
2823  mov(dst, FieldOperand(map, Map::kBitField3Offset));
2824  and_(dst, Immediate(Map::EnumLengthBits::kMask));
2825  SmiTag(dst);
2826}
2827
2828
2829void MacroAssembler::CheckEnumCache(Label* call_runtime) {
2830  Label next, start;
2831  mov(ecx, eax);
2832
2833  // Check if the enum length field is properly initialized, indicating that
2834  // there is an enum cache.
2835  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
2836
2837  EnumLength(edx, ebx);
2838  cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
2839  j(equal, call_runtime);
2840
2841  jmp(&start);
2842
2843  bind(&next);
2844  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
2845
2846  // For all objects but the receiver, check that the cache is empty.
2847  EnumLength(edx, ebx);
2848  cmp(edx, Immediate(Smi::kZero));
2849  j(not_equal, call_runtime);
2850
2851  bind(&start);
2852
2853  // Check that there are no elements. Register rcx contains the current JS
2854  // object we've reached through the prototype chain.
2855  Label no_elements;
2856  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
2857  cmp(ecx, isolate()->factory()->empty_fixed_array());
2858  j(equal, &no_elements);
2859
2860  // Second chance, the object may be using the empty slow element dictionary.
2861  cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
2862  j(not_equal, call_runtime);
2863
2864  bind(&no_elements);
2865  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
2866  cmp(ecx, isolate()->factory()->null_value());
2867  j(not_equal, &next);
2868}
2869
2870
2871void MacroAssembler::TestJSArrayForAllocationMemento(
2872    Register receiver_reg,
2873    Register scratch_reg,
2874    Label* no_memento_found) {
2875  Label map_check;
2876  Label top_check;
2877  ExternalReference new_space_allocation_top =
2878      ExternalReference::new_space_allocation_top_address(isolate());
2879  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
2880  const int kMementoLastWordOffset =
2881      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
2882
2883  // Bail out if the object is not in new space.
2884  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
2885  // If the object is in new space, we need to check whether it is on the same
2886  // page as the current top.
2887  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
2888  xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
2889  test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
2890  j(zero, &top_check);
2891  // The object is on a different page than allocation top. Bail out if the
2892  // object sits on the page boundary as no memento can follow and we cannot
2893  // touch the memory following it.
2894  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
2895  xor_(scratch_reg, receiver_reg);
2896  test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
2897  j(not_zero, no_memento_found);
2898  // Continue with the actual map check.
2899  jmp(&map_check);
2900  // If top is on the same page as the current object, we need to check whether
2901  // we are below top.
2902  bind(&top_check);
2903  lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
2904  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
2905  j(greater_equal, no_memento_found);
2906  // Memento map check.
2907  bind(&map_check);
2908  mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
2909  cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
2910}
2911
2912void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
2913  DCHECK(!dividend.is(eax));
2914  DCHECK(!dividend.is(edx));
2915  base::MagicNumbersForDivision<uint32_t> mag =
2916      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
2917  mov(eax, Immediate(mag.multiplier));
2918  imul(dividend);
2919  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
2920  if (divisor > 0 && neg) add(edx, dividend);
2921  if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
2922  if (mag.shift > 0) sar(edx, mag.shift);
2923  mov(eax, dividend);
2924  shr(eax, 31);
2925  add(edx, eax);
2926}
2927
2928
2929}  // namespace internal
2930}  // namespace v8
2931
2932#endif  // V8_TARGET_ARCH_IA32
2933