1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include <limits.h>  // For LONG_MIN, LONG_MAX.
29
30#include "v8.h"
31
32#if V8_TARGET_ARCH_ARM
33
34#include "bootstrapper.h"
35#include "codegen.h"
36#include "cpu-profiler.h"
37#include "debug.h"
38#include "isolate-inl.h"
39#include "runtime.h"
40
41namespace v8 {
42namespace internal {
43
44MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45    : Assembler(arg_isolate, buffer, size),
46      generating_stub_(false),
47      has_frame_(false) {
48  if (isolate() != NULL) {
49    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
50                                  isolate());
51  }
52}
53
54
55void MacroAssembler::Jump(Register target, Condition cond) {
56  bx(target, cond);
57}
58
59
60void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
61                          Condition cond) {
62  mov(ip, Operand(target, rmode));
63  bx(ip, cond);
64}
65
66
67void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
68                          Condition cond) {
69  ASSERT(!RelocInfo::IsCodeTarget(rmode));
70  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
71}
72
73
74void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
75                          Condition cond) {
76  ASSERT(RelocInfo::IsCodeTarget(rmode));
77  // 'code' is always generated ARM code, never THUMB code
78  AllowDeferredHandleDereference embedding_raw_address;
79  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
80}
81
82
83int MacroAssembler::CallSize(Register target, Condition cond) {
84  return kInstrSize;
85}
86
87
88void MacroAssembler::Call(Register target, Condition cond) {
89  // Block constant pool for the call instruction sequence.
90  BlockConstPoolScope block_const_pool(this);
91  Label start;
92  bind(&start);
93  blx(target, cond);
94  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
95}
96
97
98int MacroAssembler::CallSize(
99    Address target, RelocInfo::Mode rmode, Condition cond) {
100  int size = 2 * kInstrSize;
101  Instr mov_instr = cond | MOV | LeaveCC;
102  intptr_t immediate = reinterpret_cast<intptr_t>(target);
103  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
104    size += kInstrSize;
105  }
106  return size;
107}
108
109
110int MacroAssembler::CallSizeNotPredictableCodeSize(
111    Address target, RelocInfo::Mode rmode, Condition cond) {
112  int size = 2 * kInstrSize;
113  Instr mov_instr = cond | MOV | LeaveCC;
114  intptr_t immediate = reinterpret_cast<intptr_t>(target);
115  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
116    size += kInstrSize;
117  }
118  return size;
119}
120
121
122void MacroAssembler::Call(Address target,
123                          RelocInfo::Mode rmode,
124                          Condition cond,
125                          TargetAddressStorageMode mode) {
126  // Block constant pool for the call instruction sequence.
127  BlockConstPoolScope block_const_pool(this);
128  Label start;
129  bind(&start);
130
131  bool old_predictable_code_size = predictable_code_size();
132  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
133    set_predictable_code_size(true);
134  }
135
136  // Call sequence on V7 or later may be :
137  //  movw  ip, #... @ call address low 16
138  //  movt  ip, #... @ call address high 16
139  //  blx   ip
140  //                      @ return address
141  // Or for pre-V7 or values that may be back-patched
142  // to avoid ICache flushes:
143  //  ldr   ip, [pc, #...] @ call address
144  //  blx   ip
145  //                      @ return address
146
147  // Statement positions are expected to be recorded when the target
148  // address is loaded. The mov method will automatically record
149  // positions when pc is the target, since this is not the case here
150  // we have to do it explicitly.
151  positions_recorder()->WriteRecordedPositions();
152
153  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
154  blx(ip, cond);
155
156  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
157  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
158    set_predictable_code_size(old_predictable_code_size);
159  }
160}
161
162
163int MacroAssembler::CallSize(Handle<Code> code,
164                             RelocInfo::Mode rmode,
165                             TypeFeedbackId ast_id,
166                             Condition cond) {
167  AllowDeferredHandleDereference using_raw_address;
168  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
169}
170
171
172void MacroAssembler::Call(Handle<Code> code,
173                          RelocInfo::Mode rmode,
174                          TypeFeedbackId ast_id,
175                          Condition cond,
176                          TargetAddressStorageMode mode) {
177  Label start;
178  bind(&start);
179  ASSERT(RelocInfo::IsCodeTarget(rmode));
180  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
181    SetRecordedAstId(ast_id);
182    rmode = RelocInfo::CODE_TARGET_WITH_ID;
183  }
184  // 'code' is always generated ARM code, never THUMB code
185  AllowDeferredHandleDereference embedding_raw_address;
186  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
187}
188
189
190void MacroAssembler::Ret(Condition cond) {
191  bx(lr, cond);
192}
193
194
195void MacroAssembler::Drop(int count, Condition cond) {
196  if (count > 0) {
197    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
198  }
199}
200
201
202void MacroAssembler::Ret(int drop, Condition cond) {
203  Drop(drop, cond);
204  Ret(cond);
205}
206
207
208void MacroAssembler::Swap(Register reg1,
209                          Register reg2,
210                          Register scratch,
211                          Condition cond) {
212  if (scratch.is(no_reg)) {
213    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
214    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
215    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
216  } else {
217    mov(scratch, reg1, LeaveCC, cond);
218    mov(reg1, reg2, LeaveCC, cond);
219    mov(reg2, scratch, LeaveCC, cond);
220  }
221}
222
223
224void MacroAssembler::Call(Label* target) {
225  bl(target);
226}
227
228
229void MacroAssembler::Push(Handle<Object> handle) {
230  mov(ip, Operand(handle));
231  push(ip);
232}
233
234
235void MacroAssembler::Move(Register dst, Handle<Object> value) {
236  AllowDeferredHandleDereference smi_check;
237  if (value->IsSmi()) {
238    mov(dst, Operand(value));
239  } else {
240    ASSERT(value->IsHeapObject());
241    if (isolate()->heap()->InNewSpace(*value)) {
242      Handle<Cell> cell = isolate()->factory()->NewCell(value);
243      mov(dst, Operand(cell));
244      ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
245    } else {
246      mov(dst, Operand(value));
247    }
248  }
249}
250
251
252void MacroAssembler::Move(Register dst, Register src, Condition cond) {
253  if (!dst.is(src)) {
254    mov(dst, src, LeaveCC, cond);
255  }
256}
257
258
259void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
260  if (!dst.is(src)) {
261    vmov(dst, src);
262  }
263}
264
265
266void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
267                         Condition cond) {
268  if (!src2.is_reg() &&
269      !src2.must_output_reloc_info(this) &&
270      src2.immediate() == 0) {
271    mov(dst, Operand::Zero(), LeaveCC, cond);
272  } else if (!src2.is_single_instruction(this) &&
273             !src2.must_output_reloc_info(this) &&
274             CpuFeatures::IsSupported(ARMv7) &&
275             IsPowerOf2(src2.immediate() + 1)) {
276    ubfx(dst, src1, 0,
277        WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
278  } else {
279    and_(dst, src1, src2, LeaveCC, cond);
280  }
281}
282
283
284void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
285                          Condition cond) {
286  ASSERT(lsb < 32);
287  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
288    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
289    and_(dst, src1, Operand(mask), LeaveCC, cond);
290    if (lsb != 0) {
291      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
292    }
293  } else {
294    ubfx(dst, src1, lsb, width, cond);
295  }
296}
297
298
299void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
300                          Condition cond) {
301  ASSERT(lsb < 32);
302  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
303    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
304    and_(dst, src1, Operand(mask), LeaveCC, cond);
305    int shift_up = 32 - lsb - width;
306    int shift_down = lsb + shift_up;
307    if (shift_up != 0) {
308      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
309    }
310    if (shift_down != 0) {
311      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
312    }
313  } else {
314    sbfx(dst, src1, lsb, width, cond);
315  }
316}
317
318
319void MacroAssembler::Bfi(Register dst,
320                         Register src,
321                         Register scratch,
322                         int lsb,
323                         int width,
324                         Condition cond) {
325  ASSERT(0 <= lsb && lsb < 32);
326  ASSERT(0 <= width && width < 32);
327  ASSERT(lsb + width < 32);
328  ASSERT(!scratch.is(dst));
329  if (width == 0) return;
330  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
331    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
332    bic(dst, dst, Operand(mask));
333    and_(scratch, src, Operand((1 << width) - 1));
334    mov(scratch, Operand(scratch, LSL, lsb));
335    orr(dst, dst, scratch);
336  } else {
337    bfi(dst, src, lsb, width, cond);
338  }
339}
340
341
342void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
343                         Condition cond) {
344  ASSERT(lsb < 32);
345  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
346    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
347    bic(dst, src, Operand(mask));
348  } else {
349    Move(dst, src, cond);
350    bfc(dst, lsb, width, cond);
351  }
352}
353
354
355void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
356                          Condition cond) {
357  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
358    ASSERT(!dst.is(pc) && !src.rm().is(pc));
359    ASSERT((satpos >= 0) && (satpos <= 31));
360
361    // These asserts are required to ensure compatibility with the ARMv7
362    // implementation.
363    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
364    ASSERT(src.rs().is(no_reg));
365
366    Label done;
367    int satval = (1 << satpos) - 1;
368
369    if (cond != al) {
370      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
371    }
372    if (!(src.is_reg() && dst.is(src.rm()))) {
373      mov(dst, src);
374    }
375    tst(dst, Operand(~satval));
376    b(eq, &done);
377    mov(dst, Operand::Zero(), LeaveCC, mi);  // 0 if negative.
378    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
379    bind(&done);
380  } else {
381    usat(dst, satpos, src, cond);
382  }
383}
384
385
386void MacroAssembler::Load(Register dst,
387                          const MemOperand& src,
388                          Representation r) {
389  ASSERT(!r.IsDouble());
390  if (r.IsInteger8()) {
391    ldrsb(dst, src);
392  } else if (r.IsUInteger8()) {
393    ldrb(dst, src);
394  } else if (r.IsInteger16()) {
395    ldrsh(dst, src);
396  } else if (r.IsUInteger16()) {
397    ldrh(dst, src);
398  } else {
399    ldr(dst, src);
400  }
401}
402
403
404void MacroAssembler::Store(Register src,
405                           const MemOperand& dst,
406                           Representation r) {
407  ASSERT(!r.IsDouble());
408  if (r.IsInteger8() || r.IsUInteger8()) {
409    strb(src, dst);
410  } else if (r.IsInteger16() || r.IsUInteger16()) {
411    strh(src, dst);
412  } else {
413    str(src, dst);
414  }
415}
416
417
418void MacroAssembler::LoadRoot(Register destination,
419                              Heap::RootListIndex index,
420                              Condition cond) {
421  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
422      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
423      !predictable_code_size()) {
424    // The CPU supports fast immediate values, and this root will never
425    // change. We will load it as a relocatable immediate value.
426    Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
427    mov(destination, Operand(root), LeaveCC, cond);
428    return;
429  }
430  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
431}
432
433
434void MacroAssembler::StoreRoot(Register source,
435                               Heap::RootListIndex index,
436                               Condition cond) {
437  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
438}
439
440
441void MacroAssembler::InNewSpace(Register object,
442                                Register scratch,
443                                Condition cond,
444                                Label* branch) {
445  ASSERT(cond == eq || cond == ne);
446  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
447  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
448  b(cond, branch);
449}
450
451
452void MacroAssembler::RecordWriteField(
453    Register object,
454    int offset,
455    Register value,
456    Register dst,
457    LinkRegisterStatus lr_status,
458    SaveFPRegsMode save_fp,
459    RememberedSetAction remembered_set_action,
460    SmiCheck smi_check) {
461  // First, check if a write barrier is even needed. The tests below
462  // catch stores of Smis.
463  Label done;
464
465  // Skip barrier if writing a smi.
466  if (smi_check == INLINE_SMI_CHECK) {
467    JumpIfSmi(value, &done);
468  }
469
470  // Although the object register is tagged, the offset is relative to the start
471  // of the object, so so offset must be a multiple of kPointerSize.
472  ASSERT(IsAligned(offset, kPointerSize));
473
474  add(dst, object, Operand(offset - kHeapObjectTag));
475  if (emit_debug_code()) {
476    Label ok;
477    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
478    b(eq, &ok);
479    stop("Unaligned cell in write barrier");
480    bind(&ok);
481  }
482
483  RecordWrite(object,
484              dst,
485              value,
486              lr_status,
487              save_fp,
488              remembered_set_action,
489              OMIT_SMI_CHECK);
490
491  bind(&done);
492
493  // Clobber clobbered input registers when running with the debug-code flag
494  // turned on to provoke errors.
495  if (emit_debug_code()) {
496    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
497    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
498  }
499}
500
501
502// Will clobber 4 registers: object, address, scratch, ip.  The
503// register 'object' contains a heap object pointer.  The heap object
504// tag is shifted away.
505void MacroAssembler::RecordWrite(Register object,
506                                 Register address,
507                                 Register value,
508                                 LinkRegisterStatus lr_status,
509                                 SaveFPRegsMode fp_mode,
510                                 RememberedSetAction remembered_set_action,
511                                 SmiCheck smi_check) {
512  if (emit_debug_code()) {
513    ldr(ip, MemOperand(address));
514    cmp(ip, value);
515    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
516  }
517
518  // Count number of write barriers in generated code.
519  isolate()->counters()->write_barriers_static()->Increment();
520  // TODO(mstarzinger): Dynamic counter missing.
521
522  // First, check if a write barrier is even needed. The tests below
523  // catch stores of smis and stores into the young generation.
524  Label done;
525
526  if (smi_check == INLINE_SMI_CHECK) {
527    JumpIfSmi(value, &done);
528  }
529
530  CheckPageFlag(value,
531                value,  // Used as scratch.
532                MemoryChunk::kPointersToHereAreInterestingMask,
533                eq,
534                &done);
535  CheckPageFlag(object,
536                value,  // Used as scratch.
537                MemoryChunk::kPointersFromHereAreInterestingMask,
538                eq,
539                &done);
540
541  // Record the actual write.
542  if (lr_status == kLRHasNotBeenSaved) {
543    push(lr);
544  }
545  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
546  CallStub(&stub);
547  if (lr_status == kLRHasNotBeenSaved) {
548    pop(lr);
549  }
550
551  bind(&done);
552
553  // Clobber clobbered registers when running with the debug-code flag
554  // turned on to provoke errors.
555  if (emit_debug_code()) {
556    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
557    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
558  }
559}
560
561
562void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
563                                         Register address,
564                                         Register scratch,
565                                         SaveFPRegsMode fp_mode,
566                                         RememberedSetFinalAction and_then) {
567  Label done;
568  if (emit_debug_code()) {
569    Label ok;
570    JumpIfNotInNewSpace(object, scratch, &ok);
571    stop("Remembered set pointer is in new space");
572    bind(&ok);
573  }
574  // Load store buffer top.
575  ExternalReference store_buffer =
576      ExternalReference::store_buffer_top(isolate());
577  mov(ip, Operand(store_buffer));
578  ldr(scratch, MemOperand(ip));
579  // Store pointer to buffer and increment buffer top.
580  str(address, MemOperand(scratch, kPointerSize, PostIndex));
581  // Write back new top of buffer.
582  str(scratch, MemOperand(ip));
583  // Call stub on end of buffer.
584  // Check for end of buffer.
585  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
586  if (and_then == kFallThroughAtEnd) {
587    b(eq, &done);
588  } else {
589    ASSERT(and_then == kReturnAtEnd);
590    Ret(eq);
591  }
592  push(lr);
593  StoreBufferOverflowStub store_buffer_overflow =
594      StoreBufferOverflowStub(fp_mode);
595  CallStub(&store_buffer_overflow);
596  pop(lr);
597  bind(&done);
598  if (and_then == kReturnAtEnd) {
599    Ret();
600  }
601}
602
603
604// Push and pop all registers that can hold pointers.
605void MacroAssembler::PushSafepointRegisters() {
606  // Safepoints expect a block of contiguous register values starting with r0:
607  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
608  // Safepoints expect a block of kNumSafepointRegisters values on the
609  // stack, so adjust the stack for unsaved registers.
610  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
611  ASSERT(num_unsaved >= 0);
612  sub(sp, sp, Operand(num_unsaved * kPointerSize));
613  stm(db_w, sp, kSafepointSavedRegisters);
614}
615
616
617void MacroAssembler::PopSafepointRegisters() {
618  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
619  ldm(ia_w, sp, kSafepointSavedRegisters);
620  add(sp, sp, Operand(num_unsaved * kPointerSize));
621}
622
623
624void MacroAssembler::PushSafepointRegistersAndDoubles() {
625  // Number of d-regs not known at snapshot time.
626  ASSERT(!Serializer::enabled());
627  PushSafepointRegisters();
628  // Only save allocatable registers.
629  ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
630  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
631  if (CpuFeatures::IsSupported(VFP32DREGS)) {
632    vstm(db_w, sp, d16, d31);
633  }
634  vstm(db_w, sp, d0, d13);
635}
636
637
638void MacroAssembler::PopSafepointRegistersAndDoubles() {
639  // Number of d-regs not known at snapshot time.
640  ASSERT(!Serializer::enabled());
641  // Only save allocatable registers.
642  ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
643  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
644  vldm(ia_w, sp, d0, d13);
645  if (CpuFeatures::IsSupported(VFP32DREGS)) {
646    vldm(ia_w, sp, d16, d31);
647  }
648  PopSafepointRegisters();
649}
650
651void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
652                                                             Register dst) {
653  str(src, SafepointRegistersAndDoublesSlot(dst));
654}
655
656
657void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
658  str(src, SafepointRegisterSlot(dst));
659}
660
661
662void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
663  ldr(dst, SafepointRegisterSlot(src));
664}
665
666
667int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
668  // The registers are pushed starting with the highest encoding,
669  // which means that lowest encodings are closest to the stack pointer.
670  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
671  return reg_code;
672}
673
674
675MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
676  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
677}
678
679
680MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
681  // Number of d-regs not known at snapshot time.
682  ASSERT(!Serializer::enabled());
683  // General purpose registers are pushed last on the stack.
684  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
685  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
686  return MemOperand(sp, doubles_size + register_offset);
687}
688
689
690void MacroAssembler::Ldrd(Register dst1, Register dst2,
691                          const MemOperand& src, Condition cond) {
692  ASSERT(src.rm().is(no_reg));
693  ASSERT(!dst1.is(lr));  // r14.
694
695  // V8 does not use this addressing mode, so the fallback code
696  // below doesn't support it yet.
697  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
698
699  // Generate two ldr instructions if ldrd is not available.
700  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
701      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
702    CpuFeatureScope scope(this, ARMv7);
703    ldrd(dst1, dst2, src, cond);
704  } else {
705    if ((src.am() == Offset) || (src.am() == NegOffset)) {
706      MemOperand src2(src);
707      src2.set_offset(src2.offset() + 4);
708      if (dst1.is(src.rn())) {
709        ldr(dst2, src2, cond);
710        ldr(dst1, src, cond);
711      } else {
712        ldr(dst1, src, cond);
713        ldr(dst2, src2, cond);
714      }
715    } else {  // PostIndex or NegPostIndex.
716      ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
717      if (dst1.is(src.rn())) {
718        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
719        ldr(dst1, src, cond);
720      } else {
721        MemOperand src2(src);
722        src2.set_offset(src2.offset() - 4);
723        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
724        ldr(dst2, src2, cond);
725      }
726    }
727  }
728}
729
730
731void MacroAssembler::Strd(Register src1, Register src2,
732                          const MemOperand& dst, Condition cond) {
733  ASSERT(dst.rm().is(no_reg));
734  ASSERT(!src1.is(lr));  // r14.
735
736  // V8 does not use this addressing mode, so the fallback code
737  // below doesn't support it yet.
738  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
739
740  // Generate two str instructions if strd is not available.
741  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
742      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
743    CpuFeatureScope scope(this, ARMv7);
744    strd(src1, src2, dst, cond);
745  } else {
746    MemOperand dst2(dst);
747    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
748      dst2.set_offset(dst2.offset() + 4);
749      str(src1, dst, cond);
750      str(src2, dst2, cond);
751    } else {  // PostIndex or NegPostIndex.
752      ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
753      dst2.set_offset(dst2.offset() - 4);
754      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
755      str(src2, dst2, cond);
756    }
757  }
758}
759
760
761void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
762  // If needed, restore wanted bits of FPSCR.
763  Label fpscr_done;
764  vmrs(scratch);
765  tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
766  b(ne, &fpscr_done);
767  orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
768  vmsr(scratch);
769  bind(&fpscr_done);
770}
771
772
773void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
774                                        const DwVfpRegister src,
775                                        const Condition cond) {
776  vsub(dst, src, kDoubleRegZero, cond);
777}
778
779
780void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
781                                           const DwVfpRegister src2,
782                                           const Condition cond) {
783  // Compare and move FPSCR flags to the normal condition flags.
784  VFPCompareAndLoadFlags(src1, src2, pc, cond);
785}
786
787void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
788                                           const double src2,
789                                           const Condition cond) {
790  // Compare and move FPSCR flags to the normal condition flags.
791  VFPCompareAndLoadFlags(src1, src2, pc, cond);
792}
793
794
795void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
796                                            const DwVfpRegister src2,
797                                            const Register fpscr_flags,
798                                            const Condition cond) {
799  // Compare and load FPSCR.
800  vcmp(src1, src2, cond);
801  vmrs(fpscr_flags, cond);
802}
803
804void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
805                                            const double src2,
806                                            const Register fpscr_flags,
807                                            const Condition cond) {
808  // Compare and load FPSCR.
809  vcmp(src1, src2, cond);
810  vmrs(fpscr_flags, cond);
811}
812
813void MacroAssembler::Vmov(const DwVfpRegister dst,
814                          const double imm,
815                          const Register scratch) {
816  static const DoubleRepresentation minus_zero(-0.0);
817  static const DoubleRepresentation zero(0.0);
818  DoubleRepresentation value(imm);
819  // Handle special values first.
820  if (value.bits == zero.bits) {
821    vmov(dst, kDoubleRegZero);
822  } else if (value.bits == minus_zero.bits) {
823    vneg(dst, kDoubleRegZero);
824  } else {
825    vmov(dst, imm, scratch);
826  }
827}
828
829
830void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
831  if (src.code() < 16) {
832    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
833    vmov(dst, loc.high());
834  } else {
835    vmov(dst, VmovIndexHi, src);
836  }
837}
838
839
840void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
841  if (dst.code() < 16) {
842    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
843    vmov(loc.high(), src);
844  } else {
845    vmov(dst, VmovIndexHi, src);
846  }
847}
848
849
850void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
851  if (src.code() < 16) {
852    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
853    vmov(dst, loc.low());
854  } else {
855    vmov(dst, VmovIndexLo, src);
856  }
857}
858
859
860void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
861  if (dst.code() < 16) {
862    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
863    vmov(loc.low(), src);
864  } else {
865    vmov(dst, VmovIndexLo, src);
866  }
867}
868
869
870void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
871  if (frame_mode == BUILD_STUB_FRAME) {
872    stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
873    Push(Smi::FromInt(StackFrame::STUB));
874    // Adjust FP to point to saved FP.
875    add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
876  } else {
877    PredictableCodeSizeScope predictible_code_size_scope(
878        this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
879    // The following three instructions must remain together and unmodified
880    // for code aging to work properly.
881    if (isolate()->IsCodePreAgingActive()) {
882      // Pre-age the code.
883      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
884      add(r0, pc, Operand(-8));
885      ldr(pc, MemOperand(pc, -4));
886      emit_code_stub_address(stub);
887    } else {
888      stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
889      nop(ip.code());
890      // Adjust FP to point to saved FP.
891      add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
892    }
893  }
894}
895
896
897void MacroAssembler::EnterFrame(StackFrame::Type type) {
898  // r0-r3: preserved
899  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
900  mov(ip, Operand(Smi::FromInt(type)));
901  push(ip);
902  mov(ip, Operand(CodeObject()));
903  push(ip);
904  // Adjust FP to point to saved FP.
905  add(fp, sp,
906      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
907}
908
909
910void MacroAssembler::LeaveFrame(StackFrame::Type type) {
911  // r0: preserved
912  // r1: preserved
913  // r2: preserved
914
915  // Drop the execution stack down to the frame pointer and restore
916  // the caller frame pointer and return address.
917  mov(sp, fp);
918  ldm(ia_w, sp, fp.bit() | lr.bit());
919}
920
921
922void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
923  // Set up the frame structure on the stack.
924  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
925  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
926  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
927  Push(lr, fp);
928  mov(fp, Operand(sp));  // Set up new frame pointer.
929  // Reserve room for saved entry sp and code object.
930  sub(sp, sp, Operand(2 * kPointerSize));
931  if (emit_debug_code()) {
932    mov(ip, Operand::Zero());
933    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
934  }
935  mov(ip, Operand(CodeObject()));
936  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
937
938  // Save the frame pointer and the context in top.
939  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
940  str(fp, MemOperand(ip));
941  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
942  str(cp, MemOperand(ip));
943
944  // Optionally save all double registers.
945  if (save_doubles) {
946    SaveFPRegs(sp, ip);
947    // Note that d0 will be accessible at
948    //   fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
949    // since the sp slot and code slot were pushed after the fp.
950  }
951
952  // Reserve place for the return address and stack space and align the frame
953  // preparing for calling the runtime function.
954  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
955  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
956  if (frame_alignment > 0) {
957    ASSERT(IsPowerOf2(frame_alignment));
958    and_(sp, sp, Operand(-frame_alignment));
959  }
960
961  // Set the exit frame sp value to point just before the return address
962  // location.
963  add(ip, sp, Operand(kPointerSize));
964  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
965}
966
967
968void MacroAssembler::InitializeNewString(Register string,
969                                         Register length,
970                                         Heap::RootListIndex map_index,
971                                         Register scratch1,
972                                         Register scratch2) {
973  SmiTag(scratch1, length);
974  LoadRoot(scratch2, map_index);
975  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
976  mov(scratch1, Operand(String::kEmptyHashField));
977  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
978  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
979}
980
981
982int MacroAssembler::ActivationFrameAlignment() {
983#if V8_HOST_ARCH_ARM
984  // Running on the real platform. Use the alignment as mandated by the local
985  // environment.
986  // Note: This will break if we ever start generating snapshots on one ARM
987  // platform for another ARM platform with a different alignment.
988  return OS::ActivationFrameAlignment();
989#else  // V8_HOST_ARCH_ARM
990  // If we are using the simulator then we should always align to the expected
991  // alignment. As the simulator is used to generate snapshots we do not know
992  // if the target platform will need alignment, so this is controlled from a
993  // flag.
994  return FLAG_sim_stack_alignment;
995#endif  // V8_HOST_ARCH_ARM
996}
997
998
999void MacroAssembler::LeaveExitFrame(bool save_doubles,
1000                                    Register argument_count,
1001                                    bool restore_context) {
1002  // Optionally restore all double registers.
1003  if (save_doubles) {
1004    // Calculate the stack location of the saved doubles and restore them.
1005    const int offset = 2 * kPointerSize;
1006    sub(r3, fp,
1007        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1008    RestoreFPRegs(r3, ip);
1009  }
1010
1011  // Clear top frame.
1012  mov(r3, Operand::Zero());
1013  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1014  str(r3, MemOperand(ip));
1015
1016
1017  // Restore current context from top and clear it in debug mode.
1018  if (restore_context) {
1019    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1020    ldr(cp, MemOperand(ip));
1021  }
1022#ifdef DEBUG
1023  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1024  str(r3, MemOperand(ip));
1025#endif
1026
1027  // Tear down the exit frame, pop the arguments, and return.
1028  mov(sp, Operand(fp));
1029  ldm(ia_w, sp, fp.bit() | lr.bit());
1030  if (argument_count.is_valid()) {
1031    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1032  }
1033}
1034
1035
1036void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
1037  if (use_eabi_hardfloat()) {
1038    Move(dst, d0);
1039  } else {
1040    vmov(dst, r0, r1);
1041  }
1042}
1043
1044
1045void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
1046  // This macro takes the dst register to make the code more readable
1047  // at the call sites. However, the dst register has to be r5 to
1048  // follow the calling convention which requires the call type to be
1049  // in r5.
1050  ASSERT(dst.is(r5));
1051  if (call_kind == CALL_AS_FUNCTION) {
1052    mov(dst, Operand(Smi::FromInt(1)));
1053  } else {
1054    mov(dst, Operand(Smi::FromInt(0)));
1055  }
1056}
1057
1058
1059void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1060                                    const ParameterCount& actual,
1061                                    Handle<Code> code_constant,
1062                                    Register code_reg,
1063                                    Label* done,
1064                                    bool* definitely_mismatches,
1065                                    InvokeFlag flag,
1066                                    const CallWrapper& call_wrapper,
1067                                    CallKind call_kind) {
1068  bool definitely_matches = false;
1069  *definitely_mismatches = false;
1070  Label regular_invoke;
1071
1072  // Check whether the expected and actual arguments count match. If not,
1073  // setup registers according to contract with ArgumentsAdaptorTrampoline:
1074  //  r0: actual arguments count
1075  //  r1: function (passed through to callee)
1076  //  r2: expected arguments count
1077  //  r3: callee code entry
1078
1079  // The code below is made a lot easier because the calling code already sets
1080  // up actual and expected registers according to the contract if values are
1081  // passed in registers.
1082  ASSERT(actual.is_immediate() || actual.reg().is(r0));
1083  ASSERT(expected.is_immediate() || expected.reg().is(r2));
1084  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
1085
1086  if (expected.is_immediate()) {
1087    ASSERT(actual.is_immediate());
1088    if (expected.immediate() == actual.immediate()) {
1089      definitely_matches = true;
1090    } else {
1091      mov(r0, Operand(actual.immediate()));
1092      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1093      if (expected.immediate() == sentinel) {
1094        // Don't worry about adapting arguments for builtins that
1095        // don't want that done. Skip adaption code by making it look
1096        // like we have a match between expected and actual number of
1097        // arguments.
1098        definitely_matches = true;
1099      } else {
1100        *definitely_mismatches = true;
1101        mov(r2, Operand(expected.immediate()));
1102      }
1103    }
1104  } else {
1105    if (actual.is_immediate()) {
1106      cmp(expected.reg(), Operand(actual.immediate()));
1107      b(eq, &regular_invoke);
1108      mov(r0, Operand(actual.immediate()));
1109    } else {
1110      cmp(expected.reg(), Operand(actual.reg()));
1111      b(eq, &regular_invoke);
1112    }
1113  }
1114
1115  if (!definitely_matches) {
1116    if (!code_constant.is_null()) {
1117      mov(r3, Operand(code_constant));
1118      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1119    }
1120
1121    Handle<Code> adaptor =
1122        isolate()->builtins()->ArgumentsAdaptorTrampoline();
1123    if (flag == CALL_FUNCTION) {
1124      call_wrapper.BeforeCall(CallSize(adaptor));
1125      SetCallKind(r5, call_kind);
1126      Call(adaptor);
1127      call_wrapper.AfterCall();
1128      if (!*definitely_mismatches) {
1129        b(done);
1130      }
1131    } else {
1132      SetCallKind(r5, call_kind);
1133      Jump(adaptor, RelocInfo::CODE_TARGET);
1134    }
1135    bind(&regular_invoke);
1136  }
1137}
1138
1139
1140void MacroAssembler::InvokeCode(Register code,
1141                                const ParameterCount& expected,
1142                                const ParameterCount& actual,
1143                                InvokeFlag flag,
1144                                const CallWrapper& call_wrapper,
1145                                CallKind call_kind) {
1146  // You can't call a function without a valid frame.
1147  ASSERT(flag == JUMP_FUNCTION || has_frame());
1148
1149  Label done;
1150  bool definitely_mismatches = false;
1151  InvokePrologue(expected, actual, Handle<Code>::null(), code,
1152                 &done, &definitely_mismatches, flag,
1153                 call_wrapper, call_kind);
1154  if (!definitely_mismatches) {
1155    if (flag == CALL_FUNCTION) {
1156      call_wrapper.BeforeCall(CallSize(code));
1157      SetCallKind(r5, call_kind);
1158      Call(code);
1159      call_wrapper.AfterCall();
1160    } else {
1161      ASSERT(flag == JUMP_FUNCTION);
1162      SetCallKind(r5, call_kind);
1163      Jump(code);
1164    }
1165
1166    // Continue here if InvokePrologue does handle the invocation due to
1167    // mismatched parameter counts.
1168    bind(&done);
1169  }
1170}
1171
1172
1173void MacroAssembler::InvokeCode(Handle<Code> code,
1174                                const ParameterCount& expected,
1175                                const ParameterCount& actual,
1176                                RelocInfo::Mode rmode,
1177                                InvokeFlag flag,
1178                                CallKind call_kind) {
1179  // You can't call a function without a valid frame.
1180  ASSERT(flag == JUMP_FUNCTION || has_frame());
1181
1182  Label done;
1183  bool definitely_mismatches = false;
1184  InvokePrologue(expected, actual, code, no_reg,
1185                 &done, &definitely_mismatches, flag,
1186                 NullCallWrapper(), call_kind);
1187  if (!definitely_mismatches) {
1188    if (flag == CALL_FUNCTION) {
1189      SetCallKind(r5, call_kind);
1190      Call(code, rmode);
1191    } else {
1192      SetCallKind(r5, call_kind);
1193      Jump(code, rmode);
1194    }
1195
1196    // Continue here if InvokePrologue does handle the invocation due to
1197    // mismatched parameter counts.
1198    bind(&done);
1199  }
1200}
1201
1202
1203void MacroAssembler::InvokeFunction(Register fun,
1204                                    const ParameterCount& actual,
1205                                    InvokeFlag flag,
1206                                    const CallWrapper& call_wrapper,
1207                                    CallKind call_kind) {
1208  // You can't call a function without a valid frame.
1209  ASSERT(flag == JUMP_FUNCTION || has_frame());
1210
1211  // Contract with called JS functions requires that function is passed in r1.
1212  ASSERT(fun.is(r1));
1213
1214  Register expected_reg = r2;
1215  Register code_reg = r3;
1216
1217  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1218  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1219  ldr(expected_reg,
1220      FieldMemOperand(code_reg,
1221                      SharedFunctionInfo::kFormalParameterCountOffset));
1222  SmiUntag(expected_reg);
1223  ldr(code_reg,
1224      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1225
1226  ParameterCount expected(expected_reg);
1227  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
1228}
1229
1230
1231void MacroAssembler::InvokeFunction(Register function,
1232                                    const ParameterCount& expected,
1233                                    const ParameterCount& actual,
1234                                    InvokeFlag flag,
1235                                    const CallWrapper& call_wrapper,
1236                                    CallKind call_kind) {
1237  // You can't call a function without a valid frame.
1238  ASSERT(flag == JUMP_FUNCTION || has_frame());
1239
1240  // Contract with called JS functions requires that function is passed in r1.
1241  ASSERT(function.is(r1));
1242
1243  // Get the function and setup the context.
1244  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1245
1246  // We call indirectly through the code field in the function to
1247  // allow recompilation to take effect without changing any of the
1248  // call sites.
1249  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1250  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
1251}
1252
1253
1254void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1255                                    const ParameterCount& expected,
1256                                    const ParameterCount& actual,
1257                                    InvokeFlag flag,
1258                                    const CallWrapper& call_wrapper,
1259                                    CallKind call_kind) {
1260  Move(r1, function);
1261  InvokeFunction(r1, expected, actual, flag, call_wrapper, call_kind);
1262}
1263
1264
1265void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1266                                          Register map,
1267                                          Register scratch,
1268                                          Label* fail) {
1269  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1270  IsInstanceJSObjectType(map, scratch, fail);
1271}
1272
1273
1274void MacroAssembler::IsInstanceJSObjectType(Register map,
1275                                            Register scratch,
1276                                            Label* fail) {
1277  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1278  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1279  b(lt, fail);
1280  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1281  b(gt, fail);
1282}
1283
1284
1285void MacroAssembler::IsObjectJSStringType(Register object,
1286                                          Register scratch,
1287                                          Label* fail) {
1288  ASSERT(kNotStringTag != 0);
1289
1290  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1291  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1292  tst(scratch, Operand(kIsNotStringMask));
1293  b(ne, fail);
1294}
1295
1296
1297void MacroAssembler::IsObjectNameType(Register object,
1298                                      Register scratch,
1299                                      Label* fail) {
1300  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1301  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1302  cmp(scratch, Operand(LAST_NAME_TYPE));
1303  b(hi, fail);
1304}
1305
1306
1307#ifdef ENABLE_DEBUGGER_SUPPORT
1308void MacroAssembler::DebugBreak() {
1309  mov(r0, Operand::Zero());
1310  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1311  CEntryStub ces(1);
1312  ASSERT(AllowThisStubCall(&ces));
1313  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
1314}
1315#endif
1316
1317
1318void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1319                                    int handler_index) {
1320  // Adjust this code if not the case.
1321  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1322  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1323  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1324  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1325  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1326  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1327
1328  // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
1329  // We will build up the handler from the bottom by pushing on the stack.
1330  // Set up the code object (r5) and the state (r6) for pushing.
1331  unsigned state =
1332      StackHandler::IndexField::encode(handler_index) |
1333      StackHandler::KindField::encode(kind);
1334  mov(r5, Operand(CodeObject()));
1335  mov(r6, Operand(state));
1336
1337  // Push the frame pointer, context, state, and code object.
1338  if (kind == StackHandler::JS_ENTRY) {
1339    mov(cp, Operand(Smi::FromInt(0)));  // Indicates no context.
1340    mov(ip, Operand::Zero());  // NULL frame pointer.
1341    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
1342  } else {
1343    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1344  }
1345
1346  // Link the current handler as the next handler.
1347  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1348  ldr(r5, MemOperand(r6));
1349  push(r5);
1350  // Set this new handler as the current one.
1351  str(sp, MemOperand(r6));
1352}
1353
1354
1355void MacroAssembler::PopTryHandler() {
1356  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1357  pop(r1);
1358  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1359  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1360  str(r1, MemOperand(ip));
1361}
1362
1363
1364void MacroAssembler::JumpToHandlerEntry() {
1365  // Compute the handler entry address and jump to it.  The handler table is
1366  // a fixed array of (smi-tagged) code offsets.
1367  // r0 = exception, r1 = code object, r2 = state.
1368  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
1369  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1370  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
1371  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
1372  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
1373  add(pc, r1, Operand::SmiUntag(r2));  // Jump
1374}
1375
1376
1377void MacroAssembler::Throw(Register value) {
1378  // Adjust this code if not the case.
1379  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1380  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1381  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1382  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1383  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1384  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1385
1386  // The exception is expected in r0.
1387  if (!value.is(r0)) {
1388    mov(r0, value);
1389  }
1390  // Drop the stack pointer to the top of the top handler.
1391  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1392  ldr(sp, MemOperand(r3));
1393  // Restore the next handler.
1394  pop(r2);
1395  str(r2, MemOperand(r3));
1396
1397  // Get the code object (r1) and state (r2).  Restore the context and frame
1398  // pointer.
1399  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1400
1401  // If the handler is a JS frame, restore the context to the frame.
1402  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1403  // or cp.
1404  tst(cp, cp);
1405  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1406
1407  JumpToHandlerEntry();
1408}
1409
1410
1411void MacroAssembler::ThrowUncatchable(Register value) {
1412  // Adjust this code if not the case.
1413  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1414  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1415  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1416  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1417  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1418  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1419
1420  // The exception is expected in r0.
1421  if (!value.is(r0)) {
1422    mov(r0, value);
1423  }
1424  // Drop the stack pointer to the top of the top stack handler.
1425  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1426  ldr(sp, MemOperand(r3));
1427
1428  // Unwind the handlers until the ENTRY handler is found.
1429  Label fetch_next, check_kind;
1430  jmp(&check_kind);
1431  bind(&fetch_next);
1432  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1433
1434  bind(&check_kind);
1435  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1436  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1437  tst(r2, Operand(StackHandler::KindField::kMask));
1438  b(ne, &fetch_next);
1439
1440  // Set the top handler address to next handler past the top ENTRY handler.
1441  pop(r2);
1442  str(r2, MemOperand(r3));
1443  // Get the code object (r1) and state (r2).  Clear the context and frame
1444  // pointer (0 was saved in the handler).
1445  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1446
1447  JumpToHandlerEntry();
1448}
1449
1450
1451void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1452                                            Register scratch,
1453                                            Label* miss) {
1454  Label same_contexts;
1455
1456  ASSERT(!holder_reg.is(scratch));
1457  ASSERT(!holder_reg.is(ip));
1458  ASSERT(!scratch.is(ip));
1459
1460  // Load current lexical context from the stack frame.
1461  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1462  // In debug mode, make sure the lexical context is set.
1463#ifdef DEBUG
1464  cmp(scratch, Operand::Zero());
1465  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1466#endif
1467
1468  // Load the native context of the current context.
1469  int offset =
1470      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1471  ldr(scratch, FieldMemOperand(scratch, offset));
1472  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1473
1474  // Check the context is a native context.
1475  if (emit_debug_code()) {
1476    // Cannot use ip as a temporary in this verification code. Due to the fact
1477    // that ip is clobbered as part of cmp with an object Operand.
1478    push(holder_reg);  // Temporarily save holder on the stack.
1479    // Read the first word and compare to the native_context_map.
1480    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1481    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1482    cmp(holder_reg, ip);
1483    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1484    pop(holder_reg);  // Restore holder.
1485  }
1486
1487  // Check if both contexts are the same.
1488  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1489  cmp(scratch, Operand(ip));
1490  b(eq, &same_contexts);
1491
1492  // Check the context is a native context.
1493  if (emit_debug_code()) {
1494    // Cannot use ip as a temporary in this verification code. Due to the fact
1495    // that ip is clobbered as part of cmp with an object Operand.
1496    push(holder_reg);  // Temporarily save holder on the stack.
1497    mov(holder_reg, ip);  // Move ip to its holding place.
1498    LoadRoot(ip, Heap::kNullValueRootIndex);
1499    cmp(holder_reg, ip);
1500    Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1501
1502    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1503    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1504    cmp(holder_reg, ip);
1505    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1506    // Restore ip is not needed. ip is reloaded below.
1507    pop(holder_reg);  // Restore holder.
1508    // Restore ip to holder's context.
1509    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1510  }
1511
1512  // Check that the security token in the calling global object is
1513  // compatible with the security token in the receiving global
1514  // object.
1515  int token_offset = Context::kHeaderSize +
1516                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
1517
1518  ldr(scratch, FieldMemOperand(scratch, token_offset));
1519  ldr(ip, FieldMemOperand(ip, token_offset));
1520  cmp(scratch, Operand(ip));
1521  b(ne, miss);
1522
1523  bind(&same_contexts);
1524}
1525
1526
1527// Compute the hash code from the untagged key.  This must be kept in sync with
1528// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
1529// code-stub-hydrogen.cc
1530void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1531  // First of all we assign the hash seed to scratch.
1532  LoadRoot(scratch, Heap::kHashSeedRootIndex);
1533  SmiUntag(scratch);
1534
1535  // Xor original key with a seed.
1536  eor(t0, t0, Operand(scratch));
1537
1538  // Compute the hash code from the untagged key.  This must be kept in sync
1539  // with ComputeIntegerHash in utils.h.
1540  //
1541  // hash = ~hash + (hash << 15);
1542  mvn(scratch, Operand(t0));
1543  add(t0, scratch, Operand(t0, LSL, 15));
1544  // hash = hash ^ (hash >> 12);
1545  eor(t0, t0, Operand(t0, LSR, 12));
1546  // hash = hash + (hash << 2);
1547  add(t0, t0, Operand(t0, LSL, 2));
1548  // hash = hash ^ (hash >> 4);
1549  eor(t0, t0, Operand(t0, LSR, 4));
1550  // hash = hash * 2057;
1551  mov(scratch, Operand(t0, LSL, 11));
1552  add(t0, t0, Operand(t0, LSL, 3));
1553  add(t0, t0, scratch);
1554  // hash = hash ^ (hash >> 16);
1555  eor(t0, t0, Operand(t0, LSR, 16));
1556}
1557
1558
1559void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1560                                              Register elements,
1561                                              Register key,
1562                                              Register result,
1563                                              Register t0,
1564                                              Register t1,
1565                                              Register t2) {
1566  // Register use:
1567  //
1568  // elements - holds the slow-case elements of the receiver on entry.
1569  //            Unchanged unless 'result' is the same register.
1570  //
1571  // key      - holds the smi key on entry.
1572  //            Unchanged unless 'result' is the same register.
1573  //
1574  // result   - holds the result on exit if the load succeeded.
1575  //            Allowed to be the same as 'key' or 'result'.
1576  //            Unchanged on bailout so 'key' or 'result' can be used
1577  //            in further computation.
1578  //
1579  // Scratch registers:
1580  //
1581  // t0 - holds the untagged key on entry and holds the hash once computed.
1582  //
1583  // t1 - used to hold the capacity mask of the dictionary
1584  //
1585  // t2 - used for the index into the dictionary.
1586  Label done;
1587
1588  GetNumberHash(t0, t1);
1589
1590  // Compute the capacity mask.
1591  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1592  SmiUntag(t1);
1593  sub(t1, t1, Operand(1));
1594
1595  // Generate an unrolled loop that performs a few probes before giving up.
1596  for (int i = 0; i < kNumberDictionaryProbes; i++) {
1597    // Use t2 for index calculations and keep the hash intact in t0.
1598    mov(t2, t0);
1599    // Compute the masked index: (hash + i + i * i) & mask.
1600    if (i > 0) {
1601      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1602    }
1603    and_(t2, t2, Operand(t1));
1604
1605    // Scale the index by multiplying by the element size.
1606    ASSERT(SeededNumberDictionary::kEntrySize == 3);
1607    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
1608
1609    // Check if the key is identical to the name.
1610    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1611    ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1612    cmp(key, Operand(ip));
1613    if (i != kNumberDictionaryProbes - 1) {
1614      b(eq, &done);
1615    } else {
1616      b(ne, miss);
1617    }
1618  }
1619
1620  bind(&done);
1621  // Check that the value is a normal property.
1622  // t2: elements + (index * kPointerSize)
1623  const int kDetailsOffset =
1624      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1625  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1626  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1627  b(ne, miss);
1628
1629  // Get the value at the masked, scaled index and return.
1630  const int kValueOffset =
1631      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1632  ldr(result, FieldMemOperand(t2, kValueOffset));
1633}
1634
1635
1636void MacroAssembler::Allocate(int object_size,
1637                              Register result,
1638                              Register scratch1,
1639                              Register scratch2,
1640                              Label* gc_required,
1641                              AllocationFlags flags) {
1642  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
1643  if (!FLAG_inline_new) {
1644    if (emit_debug_code()) {
1645      // Trash the registers to simulate an allocation failure.
1646      mov(result, Operand(0x7091));
1647      mov(scratch1, Operand(0x7191));
1648      mov(scratch2, Operand(0x7291));
1649    }
1650    jmp(gc_required);
1651    return;
1652  }
1653
1654  ASSERT(!result.is(scratch1));
1655  ASSERT(!result.is(scratch2));
1656  ASSERT(!scratch1.is(scratch2));
1657  ASSERT(!scratch1.is(ip));
1658  ASSERT(!scratch2.is(ip));
1659
1660  // Make object size into bytes.
1661  if ((flags & SIZE_IN_WORDS) != 0) {
1662    object_size *= kPointerSize;
1663  }
1664  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1665
1666  // Check relative positions of allocation top and limit addresses.
1667  // The values must be adjacent in memory to allow the use of LDM.
1668  // Also, assert that the registers are numbered such that the values
1669  // are loaded in the correct order.
1670  ExternalReference allocation_top =
1671      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1672  ExternalReference allocation_limit =
1673      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1674
1675  intptr_t top   =
1676      reinterpret_cast<intptr_t>(allocation_top.address());
1677  intptr_t limit =
1678      reinterpret_cast<intptr_t>(allocation_limit.address());
1679  ASSERT((limit - top) == kPointerSize);
1680  ASSERT(result.code() < ip.code());
1681
1682  // Set up allocation top address register.
1683  Register topaddr = scratch1;
1684  mov(topaddr, Operand(allocation_top));
1685
1686  // This code stores a temporary value in ip. This is OK, as the code below
1687  // does not need ip for implicit literal generation.
1688  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1689    // Load allocation top into result and allocation limit into ip.
1690    ldm(ia, topaddr, result.bit() | ip.bit());
1691  } else {
1692    if (emit_debug_code()) {
1693      // Assert that result actually contains top on entry. ip is used
1694      // immediately below so this use of ip does not cause difference with
1695      // respect to register content between debug and release mode.
1696      ldr(ip, MemOperand(topaddr));
1697      cmp(result, ip);
1698      Check(eq, kUnexpectedAllocationTop);
1699    }
1700    // Load allocation limit into ip. Result already contains allocation top.
1701    ldr(ip, MemOperand(topaddr, limit - top));
1702  }
1703
1704  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1705    // Align the next allocation. Storing the filler map without checking top is
1706    // safe in new-space because the limit of the heap is aligned there.
1707    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1708    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1709    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1710    Label aligned;
1711    b(eq, &aligned);
1712    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1713      cmp(result, Operand(ip));
1714      b(hs, gc_required);
1715    }
1716    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1717    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1718    bind(&aligned);
1719  }
1720
1721  // Calculate new top and bail out if new space is exhausted. Use result
1722  // to calculate the new top. We must preserve the ip register at this
1723  // point, so we cannot just use add().
1724  ASSERT(object_size > 0);
1725  Register source = result;
1726  Condition cond = al;
1727  int shift = 0;
1728  while (object_size != 0) {
1729    if (((object_size >> shift) & 0x03) == 0) {
1730      shift += 2;
1731    } else {
1732      int bits = object_size & (0xff << shift);
1733      object_size -= bits;
1734      shift += 8;
1735      Operand bits_operand(bits);
1736      ASSERT(bits_operand.is_single_instruction(this));
1737      add(scratch2, source, bits_operand, SetCC, cond);
1738      source = scratch2;
1739      cond = cc;
1740    }
1741  }
1742  b(cs, gc_required);
1743  cmp(scratch2, Operand(ip));
1744  b(hi, gc_required);
1745  str(scratch2, MemOperand(topaddr));
1746
1747  // Tag object if requested.
1748  if ((flags & TAG_OBJECT) != 0) {
1749    add(result, result, Operand(kHeapObjectTag));
1750  }
1751}
1752
1753
1754void MacroAssembler::Allocate(Register object_size,
1755                              Register result,
1756                              Register scratch1,
1757                              Register scratch2,
1758                              Label* gc_required,
1759                              AllocationFlags flags) {
1760  if (!FLAG_inline_new) {
1761    if (emit_debug_code()) {
1762      // Trash the registers to simulate an allocation failure.
1763      mov(result, Operand(0x7091));
1764      mov(scratch1, Operand(0x7191));
1765      mov(scratch2, Operand(0x7291));
1766    }
1767    jmp(gc_required);
1768    return;
1769  }
1770
1771  // Assert that the register arguments are different and that none of
1772  // them are ip. ip is used explicitly in the code generated below.
1773  ASSERT(!result.is(scratch1));
1774  ASSERT(!result.is(scratch2));
1775  ASSERT(!scratch1.is(scratch2));
1776  ASSERT(!object_size.is(ip));
1777  ASSERT(!result.is(ip));
1778  ASSERT(!scratch1.is(ip));
1779  ASSERT(!scratch2.is(ip));
1780
1781  // Check relative positions of allocation top and limit addresses.
1782  // The values must be adjacent in memory to allow the use of LDM.
1783  // Also, assert that the registers are numbered such that the values
1784  // are loaded in the correct order.
1785  ExternalReference allocation_top =
1786      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1787  ExternalReference allocation_limit =
1788      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1789  intptr_t top =
1790      reinterpret_cast<intptr_t>(allocation_top.address());
1791  intptr_t limit =
1792      reinterpret_cast<intptr_t>(allocation_limit.address());
1793  ASSERT((limit - top) == kPointerSize);
1794  ASSERT(result.code() < ip.code());
1795
1796  // Set up allocation top address.
1797  Register topaddr = scratch1;
1798  mov(topaddr, Operand(allocation_top));
1799
1800  // This code stores a temporary value in ip. This is OK, as the code below
1801  // does not need ip for implicit literal generation.
1802  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1803    // Load allocation top into result and allocation limit into ip.
1804    ldm(ia, topaddr, result.bit() | ip.bit());
1805  } else {
1806    if (emit_debug_code()) {
1807      // Assert that result actually contains top on entry. ip is used
1808      // immediately below so this use of ip does not cause difference with
1809      // respect to register content between debug and release mode.
1810      ldr(ip, MemOperand(topaddr));
1811      cmp(result, ip);
1812      Check(eq, kUnexpectedAllocationTop);
1813    }
1814    // Load allocation limit into ip. Result already contains allocation top.
1815    ldr(ip, MemOperand(topaddr, limit - top));
1816  }
1817
1818  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1819    // Align the next allocation. Storing the filler map without checking top is
1820    // safe in new-space because the limit of the heap is aligned there.
1821    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1822    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1823    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1824    Label aligned;
1825    b(eq, &aligned);
1826    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1827      cmp(result, Operand(ip));
1828      b(hs, gc_required);
1829    }
1830    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1831    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1832    bind(&aligned);
1833  }
1834
1835  // Calculate new top and bail out if new space is exhausted. Use result
1836  // to calculate the new top. Object size may be in words so a shift is
1837  // required to get the number of bytes.
1838  if ((flags & SIZE_IN_WORDS) != 0) {
1839    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1840  } else {
1841    add(scratch2, result, Operand(object_size), SetCC);
1842  }
1843  b(cs, gc_required);
1844  cmp(scratch2, Operand(ip));
1845  b(hi, gc_required);
1846
1847  // Update allocation top. result temporarily holds the new top.
1848  if (emit_debug_code()) {
1849    tst(scratch2, Operand(kObjectAlignmentMask));
1850    Check(eq, kUnalignedAllocationInNewSpace);
1851  }
1852  str(scratch2, MemOperand(topaddr));
1853
1854  // Tag object if requested.
1855  if ((flags & TAG_OBJECT) != 0) {
1856    add(result, result, Operand(kHeapObjectTag));
1857  }
1858}
1859
1860
1861void MacroAssembler::UndoAllocationInNewSpace(Register object,
1862                                              Register scratch) {
1863  ExternalReference new_space_allocation_top =
1864      ExternalReference::new_space_allocation_top_address(isolate());
1865
1866  // Make sure the object has no tag before resetting top.
1867  and_(object, object, Operand(~kHeapObjectTagMask));
1868#ifdef DEBUG
1869  // Check that the object un-allocated is below the current top.
1870  mov(scratch, Operand(new_space_allocation_top));
1871  ldr(scratch, MemOperand(scratch));
1872  cmp(object, scratch);
1873  Check(lt, kUndoAllocationOfNonAllocatedMemory);
1874#endif
1875  // Write the address of the object to un-allocate as the current top.
1876  mov(scratch, Operand(new_space_allocation_top));
1877  str(object, MemOperand(scratch));
1878}
1879
1880
1881void MacroAssembler::AllocateTwoByteString(Register result,
1882                                           Register length,
1883                                           Register scratch1,
1884                                           Register scratch2,
1885                                           Register scratch3,
1886                                           Label* gc_required) {
1887  // Calculate the number of bytes needed for the characters in the string while
1888  // observing object alignment.
1889  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1890  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
1891  add(scratch1, scratch1,
1892      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1893  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1894
1895  // Allocate two-byte string in new space.
1896  Allocate(scratch1,
1897           result,
1898           scratch2,
1899           scratch3,
1900           gc_required,
1901           TAG_OBJECT);
1902
1903  // Set the map, length and hash field.
1904  InitializeNewString(result,
1905                      length,
1906                      Heap::kStringMapRootIndex,
1907                      scratch1,
1908                      scratch2);
1909}
1910
1911
1912void MacroAssembler::AllocateAsciiString(Register result,
1913                                         Register length,
1914                                         Register scratch1,
1915                                         Register scratch2,
1916                                         Register scratch3,
1917                                         Label* gc_required) {
1918  // Calculate the number of bytes needed for the characters in the string while
1919  // observing object alignment.
1920  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1921  ASSERT(kCharSize == 1);
1922  add(scratch1, length,
1923      Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1924  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1925
1926  // Allocate ASCII string in new space.
1927  Allocate(scratch1,
1928           result,
1929           scratch2,
1930           scratch3,
1931           gc_required,
1932           TAG_OBJECT);
1933
1934  // Set the map, length and hash field.
1935  InitializeNewString(result,
1936                      length,
1937                      Heap::kAsciiStringMapRootIndex,
1938                      scratch1,
1939                      scratch2);
1940}
1941
1942
1943void MacroAssembler::AllocateTwoByteConsString(Register result,
1944                                               Register length,
1945                                               Register scratch1,
1946                                               Register scratch2,
1947                                               Label* gc_required) {
1948  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1949           TAG_OBJECT);
1950
1951  InitializeNewString(result,
1952                      length,
1953                      Heap::kConsStringMapRootIndex,
1954                      scratch1,
1955                      scratch2);
1956}
1957
1958
1959void MacroAssembler::AllocateAsciiConsString(Register result,
1960                                             Register length,
1961                                             Register scratch1,
1962                                             Register scratch2,
1963                                             Label* gc_required) {
1964  Label allocate_new_space, install_map;
1965  AllocationFlags flags = TAG_OBJECT;
1966
1967  ExternalReference high_promotion_mode = ExternalReference::
1968      new_space_high_promotion_mode_active_address(isolate());
1969  mov(scratch1, Operand(high_promotion_mode));
1970  ldr(scratch1, MemOperand(scratch1, 0));
1971  cmp(scratch1, Operand::Zero());
1972  b(eq, &allocate_new_space);
1973
1974  Allocate(ConsString::kSize,
1975           result,
1976           scratch1,
1977           scratch2,
1978           gc_required,
1979           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
1980
1981  jmp(&install_map);
1982
1983  bind(&allocate_new_space);
1984  Allocate(ConsString::kSize,
1985           result,
1986           scratch1,
1987           scratch2,
1988           gc_required,
1989           flags);
1990
1991  bind(&install_map);
1992
1993  InitializeNewString(result,
1994                      length,
1995                      Heap::kConsAsciiStringMapRootIndex,
1996                      scratch1,
1997                      scratch2);
1998}
1999
2000
2001void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2002                                                 Register length,
2003                                                 Register scratch1,
2004                                                 Register scratch2,
2005                                                 Label* gc_required) {
2006  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2007           TAG_OBJECT);
2008
2009  InitializeNewString(result,
2010                      length,
2011                      Heap::kSlicedStringMapRootIndex,
2012                      scratch1,
2013                      scratch2);
2014}
2015
2016
2017void MacroAssembler::AllocateAsciiSlicedString(Register result,
2018                                               Register length,
2019                                               Register scratch1,
2020                                               Register scratch2,
2021                                               Label* gc_required) {
2022  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2023           TAG_OBJECT);
2024
2025  InitializeNewString(result,
2026                      length,
2027                      Heap::kSlicedAsciiStringMapRootIndex,
2028                      scratch1,
2029                      scratch2);
2030}
2031
2032
2033void MacroAssembler::CompareObjectType(Register object,
2034                                       Register map,
2035                                       Register type_reg,
2036                                       InstanceType type) {
2037  const Register temp = type_reg.is(no_reg) ? ip : type_reg;
2038
2039  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2040  CompareInstanceType(map, temp, type);
2041}
2042
2043
2044void MacroAssembler::CheckObjectTypeRange(Register object,
2045                                          Register map,
2046                                          InstanceType min_type,
2047                                          InstanceType max_type,
2048                                          Label* false_label) {
2049  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2050  STATIC_ASSERT(LAST_TYPE < 256);
2051  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2052  ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
2053  sub(ip, ip, Operand(min_type));
2054  cmp(ip, Operand(max_type - min_type));
2055  b(hi, false_label);
2056}
2057
2058
2059void MacroAssembler::CompareInstanceType(Register map,
2060                                         Register type_reg,
2061                                         InstanceType type) {
2062  // Registers map and type_reg can be ip. These two lines assert
2063  // that ip can be used with the two instructions (the constants
2064  // will never need ip).
2065  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
2066  STATIC_ASSERT(LAST_TYPE < 256);
2067  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2068  cmp(type_reg, Operand(type));
2069}
2070
2071
2072void MacroAssembler::CompareRoot(Register obj,
2073                                 Heap::RootListIndex index) {
2074  ASSERT(!obj.is(ip));
2075  LoadRoot(ip, index);
2076  cmp(obj, ip);
2077}
2078
2079
2080void MacroAssembler::CheckFastElements(Register map,
2081                                       Register scratch,
2082                                       Label* fail) {
2083  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2084  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2085  STATIC_ASSERT(FAST_ELEMENTS == 2);
2086  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2087  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2088  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2089  b(hi, fail);
2090}
2091
2092
2093void MacroAssembler::CheckFastObjectElements(Register map,
2094                                             Register scratch,
2095                                             Label* fail) {
2096  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2097  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2098  STATIC_ASSERT(FAST_ELEMENTS == 2);
2099  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2100  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2101  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2102  b(ls, fail);
2103  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2104  b(hi, fail);
2105}
2106
2107
2108void MacroAssembler::CheckFastSmiElements(Register map,
2109                                          Register scratch,
2110                                          Label* fail) {
2111  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2112  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2113  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2114  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2115  b(hi, fail);
2116}
2117
2118
2119void MacroAssembler::StoreNumberToDoubleElements(
2120                                      Register value_reg,
2121                                      Register key_reg,
2122                                      Register elements_reg,
2123                                      Register scratch1,
2124                                      LowDwVfpRegister double_scratch,
2125                                      Label* fail,
2126                                      int elements_offset) {
2127  Label smi_value, store;
2128
2129  // Handle smi values specially.
2130  JumpIfSmi(value_reg, &smi_value);
2131
2132  // Ensure that the object is a heap number
2133  CheckMap(value_reg,
2134           scratch1,
2135           isolate()->factory()->heap_number_map(),
2136           fail,
2137           DONT_DO_SMI_CHECK);
2138
2139  vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2140  // Force a canonical NaN.
2141  if (emit_debug_code()) {
2142    vmrs(ip);
2143    tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2144    Assert(ne, kDefaultNaNModeNotSet);
2145  }
2146  VFPCanonicalizeNaN(double_scratch);
2147  b(&store);
2148
2149  bind(&smi_value);
2150  SmiToDouble(double_scratch, value_reg);
2151
2152  bind(&store);
2153  add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2154  vstr(double_scratch,
2155       FieldMemOperand(scratch1,
2156                       FixedDoubleArray::kHeaderSize - elements_offset));
2157}
2158
2159
2160void MacroAssembler::CompareMap(Register obj,
2161                                Register scratch,
2162                                Handle<Map> map,
2163                                Label* early_success) {
2164  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2165  CompareMap(scratch, map, early_success);
2166}
2167
2168
2169void MacroAssembler::CompareMap(Register obj_map,
2170                                Handle<Map> map,
2171                                Label* early_success) {
2172  cmp(obj_map, Operand(map));
2173}
2174
2175
2176void MacroAssembler::CheckMap(Register obj,
2177                              Register scratch,
2178                              Handle<Map> map,
2179                              Label* fail,
2180                              SmiCheckType smi_check_type) {
2181  if (smi_check_type == DO_SMI_CHECK) {
2182    JumpIfSmi(obj, fail);
2183  }
2184
2185  Label success;
2186  CompareMap(obj, scratch, map, &success);
2187  b(ne, fail);
2188  bind(&success);
2189}
2190
2191
2192void MacroAssembler::CheckMap(Register obj,
2193                              Register scratch,
2194                              Heap::RootListIndex index,
2195                              Label* fail,
2196                              SmiCheckType smi_check_type) {
2197  if (smi_check_type == DO_SMI_CHECK) {
2198    JumpIfSmi(obj, fail);
2199  }
2200  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2201  LoadRoot(ip, index);
2202  cmp(scratch, ip);
2203  b(ne, fail);
2204}
2205
2206
2207void MacroAssembler::DispatchMap(Register obj,
2208                                 Register scratch,
2209                                 Handle<Map> map,
2210                                 Handle<Code> success,
2211                                 SmiCheckType smi_check_type) {
2212  Label fail;
2213  if (smi_check_type == DO_SMI_CHECK) {
2214    JumpIfSmi(obj, &fail);
2215  }
2216  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2217  mov(ip, Operand(map));
2218  cmp(scratch, ip);
2219  Jump(success, RelocInfo::CODE_TARGET, eq);
2220  bind(&fail);
2221}
2222
2223
2224void MacroAssembler::TryGetFunctionPrototype(Register function,
2225                                             Register result,
2226                                             Register scratch,
2227                                             Label* miss,
2228                                             bool miss_on_bound_function) {
2229  // Check that the receiver isn't a smi.
2230  JumpIfSmi(function, miss);
2231
2232  // Check that the function really is a function.  Load map into result reg.
2233  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2234  b(ne, miss);
2235
2236  if (miss_on_bound_function) {
2237    ldr(scratch,
2238        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2239    ldr(scratch,
2240        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2241    tst(scratch,
2242        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2243    b(ne, miss);
2244  }
2245
2246  // Make sure that the function has an instance prototype.
2247  Label non_instance;
2248  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2249  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2250  b(ne, &non_instance);
2251
2252  // Get the prototype or initial map from the function.
2253  ldr(result,
2254      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2255
2256  // If the prototype or initial map is the hole, don't return it and
2257  // simply miss the cache instead. This will allow us to allocate a
2258  // prototype object on-demand in the runtime system.
2259  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2260  cmp(result, ip);
2261  b(eq, miss);
2262
2263  // If the function does not have an initial map, we're done.
2264  Label done;
2265  CompareObjectType(result, scratch, scratch, MAP_TYPE);
2266  b(ne, &done);
2267
2268  // Get the prototype from the initial map.
2269  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2270  jmp(&done);
2271
2272  // Non-instance prototype: Fetch prototype from constructor field
2273  // in initial map.
2274  bind(&non_instance);
2275  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2276
2277  // All done.
2278  bind(&done);
2279}
2280
2281
2282void MacroAssembler::CallStub(CodeStub* stub,
2283                              TypeFeedbackId ast_id,
2284                              Condition cond) {
2285  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
2286  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
2287}
2288
2289
2290void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2291  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
2292}
2293
2294
2295static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2296  return ref0.address() - ref1.address();
2297}
2298
2299
2300void MacroAssembler::CallApiFunctionAndReturn(
2301    ExternalReference function,
2302    Address function_address,
2303    ExternalReference thunk_ref,
2304    Register thunk_last_arg,
2305    int stack_space,
2306    MemOperand return_value_operand,
2307    MemOperand* context_restore_operand) {
2308  ExternalReference next_address =
2309      ExternalReference::handle_scope_next_address(isolate());
2310  const int kNextOffset = 0;
2311  const int kLimitOffset = AddressOffset(
2312      ExternalReference::handle_scope_limit_address(isolate()),
2313      next_address);
2314  const int kLevelOffset = AddressOffset(
2315      ExternalReference::handle_scope_level_address(isolate()),
2316      next_address);
2317
2318  ASSERT(!thunk_last_arg.is(r3));
2319
2320  // Allocate HandleScope in callee-save registers.
2321  mov(r9, Operand(next_address));
2322  ldr(r4, MemOperand(r9, kNextOffset));
2323  ldr(r5, MemOperand(r9, kLimitOffset));
2324  ldr(r6, MemOperand(r9, kLevelOffset));
2325  add(r6, r6, Operand(1));
2326  str(r6, MemOperand(r9, kLevelOffset));
2327
2328  if (FLAG_log_timer_events) {
2329    FrameScope frame(this, StackFrame::MANUAL);
2330    PushSafepointRegisters();
2331    PrepareCallCFunction(1, r0);
2332    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2333    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2334    PopSafepointRegisters();
2335  }
2336
2337  Label profiler_disabled;
2338  Label end_profiler_check;
2339  bool* is_profiling_flag =
2340      isolate()->cpu_profiler()->is_profiling_address();
2341  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
2342  mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
2343  ldrb(r3, MemOperand(r3, 0));
2344  cmp(r3, Operand(0));
2345  b(eq, &profiler_disabled);
2346
2347  // Additional parameter is the address of the actual callback.
2348  mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
2349  mov(r3, Operand(thunk_ref));
2350  jmp(&end_profiler_check);
2351
2352  bind(&profiler_disabled);
2353  mov(r3, Operand(function));
2354  bind(&end_profiler_check);
2355
2356  // Native call returns to the DirectCEntry stub which redirects to the
2357  // return address pushed on stack (could have moved after GC).
2358  // DirectCEntry stub itself is generated early and never moves.
2359  DirectCEntryStub stub;
2360  stub.GenerateCall(this, r3);
2361
2362  if (FLAG_log_timer_events) {
2363    FrameScope frame(this, StackFrame::MANUAL);
2364    PushSafepointRegisters();
2365    PrepareCallCFunction(1, r0);
2366    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2367    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2368    PopSafepointRegisters();
2369  }
2370
2371  Label promote_scheduled_exception;
2372  Label exception_handled;
2373  Label delete_allocated_handles;
2374  Label leave_exit_frame;
2375  Label return_value_loaded;
2376
2377  // load value from ReturnValue
2378  ldr(r0, return_value_operand);
2379  bind(&return_value_loaded);
2380  // No more valid handles (the result handle was the last one). Restore
2381  // previous handle scope.
2382  str(r4, MemOperand(r9, kNextOffset));
2383  if (emit_debug_code()) {
2384    ldr(r1, MemOperand(r9, kLevelOffset));
2385    cmp(r1, r6);
2386    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2387  }
2388  sub(r6, r6, Operand(1));
2389  str(r6, MemOperand(r9, kLevelOffset));
2390  ldr(ip, MemOperand(r9, kLimitOffset));
2391  cmp(r5, ip);
2392  b(ne, &delete_allocated_handles);
2393
2394  // Check if the function scheduled an exception.
2395  bind(&leave_exit_frame);
2396  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2397  mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2398  ldr(r5, MemOperand(ip));
2399  cmp(r4, r5);
2400  b(ne, &promote_scheduled_exception);
2401  bind(&exception_handled);
2402
2403  bool restore_context = context_restore_operand != NULL;
2404  if (restore_context) {
2405    ldr(cp, *context_restore_operand);
2406  }
2407  // LeaveExitFrame expects unwind space to be in a register.
2408  mov(r4, Operand(stack_space));
2409  LeaveExitFrame(false, r4, !restore_context);
2410  mov(pc, lr);
2411
2412  bind(&promote_scheduled_exception);
2413  {
2414    FrameScope frame(this, StackFrame::INTERNAL);
2415    CallExternalReference(
2416        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2417        0);
2418  }
2419  jmp(&exception_handled);
2420
2421  // HandleScope limit has changed. Delete allocated extensions.
2422  bind(&delete_allocated_handles);
2423  str(r5, MemOperand(r9, kLimitOffset));
2424  mov(r4, r0);
2425  PrepareCallCFunction(1, r5);
2426  mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2427  CallCFunction(
2428      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2429  mov(r0, r4);
2430  jmp(&leave_exit_frame);
2431}
2432
2433
2434bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2435  return has_frame_ || !stub->SometimesSetsUpAFrame();
2436}
2437
2438
2439void MacroAssembler::IllegalOperation(int num_arguments) {
2440  if (num_arguments > 0) {
2441    add(sp, sp, Operand(num_arguments * kPointerSize));
2442  }
2443  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2444}
2445
2446
2447void MacroAssembler::IndexFromHash(Register hash, Register index) {
2448  // If the hash field contains an array index pick it out. The assert checks
2449  // that the constants for the maximum number of digits for an array index
2450  // cached in the hash field and the number of bits reserved for it does not
2451  // conflict.
2452  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2453         (1 << String::kArrayIndexValueBits));
2454  // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
2455  // the low kHashShift bits.
2456  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2457  SmiTag(index, hash);
2458}
2459
2460
2461void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2462  if (CpuFeatures::IsSupported(VFP3)) {
2463    vmov(value.low(), smi);
2464    vcvt_f64_s32(value, 1);
2465  } else {
2466    SmiUntag(ip, smi);
2467    vmov(value.low(), ip);
2468    vcvt_f64_s32(value, value.low());
2469  }
2470}
2471
2472
2473void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2474                                       LowDwVfpRegister double_scratch) {
2475  ASSERT(!double_input.is(double_scratch));
2476  vcvt_s32_f64(double_scratch.low(), double_input);
2477  vcvt_f64_s32(double_scratch, double_scratch.low());
2478  VFPCompareAndSetFlags(double_input, double_scratch);
2479}
2480
2481
2482void MacroAssembler::TryDoubleToInt32Exact(Register result,
2483                                           DwVfpRegister double_input,
2484                                           LowDwVfpRegister double_scratch) {
2485  ASSERT(!double_input.is(double_scratch));
2486  vcvt_s32_f64(double_scratch.low(), double_input);
2487  vmov(result, double_scratch.low());
2488  vcvt_f64_s32(double_scratch, double_scratch.low());
2489  VFPCompareAndSetFlags(double_input, double_scratch);
2490}
2491
2492
2493void MacroAssembler::TryInt32Floor(Register result,
2494                                   DwVfpRegister double_input,
2495                                   Register input_high,
2496                                   LowDwVfpRegister double_scratch,
2497                                   Label* done,
2498                                   Label* exact) {
2499  ASSERT(!result.is(input_high));
2500  ASSERT(!double_input.is(double_scratch));
2501  Label negative, exception;
2502
2503  VmovHigh(input_high, double_input);
2504
2505  // Test for NaN and infinities.
2506  Sbfx(result, input_high,
2507       HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2508  cmp(result, Operand(-1));
2509  b(eq, &exception);
2510  // Test for values that can be exactly represented as a
2511  // signed 32-bit integer.
2512  TryDoubleToInt32Exact(result, double_input, double_scratch);
2513  // If exact, return (result already fetched).
2514  b(eq, exact);
2515  cmp(input_high, Operand::Zero());
2516  b(mi, &negative);
2517
2518  // Input is in ]+0, +inf[.
2519  // If result equals 0x7fffffff input was out of range or
2520  // in ]0x7fffffff, 0x80000000[. We ignore this last case which
2521  // could fits into an int32, that means we always think input was
2522  // out of range and always go to exception.
2523  // If result < 0x7fffffff, go to done, result fetched.
2524  cmn(result, Operand(1));
2525  b(mi, &exception);
2526  b(done);
2527
2528  // Input is in ]-inf, -0[.
2529  // If x is a non integer negative number,
2530  // floor(x) <=> round_to_zero(x) - 1.
2531  bind(&negative);
2532  sub(result, result, Operand(1), SetCC);
2533  // If result is still negative, go to done, result fetched.
2534  // Else, we had an overflow and we fall through exception.
2535  b(mi, done);
2536  bind(&exception);
2537}
2538
2539void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2540                                                DwVfpRegister double_input,
2541                                                Label* done) {
2542  LowDwVfpRegister double_scratch = kScratchDoubleReg;
2543  vcvt_s32_f64(double_scratch.low(), double_input);
2544  vmov(result, double_scratch.low());
2545
2546  // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2547  sub(ip, result, Operand(1));
2548  cmp(ip, Operand(0x7ffffffe));
2549  b(lt, done);
2550}
2551
2552
2553void MacroAssembler::TruncateDoubleToI(Register result,
2554                                       DwVfpRegister double_input) {
2555  Label done;
2556
2557  TryInlineTruncateDoubleToI(result, double_input, &done);
2558
2559  // If we fell through then inline version didn't succeed - call stub instead.
2560  push(lr);
2561  sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
2562  vstr(double_input, MemOperand(sp, 0));
2563
2564  DoubleToIStub stub(sp, result, 0, true, true);
2565  CallStub(&stub);
2566
2567  add(sp, sp, Operand(kDoubleSize));
2568  pop(lr);
2569
2570  bind(&done);
2571}
2572
2573
2574void MacroAssembler::TruncateHeapNumberToI(Register result,
2575                                           Register object) {
2576  Label done;
2577  LowDwVfpRegister double_scratch = kScratchDoubleReg;
2578  ASSERT(!result.is(object));
2579
2580  vldr(double_scratch,
2581       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2582  TryInlineTruncateDoubleToI(result, double_scratch, &done);
2583
2584  // If we fell through then inline version didn't succeed - call stub instead.
2585  push(lr);
2586  DoubleToIStub stub(object,
2587                     result,
2588                     HeapNumber::kValueOffset - kHeapObjectTag,
2589                     true,
2590                     true);
2591  CallStub(&stub);
2592  pop(lr);
2593
2594  bind(&done);
2595}
2596
2597
2598void MacroAssembler::TruncateNumberToI(Register object,
2599                                       Register result,
2600                                       Register heap_number_map,
2601                                       Register scratch1,
2602                                       Label* not_number) {
2603  Label done;
2604  ASSERT(!result.is(object));
2605
2606  UntagAndJumpIfSmi(result, object, &done);
2607  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2608  TruncateHeapNumberToI(result, object);
2609
2610  bind(&done);
2611}
2612
2613
2614void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2615                                         Register src,
2616                                         int num_least_bits) {
2617  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2618    ubfx(dst, src, kSmiTagSize, num_least_bits);
2619  } else {
2620    SmiUntag(dst, src);
2621    and_(dst, dst, Operand((1 << num_least_bits) - 1));
2622  }
2623}
2624
2625
2626void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2627                                           Register src,
2628                                           int num_least_bits) {
2629  and_(dst, src, Operand((1 << num_least_bits) - 1));
2630}
2631
2632
2633void MacroAssembler::CallRuntime(const Runtime::Function* f,
2634                                 int num_arguments,
2635                                 SaveFPRegsMode save_doubles) {
2636  // All parameters are on the stack.  r0 has the return value after call.
2637
2638  // If the expected number of arguments of the runtime function is
2639  // constant, we check that the actual number of arguments match the
2640  // expectation.
2641  if (f->nargs >= 0 && f->nargs != num_arguments) {
2642    IllegalOperation(num_arguments);
2643    return;
2644  }
2645
2646  // TODO(1236192): Most runtime routines don't need the number of
2647  // arguments passed in because it is constant. At some point we
2648  // should remove this need and make the runtime routine entry code
2649  // smarter.
2650  mov(r0, Operand(num_arguments));
2651  mov(r1, Operand(ExternalReference(f, isolate())));
2652  CEntryStub stub(1, save_doubles);
2653  CallStub(&stub);
2654}
2655
2656
2657void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2658                                           int num_arguments) {
2659  mov(r0, Operand(num_arguments));
2660  mov(r1, Operand(ext));
2661
2662  CEntryStub stub(1);
2663  CallStub(&stub);
2664}
2665
2666
2667void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2668                                               int num_arguments,
2669                                               int result_size) {
2670  // TODO(1236192): Most runtime routines don't need the number of
2671  // arguments passed in because it is constant. At some point we
2672  // should remove this need and make the runtime routine entry code
2673  // smarter.
2674  mov(r0, Operand(num_arguments));
2675  JumpToExternalReference(ext);
2676}
2677
2678
2679void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2680                                     int num_arguments,
2681                                     int result_size) {
2682  TailCallExternalReference(ExternalReference(fid, isolate()),
2683                            num_arguments,
2684                            result_size);
2685}
2686
2687
2688void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2689#if defined(__thumb__)
2690  // Thumb mode builtin.
2691  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2692#endif
2693  mov(r1, Operand(builtin));
2694  CEntryStub stub(1);
2695  Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
2696}
2697
2698
2699void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2700                                   InvokeFlag flag,
2701                                   const CallWrapper& call_wrapper) {
2702  // You can't call a builtin without a valid frame.
2703  ASSERT(flag == JUMP_FUNCTION || has_frame());
2704
2705  GetBuiltinEntry(r2, id);
2706  if (flag == CALL_FUNCTION) {
2707    call_wrapper.BeforeCall(CallSize(r2));
2708    SetCallKind(r5, CALL_AS_METHOD);
2709    Call(r2);
2710    call_wrapper.AfterCall();
2711  } else {
2712    ASSERT(flag == JUMP_FUNCTION);
2713    SetCallKind(r5, CALL_AS_METHOD);
2714    Jump(r2);
2715  }
2716}
2717
2718
2719void MacroAssembler::GetBuiltinFunction(Register target,
2720                                        Builtins::JavaScript id) {
2721  // Load the builtins object into target register.
2722  ldr(target,
2723      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2724  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2725  // Load the JavaScript builtin function from the builtins object.
2726  ldr(target, FieldMemOperand(target,
2727                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2728}
2729
2730
2731void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2732  ASSERT(!target.is(r1));
2733  GetBuiltinFunction(r1, id);
2734  // Load the code entry point from the builtins object.
2735  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2736}
2737
2738
2739void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2740                                Register scratch1, Register scratch2) {
2741  if (FLAG_native_code_counters && counter->Enabled()) {
2742    mov(scratch1, Operand(value));
2743    mov(scratch2, Operand(ExternalReference(counter)));
2744    str(scratch1, MemOperand(scratch2));
2745  }
2746}
2747
2748
2749void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2750                                      Register scratch1, Register scratch2) {
2751  ASSERT(value > 0);
2752  if (FLAG_native_code_counters && counter->Enabled()) {
2753    mov(scratch2, Operand(ExternalReference(counter)));
2754    ldr(scratch1, MemOperand(scratch2));
2755    add(scratch1, scratch1, Operand(value));
2756    str(scratch1, MemOperand(scratch2));
2757  }
2758}
2759
2760
2761void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2762                                      Register scratch1, Register scratch2) {
2763  ASSERT(value > 0);
2764  if (FLAG_native_code_counters && counter->Enabled()) {
2765    mov(scratch2, Operand(ExternalReference(counter)));
2766    ldr(scratch1, MemOperand(scratch2));
2767    sub(scratch1, scratch1, Operand(value));
2768    str(scratch1, MemOperand(scratch2));
2769  }
2770}
2771
2772
2773void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2774  if (emit_debug_code())
2775    Check(cond, reason);
2776}
2777
2778
2779void MacroAssembler::AssertFastElements(Register elements) {
2780  if (emit_debug_code()) {
2781    ASSERT(!elements.is(ip));
2782    Label ok;
2783    push(elements);
2784    ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2785    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2786    cmp(elements, ip);
2787    b(eq, &ok);
2788    LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2789    cmp(elements, ip);
2790    b(eq, &ok);
2791    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2792    cmp(elements, ip);
2793    b(eq, &ok);
2794    Abort(kJSObjectWithFastElementsMapHasSlowElements);
2795    bind(&ok);
2796    pop(elements);
2797  }
2798}
2799
2800
2801void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2802  Label L;
2803  b(cond, &L);
2804  Abort(reason);
2805  // will not return here
2806  bind(&L);
2807}
2808
2809
2810void MacroAssembler::Abort(BailoutReason reason) {
2811  Label abort_start;
2812  bind(&abort_start);
2813  // We want to pass the msg string like a smi to avoid GC
2814  // problems, however msg is not guaranteed to be aligned
2815  // properly. Instead, we pass an aligned pointer that is
2816  // a proper v8 smi, but also pass the alignment difference
2817  // from the real pointer as a smi.
2818  const char* msg = GetBailoutReason(reason);
2819  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2820  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2821  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2822#ifdef DEBUG
2823  if (msg != NULL) {
2824    RecordComment("Abort message: ");
2825    RecordComment(msg);
2826  }
2827
2828  if (FLAG_trap_on_abort) {
2829    stop(msg);
2830    return;
2831  }
2832#endif
2833
2834  mov(r0, Operand(p0));
2835  push(r0);
2836  mov(r0, Operand(Smi::FromInt(p1 - p0)));
2837  push(r0);
2838  // Disable stub call restrictions to always allow calls to abort.
2839  if (!has_frame_) {
2840    // We don't actually want to generate a pile of code for this, so just
2841    // claim there is a stack frame, without generating one.
2842    FrameScope scope(this, StackFrame::NONE);
2843    CallRuntime(Runtime::kAbort, 2);
2844  } else {
2845    CallRuntime(Runtime::kAbort, 2);
2846  }
2847  // will not return here
2848  if (is_const_pool_blocked()) {
2849    // If the calling code cares about the exact number of
2850    // instructions generated, we insert padding here to keep the size
2851    // of the Abort macro constant.
2852    static const int kExpectedAbortInstructions = 10;
2853    int abort_instructions = InstructionsGeneratedSince(&abort_start);
2854    ASSERT(abort_instructions <= kExpectedAbortInstructions);
2855    while (abort_instructions++ < kExpectedAbortInstructions) {
2856      nop();
2857    }
2858  }
2859}
2860
2861
2862void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2863  if (context_chain_length > 0) {
2864    // Move up the chain of contexts to the context containing the slot.
2865    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2866    for (int i = 1; i < context_chain_length; i++) {
2867      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2868    }
2869  } else {
2870    // Slot is in the current function context.  Move it into the
2871    // destination register in case we store into it (the write barrier
2872    // cannot be allowed to destroy the context in esi).
2873    mov(dst, cp);
2874  }
2875}
2876
2877
2878void MacroAssembler::LoadTransitionedArrayMapConditional(
2879    ElementsKind expected_kind,
2880    ElementsKind transitioned_kind,
2881    Register map_in_out,
2882    Register scratch,
2883    Label* no_map_match) {
2884  // Load the global or builtins object from the current context.
2885  ldr(scratch,
2886      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2887  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2888
2889  // Check that the function's map is the same as the expected cached map.
2890  ldr(scratch,
2891      MemOperand(scratch,
2892                 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2893  size_t offset = expected_kind * kPointerSize +
2894      FixedArrayBase::kHeaderSize;
2895  ldr(ip, FieldMemOperand(scratch, offset));
2896  cmp(map_in_out, ip);
2897  b(ne, no_map_match);
2898
2899  // Use the transitioned cached map.
2900  offset = transitioned_kind * kPointerSize +
2901      FixedArrayBase::kHeaderSize;
2902  ldr(map_in_out, FieldMemOperand(scratch, offset));
2903}
2904
2905
2906void MacroAssembler::LoadInitialArrayMap(
2907    Register function_in, Register scratch,
2908    Register map_out, bool can_have_holes) {
2909  ASSERT(!function_in.is(map_out));
2910  Label done;
2911  ldr(map_out, FieldMemOperand(function_in,
2912                               JSFunction::kPrototypeOrInitialMapOffset));
2913  if (!FLAG_smi_only_arrays) {
2914    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
2915    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2916                                        kind,
2917                                        map_out,
2918                                        scratch,
2919                                        &done);
2920  } else if (can_have_holes) {
2921    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
2922                                        FAST_HOLEY_SMI_ELEMENTS,
2923                                        map_out,
2924                                        scratch,
2925                                        &done);
2926  }
2927  bind(&done);
2928}
2929
2930
2931void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2932  // Load the global or builtins object from the current context.
2933  ldr(function,
2934      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2935  // Load the native context from the global or builtins object.
2936  ldr(function, FieldMemOperand(function,
2937                                GlobalObject::kNativeContextOffset));
2938  // Load the function from the native context.
2939  ldr(function, MemOperand(function, Context::SlotOffset(index)));
2940}
2941
2942
2943void MacroAssembler::LoadArrayFunction(Register function) {
2944  // Load the global or builtins object from the current context.
2945  ldr(function,
2946      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2947  // Load the global context from the global or builtins object.
2948  ldr(function,
2949      FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
2950  // Load the array function from the native context.
2951  ldr(function,
2952      MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
2953}
2954
2955
2956void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2957                                                  Register map,
2958                                                  Register scratch) {
2959  // Load the initial map. The global functions all have initial maps.
2960  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2961  if (emit_debug_code()) {
2962    Label ok, fail;
2963    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2964    b(&ok);
2965    bind(&fail);
2966    Abort(kGlobalFunctionsMustHaveInitialMap);
2967    bind(&ok);
2968  }
2969}
2970
2971
2972void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2973    Register reg,
2974    Register scratch,
2975    Label* not_power_of_two_or_zero) {
2976  sub(scratch, reg, Operand(1), SetCC);
2977  b(mi, not_power_of_two_or_zero);
2978  tst(scratch, reg);
2979  b(ne, not_power_of_two_or_zero);
2980}
2981
2982
2983void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2984    Register reg,
2985    Register scratch,
2986    Label* zero_and_neg,
2987    Label* not_power_of_two) {
2988  sub(scratch, reg, Operand(1), SetCC);
2989  b(mi, zero_and_neg);
2990  tst(scratch, reg);
2991  b(ne, not_power_of_two);
2992}
2993
2994
2995void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2996                                      Register reg2,
2997                                      Label* on_not_both_smi) {
2998  STATIC_ASSERT(kSmiTag == 0);
2999  tst(reg1, Operand(kSmiTagMask));
3000  tst(reg2, Operand(kSmiTagMask), eq);
3001  b(ne, on_not_both_smi);
3002}
3003
3004
3005void MacroAssembler::UntagAndJumpIfSmi(
3006    Register dst, Register src, Label* smi_case) {
3007  STATIC_ASSERT(kSmiTag == 0);
3008  SmiUntag(dst, src, SetCC);
3009  b(cc, smi_case);  // Shifter carry is not set for a smi.
3010}
3011
3012
3013void MacroAssembler::UntagAndJumpIfNotSmi(
3014    Register dst, Register src, Label* non_smi_case) {
3015  STATIC_ASSERT(kSmiTag == 0);
3016  SmiUntag(dst, src, SetCC);
3017  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
3018}
3019
3020
3021void MacroAssembler::JumpIfEitherSmi(Register reg1,
3022                                     Register reg2,
3023                                     Label* on_either_smi) {
3024  STATIC_ASSERT(kSmiTag == 0);
3025  tst(reg1, Operand(kSmiTagMask));
3026  tst(reg2, Operand(kSmiTagMask), ne);
3027  b(eq, on_either_smi);
3028}
3029
3030
3031void MacroAssembler::AssertNotSmi(Register object) {
3032  if (emit_debug_code()) {
3033    STATIC_ASSERT(kSmiTag == 0);
3034    tst(object, Operand(kSmiTagMask));
3035    Check(ne, kOperandIsASmi);
3036  }
3037}
3038
3039
3040void MacroAssembler::AssertSmi(Register object) {
3041  if (emit_debug_code()) {
3042    STATIC_ASSERT(kSmiTag == 0);
3043    tst(object, Operand(kSmiTagMask));
3044    Check(eq, kOperandIsNotSmi);
3045  }
3046}
3047
3048
3049void MacroAssembler::AssertString(Register object) {
3050  if (emit_debug_code()) {
3051    STATIC_ASSERT(kSmiTag == 0);
3052    tst(object, Operand(kSmiTagMask));
3053    Check(ne, kOperandIsASmiAndNotAString);
3054    push(object);
3055    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3056    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3057    pop(object);
3058    Check(lo, kOperandIsNotAString);
3059  }
3060}
3061
3062
3063void MacroAssembler::AssertName(Register object) {
3064  if (emit_debug_code()) {
3065    STATIC_ASSERT(kSmiTag == 0);
3066    tst(object, Operand(kSmiTagMask));
3067    Check(ne, kOperandIsASmiAndNotAName);
3068    push(object);
3069    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3070    CompareInstanceType(object, object, LAST_NAME_TYPE);
3071    pop(object);
3072    Check(le, kOperandIsNotAName);
3073  }
3074}
3075
3076
3077
3078void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3079  if (emit_debug_code()) {
3080    CompareRoot(reg, index);
3081    Check(eq, kHeapNumberMapRegisterClobbered);
3082  }
3083}
3084
3085
3086void MacroAssembler::JumpIfNotHeapNumber(Register object,
3087                                         Register heap_number_map,
3088                                         Register scratch,
3089                                         Label* on_not_heap_number) {
3090  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3091  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3092  cmp(scratch, heap_number_map);
3093  b(ne, on_not_heap_number);
3094}
3095
3096
3097void MacroAssembler::LookupNumberStringCache(Register object,
3098                                             Register result,
3099                                             Register scratch1,
3100                                             Register scratch2,
3101                                             Register scratch3,
3102                                             Label* not_found) {
3103  // Use of registers. Register result is used as a temporary.
3104  Register number_string_cache = result;
3105  Register mask = scratch3;
3106
3107  // Load the number string cache.
3108  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3109
3110  // Make the hash mask from the length of the number string cache. It
3111  // contains two elements (number and string) for each cache entry.
3112  ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
3113  // Divide length by two (length is a smi).
3114  mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
3115  sub(mask, mask, Operand(1));  // Make mask.
3116
3117  // Calculate the entry in the number string cache. The hash value in the
3118  // number string cache for smis is just the smi value, and the hash for
3119  // doubles is the xor of the upper and lower words. See
3120  // Heap::GetNumberStringCache.
3121  Label is_smi;
3122  Label load_result_from_cache;
3123  JumpIfSmi(object, &is_smi);
3124  CheckMap(object,
3125           scratch1,
3126           Heap::kHeapNumberMapRootIndex,
3127           not_found,
3128           DONT_DO_SMI_CHECK);
3129
3130  STATIC_ASSERT(8 == kDoubleSize);
3131  add(scratch1,
3132      object,
3133      Operand(HeapNumber::kValueOffset - kHeapObjectTag));
3134  ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
3135  eor(scratch1, scratch1, Operand(scratch2));
3136  and_(scratch1, scratch1, Operand(mask));
3137
3138  // Calculate address of entry in string cache: each entry consists
3139  // of two pointer sized fields.
3140  add(scratch1,
3141      number_string_cache,
3142      Operand(scratch1, LSL, kPointerSizeLog2 + 1));
3143
3144  Register probe = mask;
3145  ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3146  JumpIfSmi(probe, not_found);
3147  sub(scratch2, object, Operand(kHeapObjectTag));
3148  vldr(d0, scratch2, HeapNumber::kValueOffset);
3149  sub(probe, probe, Operand(kHeapObjectTag));
3150  vldr(d1, probe, HeapNumber::kValueOffset);
3151  VFPCompareAndSetFlags(d0, d1);
3152  b(ne, not_found);  // The cache did not contain this value.
3153  b(&load_result_from_cache);
3154
3155  bind(&is_smi);
3156  Register scratch = scratch1;
3157  and_(scratch, mask, Operand(object, ASR, 1));
3158  // Calculate address of entry in string cache: each entry consists
3159  // of two pointer sized fields.
3160  add(scratch,
3161      number_string_cache,
3162      Operand(scratch, LSL, kPointerSizeLog2 + 1));
3163
3164  // Check if the entry is the smi we are looking for.
3165  ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3166  cmp(object, probe);
3167  b(ne, not_found);
3168
3169  // Get the result from the cache.
3170  bind(&load_result_from_cache);
3171  ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
3172  IncrementCounter(isolate()->counters()->number_to_string_native(),
3173                   1,
3174                   scratch1,
3175                   scratch2);
3176}
3177
3178
3179void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
3180    Register first,
3181    Register second,
3182    Register scratch1,
3183    Register scratch2,
3184    Label* failure) {
3185  // Test that both first and second are sequential ASCII strings.
3186  // Assume that they are non-smis.
3187  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3188  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3189  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3190  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3191
3192  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
3193                                               scratch2,
3194                                               scratch1,
3195                                               scratch2,
3196                                               failure);
3197}
3198
3199void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
3200                                                         Register second,
3201                                                         Register scratch1,
3202                                                         Register scratch2,
3203                                                         Label* failure) {
3204  // Check that neither is a smi.
3205  and_(scratch1, first, Operand(second));
3206  JumpIfSmi(scratch1, failure);
3207  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
3208                                             second,
3209                                             scratch1,
3210                                             scratch2,
3211                                             failure);
3212}
3213
3214
3215void MacroAssembler::JumpIfNotUniqueName(Register reg,
3216                                         Label* not_unique_name) {
3217  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3218  Label succeed;
3219  tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3220  b(eq, &succeed);
3221  cmp(reg, Operand(SYMBOL_TYPE));
3222  b(ne, not_unique_name);
3223
3224  bind(&succeed);
3225}
3226
3227
3228// Allocates a heap number or jumps to the need_gc label if the young space
3229// is full and a scavenge is needed.
3230void MacroAssembler::AllocateHeapNumber(Register result,
3231                                        Register scratch1,
3232                                        Register scratch2,
3233                                        Register heap_number_map,
3234                                        Label* gc_required,
3235                                        TaggingMode tagging_mode) {
3236  // Allocate an object in the heap for the heap number and tag it as a heap
3237  // object.
3238  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3239           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3240
3241  // Store heap number map in the allocated object.
3242  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3243  if (tagging_mode == TAG_RESULT) {
3244    str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3245  } else {
3246    str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3247  }
3248}
3249
3250
3251void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3252                                                 DwVfpRegister value,
3253                                                 Register scratch1,
3254                                                 Register scratch2,
3255                                                 Register heap_number_map,
3256                                                 Label* gc_required) {
3257  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3258  sub(scratch1, result, Operand(kHeapObjectTag));
3259  vstr(value, scratch1, HeapNumber::kValueOffset);
3260}
3261
3262
3263// Copies a fixed number of fields of heap objects from src to dst.
3264void MacroAssembler::CopyFields(Register dst,
3265                                Register src,
3266                                LowDwVfpRegister double_scratch,
3267                                int field_count) {
3268  int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
3269  for (int i = 0; i < double_count; i++) {
3270    vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
3271    vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
3272  }
3273
3274  STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
3275  STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
3276
3277  int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
3278  if (remain != 0) {
3279    vldr(double_scratch.low(),
3280         FieldMemOperand(src, (field_count - 1) * kPointerSize));
3281    vstr(double_scratch.low(),
3282         FieldMemOperand(dst, (field_count - 1) * kPointerSize));
3283  }
3284}
3285
3286
3287void MacroAssembler::CopyBytes(Register src,
3288                               Register dst,
3289                               Register length,
3290                               Register scratch) {
3291  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3292
3293  // Align src before copying in word size chunks.
3294  cmp(length, Operand(kPointerSize));
3295  b(le, &byte_loop);
3296
3297  bind(&align_loop_1);
3298  tst(src, Operand(kPointerSize - 1));
3299  b(eq, &word_loop);
3300  ldrb(scratch, MemOperand(src, 1, PostIndex));
3301  strb(scratch, MemOperand(dst, 1, PostIndex));
3302  sub(length, length, Operand(1), SetCC);
3303  b(&align_loop_1);
3304  // Copy bytes in word size chunks.
3305  bind(&word_loop);
3306  if (emit_debug_code()) {
3307    tst(src, Operand(kPointerSize - 1));
3308    Assert(eq, kExpectingAlignmentForCopyBytes);
3309  }
3310  cmp(length, Operand(kPointerSize));
3311  b(lt, &byte_loop);
3312  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3313  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3314    str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3315  } else {
3316    strb(scratch, MemOperand(dst, 1, PostIndex));
3317    mov(scratch, Operand(scratch, LSR, 8));
3318    strb(scratch, MemOperand(dst, 1, PostIndex));
3319    mov(scratch, Operand(scratch, LSR, 8));
3320    strb(scratch, MemOperand(dst, 1, PostIndex));
3321    mov(scratch, Operand(scratch, LSR, 8));
3322    strb(scratch, MemOperand(dst, 1, PostIndex));
3323  }
3324  sub(length, length, Operand(kPointerSize));
3325  b(&word_loop);
3326
3327  // Copy the last bytes if any left.
3328  bind(&byte_loop);
3329  cmp(length, Operand::Zero());
3330  b(eq, &done);
3331  bind(&byte_loop_1);
3332  ldrb(scratch, MemOperand(src, 1, PostIndex));
3333  strb(scratch, MemOperand(dst, 1, PostIndex));
3334  sub(length, length, Operand(1), SetCC);
3335  b(ne, &byte_loop_1);
3336  bind(&done);
3337}
3338
3339
3340void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3341                                                Register end_offset,
3342                                                Register filler) {
3343  Label loop, entry;
3344  b(&entry);
3345  bind(&loop);
3346  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
3347  bind(&entry);
3348  cmp(start_offset, end_offset);
3349  b(lt, &loop);
3350}
3351
3352
3353void MacroAssembler::CheckFor32DRegs(Register scratch) {
3354  mov(scratch, Operand(ExternalReference::cpu_features()));
3355  ldr(scratch, MemOperand(scratch));
3356  tst(scratch, Operand(1u << VFP32DREGS));
3357}
3358
3359
3360void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
3361  CheckFor32DRegs(scratch);
3362  vstm(db_w, location, d16, d31, ne);
3363  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3364  vstm(db_w, location, d0, d15);
3365}
3366
3367
3368void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
3369  CheckFor32DRegs(scratch);
3370  vldm(ia_w, location, d0, d15);
3371  vldm(ia_w, location, d16, d31, ne);
3372  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
3373}
3374
3375
3376void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3377    Register first,
3378    Register second,
3379    Register scratch1,
3380    Register scratch2,
3381    Label* failure) {
3382  const int kFlatAsciiStringMask =
3383      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3384  const int kFlatAsciiStringTag =
3385      kStringTag | kOneByteStringTag | kSeqStringTag;
3386  and_(scratch1, first, Operand(kFlatAsciiStringMask));
3387  and_(scratch2, second, Operand(kFlatAsciiStringMask));
3388  cmp(scratch1, Operand(kFlatAsciiStringTag));
3389  // Ignore second test if first test failed.
3390  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
3391  b(ne, failure);
3392}
3393
3394
3395void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3396                                                            Register scratch,
3397                                                            Label* failure) {
3398  const int kFlatAsciiStringMask =
3399      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3400  const int kFlatAsciiStringTag =
3401      kStringTag | kOneByteStringTag | kSeqStringTag;
3402  and_(scratch, type, Operand(kFlatAsciiStringMask));
3403  cmp(scratch, Operand(kFlatAsciiStringTag));
3404  b(ne, failure);
3405}
3406
3407static const int kRegisterPassedArguments = 4;
3408
3409
3410int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3411                                              int num_double_arguments) {
3412  int stack_passed_words = 0;
3413  if (use_eabi_hardfloat()) {
3414    // In the hard floating point calling convention, we can use
3415    // all double registers to pass doubles.
3416    if (num_double_arguments > DoubleRegister::NumRegisters()) {
3417      stack_passed_words +=
3418          2 * (num_double_arguments - DoubleRegister::NumRegisters());
3419    }
3420  } else {
3421    // In the soft floating point calling convention, every double
3422    // argument is passed using two registers.
3423    num_reg_arguments += 2 * num_double_arguments;
3424  }
3425  // Up to four simple arguments are passed in registers r0..r3.
3426  if (num_reg_arguments > kRegisterPassedArguments) {
3427    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3428  }
3429  return stack_passed_words;
3430}
3431
3432
3433void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
3434                                               Register index,
3435                                               Register value,
3436                                               uint32_t encoding_mask) {
3437  Label is_object;
3438  SmiTst(string);
3439  ThrowIf(eq, kNonObject);
3440
3441  ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3442  ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3443
3444  and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3445  cmp(ip, Operand(encoding_mask));
3446  ThrowIf(ne, kUnexpectedStringType);
3447
3448  // The index is assumed to be untagged coming in, tag it to compare with the
3449  // string length without using a temp register, it is restored at the end of
3450  // this function.
3451  Label index_tag_ok, index_tag_bad;
3452  TrySmiTag(index, index, &index_tag_bad);
3453  b(&index_tag_ok);
3454  bind(&index_tag_bad);
3455  Throw(kIndexIsTooLarge);
3456  bind(&index_tag_ok);
3457
3458  ldr(ip, FieldMemOperand(string, String::kLengthOffset));
3459  cmp(index, ip);
3460  ThrowIf(ge, kIndexIsTooLarge);
3461
3462  cmp(index, Operand(Smi::FromInt(0)));
3463  ThrowIf(lt, kIndexIsNegative);
3464
3465  SmiUntag(index, index);
3466}
3467
3468
3469void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3470                                          int num_double_arguments,
3471                                          Register scratch) {
3472  int frame_alignment = ActivationFrameAlignment();
3473  int stack_passed_arguments = CalculateStackPassedWords(
3474      num_reg_arguments, num_double_arguments);
3475  if (frame_alignment > kPointerSize) {
3476    // Make stack end at alignment and make room for num_arguments - 4 words
3477    // and the original value of sp.
3478    mov(scratch, sp);
3479    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
3480    ASSERT(IsPowerOf2(frame_alignment));
3481    and_(sp, sp, Operand(-frame_alignment));
3482    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3483  } else {
3484    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
3485  }
3486}
3487
3488
3489void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3490                                          Register scratch) {
3491  PrepareCallCFunction(num_reg_arguments, 0, scratch);
3492}
3493
3494
3495void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
3496  ASSERT(dreg.is(d0));
3497  if (!use_eabi_hardfloat()) {
3498    vmov(r0, r1, dreg);
3499  }
3500}
3501
3502
3503void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
3504                                             DwVfpRegister dreg2) {
3505  ASSERT(dreg1.is(d0));
3506  ASSERT(dreg2.is(d1));
3507  if (!use_eabi_hardfloat()) {
3508    vmov(r0, r1, dreg1);
3509    vmov(r2, r3, dreg2);
3510  }
3511}
3512
3513
3514void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
3515                                             Register reg) {
3516  ASSERT(dreg.is(d0));
3517  if (use_eabi_hardfloat()) {
3518    Move(r0, reg);
3519  } else {
3520    Move(r2, reg);
3521    vmov(r0, r1, dreg);
3522  }
3523}
3524
3525
3526void MacroAssembler::CallCFunction(ExternalReference function,
3527                                   int num_reg_arguments,
3528                                   int num_double_arguments) {
3529  mov(ip, Operand(function));
3530  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3531}
3532
3533
3534void MacroAssembler::CallCFunction(Register function,
3535                                   int num_reg_arguments,
3536                                   int num_double_arguments) {
3537  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3538}
3539
3540
3541void MacroAssembler::CallCFunction(ExternalReference function,
3542                                   int num_arguments) {
3543  CallCFunction(function, num_arguments, 0);
3544}
3545
3546
3547void MacroAssembler::CallCFunction(Register function,
3548                                   int num_arguments) {
3549  CallCFunction(function, num_arguments, 0);
3550}
3551
3552
3553void MacroAssembler::CallCFunctionHelper(Register function,
3554                                         int num_reg_arguments,
3555                                         int num_double_arguments) {
3556  ASSERT(has_frame());
3557  // Make sure that the stack is aligned before calling a C function unless
3558  // running in the simulator. The simulator has its own alignment check which
3559  // provides more information.
3560#if V8_HOST_ARCH_ARM
3561  if (emit_debug_code()) {
3562    int frame_alignment = OS::ActivationFrameAlignment();
3563    int frame_alignment_mask = frame_alignment - 1;
3564    if (frame_alignment > kPointerSize) {
3565      ASSERT(IsPowerOf2(frame_alignment));
3566      Label alignment_as_expected;
3567      tst(sp, Operand(frame_alignment_mask));
3568      b(eq, &alignment_as_expected);
3569      // Don't use Check here, as it will call Runtime_Abort possibly
3570      // re-entering here.
3571      stop("Unexpected alignment");
3572      bind(&alignment_as_expected);
3573    }
3574  }
3575#endif
3576
3577  // Just call directly. The function called cannot cause a GC, or
3578  // allow preemption, so the return address in the link register
3579  // stays correct.
3580  Call(function);
3581  int stack_passed_arguments = CalculateStackPassedWords(
3582      num_reg_arguments, num_double_arguments);
3583  if (ActivationFrameAlignment() > kPointerSize) {
3584    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
3585  } else {
3586    add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
3587  }
3588}
3589
3590
3591void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3592                               Register result) {
3593  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3594  const int32_t kPCRegOffset = 2 * kPointerSize;
3595  ldr(result, MemOperand(ldr_location));
3596  if (emit_debug_code()) {
3597    // Check that the instruction is a ldr reg, [pc + offset] .
3598    and_(result, result, Operand(kLdrPCPattern));
3599    cmp(result, Operand(kLdrPCPattern));
3600    Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
3601    // Result was clobbered. Restore it.
3602    ldr(result, MemOperand(ldr_location));
3603  }
3604  // Get the address of the constant.
3605  and_(result, result, Operand(kLdrOffsetMask));
3606  add(result, ldr_location, Operand(result));
3607  add(result, result, Operand(kPCRegOffset));
3608}
3609
3610
3611void MacroAssembler::CheckPageFlag(
3612    Register object,
3613    Register scratch,
3614    int mask,
3615    Condition cc,
3616    Label* condition_met) {
3617  Bfc(scratch, object, 0, kPageSizeBits);
3618  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3619  tst(scratch, Operand(mask));
3620  b(cc, condition_met);
3621}
3622
3623
3624void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
3625                                        Register scratch,
3626                                        Label* if_deprecated) {
3627  if (map->CanBeDeprecated()) {
3628    mov(scratch, Operand(map));
3629    ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3630    tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
3631    b(ne, if_deprecated);
3632  }
3633}
3634
3635
3636void MacroAssembler::JumpIfBlack(Register object,
3637                                 Register scratch0,
3638                                 Register scratch1,
3639                                 Label* on_black) {
3640  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
3641  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3642}
3643
3644
3645void MacroAssembler::HasColor(Register object,
3646                              Register bitmap_scratch,
3647                              Register mask_scratch,
3648                              Label* has_color,
3649                              int first_bit,
3650                              int second_bit) {
3651  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3652
3653  GetMarkBits(object, bitmap_scratch, mask_scratch);
3654
3655  Label other_color, word_boundary;
3656  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3657  tst(ip, Operand(mask_scratch));
3658  b(first_bit == 1 ? eq : ne, &other_color);
3659  // Shift left 1 by adding.
3660  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
3661  b(eq, &word_boundary);
3662  tst(ip, Operand(mask_scratch));
3663  b(second_bit == 1 ? ne : eq, has_color);
3664  jmp(&other_color);
3665
3666  bind(&word_boundary);
3667  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
3668  tst(ip, Operand(1));
3669  b(second_bit == 1 ? ne : eq, has_color);
3670  bind(&other_color);
3671}
3672
3673
3674// Detect some, but not all, common pointer-free objects.  This is used by the
3675// incremental write barrier which doesn't care about oddballs (they are always
3676// marked black immediately so this code is not hit).
3677void MacroAssembler::JumpIfDataObject(Register value,
3678                                      Register scratch,
3679                                      Label* not_data_object) {
3680  Label is_data_object;
3681  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3682  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3683  b(eq, &is_data_object);
3684  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3685  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3686  // If it's a string and it's not a cons string then it's an object containing
3687  // no GC pointers.
3688  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3689  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3690  b(ne, not_data_object);
3691  bind(&is_data_object);
3692}
3693
3694
3695void MacroAssembler::GetMarkBits(Register addr_reg,
3696                                 Register bitmap_reg,
3697                                 Register mask_reg) {
3698  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3699  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
3700  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3701  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3702  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
3703  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
3704  mov(ip, Operand(1));
3705  mov(mask_reg, Operand(ip, LSL, mask_reg));
3706}
3707
3708
3709void MacroAssembler::EnsureNotWhite(
3710    Register value,
3711    Register bitmap_scratch,
3712    Register mask_scratch,
3713    Register load_scratch,
3714    Label* value_is_white_and_not_data) {
3715  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3716  GetMarkBits(value, bitmap_scratch, mask_scratch);
3717
3718  // If the value is black or grey we don't need to do anything.
3719  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3720  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3721  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3722  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3723
3724  Label done;
3725
3726  // Since both black and grey have a 1 in the first position and white does
3727  // not have a 1 there we only need to check one bit.
3728  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3729  tst(mask_scratch, load_scratch);
3730  b(ne, &done);
3731
3732  if (emit_debug_code()) {
3733    // Check for impossible bit pattern.
3734    Label ok;
3735    // LSL may overflow, making the check conservative.
3736    tst(load_scratch, Operand(mask_scratch, LSL, 1));
3737    b(eq, &ok);
3738    stop("Impossible marking bit pattern");
3739    bind(&ok);
3740  }
3741
3742  // Value is white.  We check whether it is data that doesn't need scanning.
3743  // Currently only checks for HeapNumber and non-cons strings.
3744  Register map = load_scratch;  // Holds map while checking type.
3745  Register length = load_scratch;  // Holds length of object after testing type.
3746  Label is_data_object;
3747
3748  // Check for heap-number
3749  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3750  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3751  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3752  b(eq, &is_data_object);
3753
3754  // Check for strings.
3755  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3756  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3757  // If it's a string and it's not a cons string then it's an object containing
3758  // no GC pointers.
3759  Register instance_type = load_scratch;
3760  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3761  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3762  b(ne, value_is_white_and_not_data);
3763  // It's a non-indirect (non-cons and non-slice) string.
3764  // If it's external, the length is just ExternalString::kSize.
3765  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3766  // External strings are the only ones with the kExternalStringTag bit
3767  // set.
3768  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
3769  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
3770  tst(instance_type, Operand(kExternalStringTag));
3771  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3772  b(ne, &is_data_object);
3773
3774  // Sequential string, either ASCII or UC16.
3775  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
3776  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
3777  // getting the length multiplied by 2.
3778  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3779  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
3780  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
3781  tst(instance_type, Operand(kStringEncodingMask));
3782  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
3783  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3784  and_(length, length, Operand(~kObjectAlignmentMask));
3785
3786  bind(&is_data_object);
3787  // Value is a data object, and it is white.  Mark it black.  Since we know
3788  // that the object is white we can make it black by flipping one bit.
3789  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3790  orr(ip, ip, Operand(mask_scratch));
3791  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3792
3793  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3794  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3795  add(ip, ip, Operand(length));
3796  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3797
3798  bind(&done);
3799}
3800
3801
3802void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3803  Usat(output_reg, 8, Operand(input_reg));
3804}
3805
3806
3807void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3808                                        DwVfpRegister input_reg,
3809                                        LowDwVfpRegister double_scratch) {
3810  Label above_zero;
3811  Label done;
3812  Label in_bounds;
3813
3814  VFPCompareAndSetFlags(input_reg, 0.0);
3815  b(gt, &above_zero);
3816
3817  // Double value is less than zero, NaN or Inf, return 0.
3818  mov(result_reg, Operand::Zero());
3819  b(al, &done);
3820
3821  // Double value is >= 255, return 255.
3822  bind(&above_zero);
3823  Vmov(double_scratch, 255.0, result_reg);
3824  VFPCompareAndSetFlags(input_reg, double_scratch);
3825  b(le, &in_bounds);
3826  mov(result_reg, Operand(255));
3827  b(al, &done);
3828
3829  // In 0-255 range, round and truncate.
3830  bind(&in_bounds);
3831  // Save FPSCR.
3832  vmrs(ip);
3833  // Set rounding mode to round to the nearest integer by clearing bits[23:22].
3834  bic(result_reg, ip, Operand(kVFPRoundingModeMask));
3835  vmsr(result_reg);
3836  vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
3837  vmov(result_reg, double_scratch.low());
3838  // Restore FPSCR.
3839  vmsr(ip);
3840  bind(&done);
3841}
3842
3843
3844void MacroAssembler::Throw(BailoutReason reason) {
3845  Label throw_start;
3846  bind(&throw_start);
3847#ifdef DEBUG
3848  const char* msg = GetBailoutReason(reason);
3849  if (msg != NULL) {
3850    RecordComment("Throw message: ");
3851    RecordComment(msg);
3852  }
3853#endif
3854
3855  mov(r0, Operand(Smi::FromInt(reason)));
3856  push(r0);
3857  // Disable stub call restrictions to always allow calls to throw.
3858  if (!has_frame_) {
3859    // We don't actually want to generate a pile of code for this, so just
3860    // claim there is a stack frame, without generating one.
3861    FrameScope scope(this, StackFrame::NONE);
3862    CallRuntime(Runtime::kThrowMessage, 1);
3863  } else {
3864    CallRuntime(Runtime::kThrowMessage, 1);
3865  }
3866  // will not return here
3867  if (is_const_pool_blocked()) {
3868    // If the calling code cares throw the exact number of
3869    // instructions generated, we insert padding here to keep the size
3870    // of the ThrowMessage macro constant.
3871    static const int kExpectedThrowMessageInstructions = 10;
3872    int throw_instructions = InstructionsGeneratedSince(&throw_start);
3873    ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
3874    while (throw_instructions++ < kExpectedThrowMessageInstructions) {
3875      nop();
3876    }
3877  }
3878}
3879
3880
3881void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
3882  Label L;
3883  b(NegateCondition(cc), &L);
3884  Throw(reason);
3885  // will not return here
3886  bind(&L);
3887}
3888
3889
3890void MacroAssembler::LoadInstanceDescriptors(Register map,
3891                                             Register descriptors) {
3892  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3893}
3894
3895
3896void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3897  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3898  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3899}
3900
3901
3902void MacroAssembler::EnumLength(Register dst, Register map) {
3903  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3904  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
3905  and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
3906}
3907
3908
3909void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3910  Register  empty_fixed_array_value = r6;
3911  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3912  Label next, start;
3913  mov(r2, r0);
3914
3915  // Check if the enum length field is properly initialized, indicating that
3916  // there is an enum cache.
3917  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3918
3919  EnumLength(r3, r1);
3920  cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3921  b(eq, call_runtime);
3922
3923  jmp(&start);
3924
3925  bind(&next);
3926  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
3927
3928  // For all objects but the receiver, check that the cache is empty.
3929  EnumLength(r3, r1);
3930  cmp(r3, Operand(Smi::FromInt(0)));
3931  b(ne, call_runtime);
3932
3933  bind(&start);
3934
3935  // Check that there are no elements. Register r2 contains the current JS
3936  // object we've reached through the prototype chain.
3937  ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
3938  cmp(r2, empty_fixed_array_value);
3939  b(ne, call_runtime);
3940
3941  ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
3942  cmp(r2, null_value);
3943  b(ne, &next);
3944}
3945
3946
3947void MacroAssembler::TestJSArrayForAllocationMemento(
3948    Register receiver_reg,
3949    Register scratch_reg,
3950    Label* no_memento_found) {
3951  ExternalReference new_space_start =
3952      ExternalReference::new_space_start(isolate());
3953  ExternalReference new_space_allocation_top =
3954      ExternalReference::new_space_allocation_top_address(isolate());
3955  add(scratch_reg, receiver_reg,
3956      Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3957  cmp(scratch_reg, Operand(new_space_start));
3958  b(lt, no_memento_found);
3959  mov(ip, Operand(new_space_allocation_top));
3960  ldr(ip, MemOperand(ip));
3961  cmp(scratch_reg, ip);
3962  b(gt, no_memento_found);
3963  ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3964  cmp(scratch_reg,
3965      Operand(isolate()->factory()->allocation_memento_map()));
3966}
3967
3968
3969Register GetRegisterThatIsNotOneOf(Register reg1,
3970                                   Register reg2,
3971                                   Register reg3,
3972                                   Register reg4,
3973                                   Register reg5,
3974                                   Register reg6) {
3975  RegList regs = 0;
3976  if (reg1.is_valid()) regs |= reg1.bit();
3977  if (reg2.is_valid()) regs |= reg2.bit();
3978  if (reg3.is_valid()) regs |= reg3.bit();
3979  if (reg4.is_valid()) regs |= reg4.bit();
3980  if (reg5.is_valid()) regs |= reg5.bit();
3981  if (reg6.is_valid()) regs |= reg6.bit();
3982
3983  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
3984    Register candidate = Register::FromAllocationIndex(i);
3985    if (regs & candidate.bit()) continue;
3986    return candidate;
3987  }
3988  UNREACHABLE();
3989  return no_reg;
3990}
3991
3992
3993void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3994    Register object,
3995    Register scratch0,
3996    Register scratch1,
3997    Label* found) {
3998  ASSERT(!scratch1.is(scratch0));
3999  Factory* factory = isolate()->factory();
4000  Register current = scratch0;
4001  Label loop_again;
4002
4003  // scratch contained elements pointer.
4004  mov(current, object);
4005
4006  // Loop based on the map going up the prototype chain.
4007  bind(&loop_again);
4008  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4009  ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4010  Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
4011  cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
4012  b(eq, found);
4013  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4014  cmp(current, Operand(factory->null_value()));
4015  b(ne, &loop_again);
4016}
4017
4018
4019#ifdef DEBUG
4020bool AreAliased(Register reg1,
4021                Register reg2,
4022                Register reg3,
4023                Register reg4,
4024                Register reg5,
4025                Register reg6) {
4026  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4027    reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
4028
4029  RegList regs = 0;
4030  if (reg1.is_valid()) regs |= reg1.bit();
4031  if (reg2.is_valid()) regs |= reg2.bit();
4032  if (reg3.is_valid()) regs |= reg3.bit();
4033  if (reg4.is_valid()) regs |= reg4.bit();
4034  if (reg5.is_valid()) regs |= reg5.bit();
4035  if (reg6.is_valid()) regs |= reg6.bit();
4036  int n_of_non_aliasing_regs = NumRegs(regs);
4037
4038  return n_of_valid_regs != n_of_non_aliasing_regs;
4039}
4040#endif
4041
4042
4043CodePatcher::CodePatcher(byte* address,
4044                         int instructions,
4045                         FlushICache flush_cache)
4046    : address_(address),
4047      size_(instructions * Assembler::kInstrSize),
4048      masm_(NULL, address, size_ + Assembler::kGap),
4049      flush_cache_(flush_cache) {
4050  // Create a new macro assembler pointing to the address of the code to patch.
4051  // The size is adjusted with kGap on order for the assembler to generate size
4052  // bytes of instructions without failing with buffer size constraints.
4053  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4054}
4055
4056
4057CodePatcher::~CodePatcher() {
4058  // Indicate that code has changed.
4059  if (flush_cache_ == FLUSH) {
4060    CPU::FlushICache(address_, size_);
4061  }
4062
4063  // Check that the code was patched as expected.
4064  ASSERT(masm_.pc_ == address_ + size_);
4065  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4066}
4067
4068
4069void CodePatcher::Emit(Instr instr) {
4070  masm()->emit(instr);
4071}
4072
4073
4074void CodePatcher::Emit(Address addr) {
4075  masm()->emit(reinterpret_cast<Instr>(addr));
4076}
4077
4078
4079void CodePatcher::EmitCondition(Condition cond) {
4080  Instr instr = Assembler::instr_at(masm_.pc_);
4081  instr = (instr & ~kCondMask) | cond;
4082  masm_.emit(instr);
4083}
4084
4085
4086} }  // namespace v8::internal
4087
4088#endif  // V8_TARGET_ARCH_ARM
4089