macro-assembler-arm.cc revision 0d5e116f6aee03185f237311a943491bb079a768
1// Copyright 2006-2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include <limits.h>  // For LONG_MIN, LONG_MAX.
29
30#include "v8.h"
31
32#if defined(V8_TARGET_ARCH_ARM)
33
34#include "bootstrapper.h"
35#include "codegen-inl.h"
36#include "debug.h"
37#include "runtime.h"
38
39namespace v8 {
40namespace internal {
41
42MacroAssembler::MacroAssembler(void* buffer, int size)
43    : Assembler(buffer, size),
44      generating_stub_(false),
45      allow_stub_calls_(true),
46      code_object_(Heap::undefined_value()) {
47}
48
49
// We always generate ARM code, never Thumb code, even if V8 is compiled to
// Thumb, so we require interworking support.
52#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
53#error "flag -mthumb-interwork missing"
54#endif
55
56
// We do not support Thumb interworking on an ARM architecture that does not
// support the blx instruction (below v5t).  If you know what CPU you are
// compiling for you can use -march=armv7 or similar.
60#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
61# error "For thumb inter-working we require an architecture which supports blx"
62#endif
63
64
65// Using bx does not yield better code, so use it only when required
66#if defined(USE_THUMB_INTERWORK)
67#define USE_BX 1
68#endif
69
70
71void MacroAssembler::Jump(Register target, Condition cond) {
72#if USE_BX
73  bx(target, cond);
74#else
75  mov(pc, Operand(target), LeaveCC, cond);
76#endif
77}
78
79
80void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
81                          Condition cond) {
82#if USE_BX
83  mov(ip, Operand(target, rmode), LeaveCC, cond);
84  bx(ip, cond);
85#else
86  mov(pc, Operand(target, rmode), LeaveCC, cond);
87#endif
88}
89
90
91void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
92                          Condition cond) {
93  ASSERT(!RelocInfo::IsCodeTarget(rmode));
94  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
95}
96
97
98void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
99                          Condition cond) {
100  ASSERT(RelocInfo::IsCodeTarget(rmode));
101  // 'code' is always generated ARM code, never THUMB code
102  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
103}
104
105
106void MacroAssembler::Call(Register target, Condition cond) {
107#if USE_BLX
108  blx(target, cond);
109#else
  // Set lr for return at current pc + 8.
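  // (Reading pc in ARM state yields the address of the current instruction
  // plus 8, so lr ends up pointing just past the following mov to pc, which
  // is exactly the return address.)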
111  mov(lr, Operand(pc), LeaveCC, cond);
112  mov(pc, Operand(target), LeaveCC, cond);
113#endif
114}
115
116
117void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
118                          Condition cond) {
119#if USE_BLX
  // On ARMv5 and later the recommended call sequence is:
121  //  ldr ip, [pc, #...]
122  //  blx ip
123
  // The two instructions (ldr and blx) could be separated by a constant
  // pool and the code would still work. The issue comes from the patching
  // code, which expects the ldr to be just above the blx.
127  { BlockConstPoolScope block_const_pool(this);
    // Statement positions are expected to be recorded when the target
    // address is loaded. The mov method will automatically record positions
    // when pc is the target; since that is not the case here, we have to do
    // it explicitly.
132    WriteRecordedPositions();
133
134    mov(ip, Operand(target, rmode), LeaveCC, cond);
135    blx(ip, cond);
136  }
137
138  ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
139#else
140  // Set lr for return at current pc + 8.
141  mov(lr, Operand(pc), LeaveCC, cond);
142  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
143  mov(pc, Operand(target, rmode), LeaveCC, cond);
144
145  ASSERT(kCallTargetAddressOffset == kInstrSize);
146#endif
147}
148
149
150void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
151                          Condition cond) {
152  ASSERT(!RelocInfo::IsCodeTarget(rmode));
153  Call(reinterpret_cast<intptr_t>(target), rmode, cond);
154}
155
156
157void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
158                          Condition cond) {
159  ASSERT(RelocInfo::IsCodeTarget(rmode));
160  // 'code' is always generated ARM code, never THUMB code
161  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
162}
163
164
165void MacroAssembler::Ret(Condition cond) {
166#if USE_BX
167  bx(lr, cond);
168#else
169  mov(pc, Operand(lr), LeaveCC, cond);
170#endif
171}
172
173
174void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
175  LoadRoot(ip, Heap::kStackLimitRootIndex);
176  cmp(sp, Operand(ip));
177  b(lo, on_stack_overflow);
178}
179
180
181void MacroAssembler::Drop(int count, Condition cond) {
182  if (count > 0) {
183    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
184  }
185}
186
187
188void MacroAssembler::Swap(Register reg1,
189                          Register reg2,
190                          Register scratch,
191                          Condition cond) {
192  if (scratch.is(no_reg)) {
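    // No scratch register was provided, so swap in place using the classic
    // three-EOR trick.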
193    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
194    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
195    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
196  } else {
197    mov(scratch, reg1, LeaveCC, cond);
198    mov(reg1, reg2, LeaveCC, cond);
199    mov(reg2, scratch, LeaveCC, cond);
200  }
201}
202
203
204void MacroAssembler::Call(Label* target) {
205  bl(target);
206}
207
208
209void MacroAssembler::Move(Register dst, Handle<Object> value) {
210  mov(dst, Operand(value));
211}
212
213
214void MacroAssembler::Move(Register dst, Register src) {
215  if (!dst.is(src)) {
216    mov(dst, src);
217  }
218}
219
220
221void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
222                         Condition cond) {
223  if (!CpuFeatures::IsSupported(ARMv7) || src2.is_single_instruction()) {
224    and_(dst, src1, src2, LeaveCC, cond);
225    return;
226  }
227  int32_t immediate = src2.immediate();
228  if (immediate == 0) {
229    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
230    return;
231  }
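  // A mask of the form 2^n - 1 (contiguous low bits) is a zero-extending
  // bitfield extract, which ubfx can encode even when the constant is not a
  // valid ARM immediate.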
232  if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
233    ubfx(dst, src1, 0, WhichPowerOf2(immediate + 1), cond);
234    return;
235  }
236  and_(dst, src1, src2, LeaveCC, cond);
237}
238
239
240void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
241                          Condition cond) {
242  ASSERT(lsb < 32);
243  if (!CpuFeatures::IsSupported(ARMv7)) {
244    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
245    and_(dst, src1, Operand(mask), LeaveCC, cond);
246    if (lsb != 0) {
247      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
248    }
249  } else {
250    ubfx(dst, src1, lsb, width, cond);
251  }
252}
253
254
255void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
256                          Condition cond) {
257  ASSERT(lsb < 32);
258  if (!CpuFeatures::IsSupported(ARMv7)) {
259    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
260    and_(dst, src1, Operand(mask), LeaveCC, cond);
261    int shift_up = 32 - lsb - width;
262    int shift_down = lsb + shift_up;
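    // Shift the field up so its top bit lands in bit 31, then arithmetic
    // shift back down to sign-extend it, emulating sbfx.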
263    if (shift_up != 0) {
264      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
265    }
266    if (shift_down != 0) {
267      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
268    }
269  } else {
270    sbfx(dst, src1, lsb, width, cond);
271  }
272}
273
274
275void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
276  ASSERT(lsb < 32);
277  if (!CpuFeatures::IsSupported(ARMv7)) {
278    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
279    bic(dst, dst, Operand(mask));
280  } else {
281    bfc(dst, lsb, width, cond);
282  }
283}
284
285
286void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
287                          Condition cond) {
288  if (!CpuFeatures::IsSupported(ARMv7)) {
289    ASSERT(!dst.is(pc) && !src.rm().is(pc));
290    ASSERT((satpos >= 0) && (satpos <= 31));
291
292    // These asserts are required to ensure compatibility with the ARMv7
293    // implementation.
294    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
295    ASSERT(src.rs().is(no_reg));
296
297    Label done;
298    int satval = (1 << satpos) - 1;
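    // Saturate to the range [0, satval]: values above satval clamp to
    // satval, negative values clamp to 0.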
299
300    if (cond != al) {
301      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
302    }
303    if (!(src.is_reg() && dst.is(src.rm()))) {
304      mov(dst, src);
305    }
306    tst(dst, Operand(~satval));
307    b(eq, &done);
308    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi);  // 0 if negative.
309    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
310    bind(&done);
311  } else {
312    usat(dst, satpos, src, cond);
313  }
314}
315
316
317void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
318  // Empty the const pool.
319  CheckConstPool(true, true);
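  // Reading pc in the add below yields the address of the add plus 8, which
  // is the first branch after the nop; the Smi-tagged index, shifted left by
  // kInstrSizeLog2 - kSmiTagSize, becomes a byte offset of index * kInstrSize.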
320  add(pc, pc, Operand(index,
321                      LSL,
322                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
323  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
324  nop();  // Jump table alignment.
325  for (int i = 0; i < targets.length(); i++) {
326    b(targets[i]);
327  }
328}
329
330
331void MacroAssembler::LoadRoot(Register destination,
332                              Heap::RootListIndex index,
333                              Condition cond) {
334  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
335}
336
337
338void MacroAssembler::StoreRoot(Register source,
339                               Heap::RootListIndex index,
340                               Condition cond) {
341  str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
342}
343
344
345void MacroAssembler::RecordWriteHelper(Register object,
346                                       Register address,
347                                       Register scratch) {
348  if (FLAG_debug_code) {
349    // Check that the object is not in new space.
350    Label not_in_new_space;
351    InNewSpace(object, scratch, ne, &not_in_new_space);
352    Abort("new-space object passed to RecordWriteHelper");
353    bind(&not_in_new_space);
354  }
355
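  // Pages are aligned to 2^kPageSizeBits, so clearing the low kPageSizeBits
  // of the object pointer yields the page start, and bits [kRegionSizeLog2,
  // kPageSizeBits) of the slot address give the region whose dirty mark is set.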
356  // Calculate page address.
357  Bfc(object, 0, kPageSizeBits);
358
359  // Calculate region number.
360  Ubfx(address, address, Page::kRegionSizeLog2,
361       kPageSizeBits - Page::kRegionSizeLog2);
362
363  // Mark region dirty.
364  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
365  mov(ip, Operand(1));
366  orr(scratch, scratch, Operand(ip, LSL, address));
367  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
368}
369
370
371void MacroAssembler::InNewSpace(Register object,
372                                Register scratch,
373                                Condition cc,
374                                Label* branch) {
375  ASSERT(cc == eq || cc == ne);
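  // New space is a single contiguous, aligned region, so masking the address
  // with new_space_mask() and comparing against new_space_start() is enough
  // to decide membership.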
376  and_(scratch, object, Operand(ExternalReference::new_space_mask()));
377  cmp(scratch, Operand(ExternalReference::new_space_start()));
378  b(cc, branch);
379}
380
381
// Will clobber 4 registers: object, scratch0, scratch1, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
385void MacroAssembler::RecordWrite(Register object,
386                                 Operand offset,
387                                 Register scratch0,
388                                 Register scratch1) {
389  // The compiled code assumes that record write doesn't change the
390  // context register, so we check that none of the clobbered
391  // registers are cp.
392  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
393
394  Label done;
395
396  // First, test that the object is not in the new space.  We cannot set
397  // region marks for new space pages.
398  InNewSpace(object, scratch0, eq, &done);
399
400  // Add offset into the object.
401  add(scratch0, object, offset);
402
403  // Record the actual write.
404  RecordWriteHelper(object, scratch0, scratch1);
405
406  bind(&done);
407
408  // Clobber all input registers when running with the debug-code flag
409  // turned on to provoke errors.
410  if (FLAG_debug_code) {
411    mov(object, Operand(BitCast<int32_t>(kZapValue)));
412    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
413    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
414  }
415}
416
417
418// Will clobber 4 registers: object, address, scratch, ip.  The
419// register 'object' contains a heap object pointer.  The heap object
420// tag is shifted away.
421void MacroAssembler::RecordWrite(Register object,
422                                 Register address,
423                                 Register scratch) {
424  // The compiled code assumes that record write doesn't change the
425  // context register, so we check that none of the clobbered
426  // registers are cp.
427  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
428
429  Label done;
430
431  // First, test that the object is not in the new space.  We cannot set
432  // region marks for new space pages.
433  InNewSpace(object, scratch, eq, &done);
434
435  // Record the actual write.
436  RecordWriteHelper(object, address, scratch);
437
438  bind(&done);
439
440  // Clobber all input registers when running with the debug-code flag
441  // turned on to provoke errors.
442  if (FLAG_debug_code) {
443    mov(object, Operand(BitCast<int32_t>(kZapValue)));
444    mov(address, Operand(BitCast<int32_t>(kZapValue)));
445    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
446  }
447}
448
449
450void MacroAssembler::Ldrd(Register dst1, Register dst2,
451                          const MemOperand& src, Condition cond) {
452  ASSERT(src.rm().is(no_reg));
453  ASSERT(!dst1.is(lr));  // r14.
454  ASSERT_EQ(0, dst1.code() % 2);
455  ASSERT_EQ(dst1.code() + 1, dst2.code());
456
457  // Generate two ldr instructions if ldrd is not available.
458  if (CpuFeatures::IsSupported(ARMv7)) {
459    CpuFeatures::Scope scope(ARMv7);
460    ldrd(dst1, dst2, src, cond);
461  } else {
462    MemOperand src2(src);
463    src2.set_offset(src2.offset() + 4);
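    // If the base register doubles as the first destination, load the second
    // word first so the base is not clobbered before the second load.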
464    if (dst1.is(src.rn())) {
465      ldr(dst2, src2, cond);
466      ldr(dst1, src, cond);
467    } else {
468      ldr(dst1, src, cond);
469      ldr(dst2, src2, cond);
470    }
471  }
472}
473
474
475void MacroAssembler::Strd(Register src1, Register src2,
476                          const MemOperand& dst, Condition cond) {
477  ASSERT(dst.rm().is(no_reg));
478  ASSERT(!src1.is(lr));  // r14.
479  ASSERT_EQ(0, src1.code() % 2);
480  ASSERT_EQ(src1.code() + 1, src2.code());
481
482  // Generate two str instructions if strd is not available.
483  if (CpuFeatures::IsSupported(ARMv7)) {
484    CpuFeatures::Scope scope(ARMv7);
485    strd(src1, src2, dst, cond);
486  } else {
487    MemOperand dst2(dst);
488    dst2.set_offset(dst2.offset() + 4);
489    str(src1, dst, cond);
490    str(src2, dst2, cond);
491  }
492}
493
494
495void MacroAssembler::EnterFrame(StackFrame::Type type) {
496  // r0-r3: preserved
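  // After the stm and the two pushes below, the frame holds, from highest to
  // lowest address: lr, caller fp, cp, frame type (as a Smi), code object;
  // fp is then adjusted to point at the saved caller fp slot.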
497  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
498  mov(ip, Operand(Smi::FromInt(type)));
499  push(ip);
500  mov(ip, Operand(CodeObject()));
501  push(ip);
502  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
503}
504
505
506void MacroAssembler::LeaveFrame(StackFrame::Type type) {
507  // r0: preserved
508  // r1: preserved
509  // r2: preserved
510
511  // Drop the execution stack down to the frame pointer and restore
512  // the caller frame pointer and return address.
513  mov(sp, fp);
514  ldm(ia_w, sp, fp.bit() | lr.bit());
515}
516
517
518void MacroAssembler::EnterExitFrame() {
519  // Compute the argv pointer and keep it in a callee-saved register.
520  // r0 is argc.
521  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
522  sub(r6, r6, Operand(kPointerSize));
523
  // Compute the callee's stack pointer before making changes and save it in
  // the ip register so that it is restored as the sp register on exit,
  // thereby popping the args.
527
528  // ip = sp + kPointerSize * #args;
529  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
530
  // Prepare the stack to be aligned when calling into C. After this point
  // there are 5 pushes before the call into C, so the stack needs to be
  // aligned after 5 pushes.
534  int frame_alignment = ActivationFrameAlignment();
535  int frame_alignment_mask = frame_alignment - 1;
536  if (frame_alignment != kPointerSize) {
537    // The following code needs to be more general if this assert does not hold.
538    ASSERT(frame_alignment == 2 * kPointerSize);
539    // With 5 pushes left the frame must be unaligned at this point.
540    mov(r7, Operand(Smi::FromInt(0)));
541    tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
542    push(r7, eq);  // Push if aligned to make it unaligned.
543  }
544
545  // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
546  stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
  mov(fp, Operand(sp));  // Set up new frame pointer.
548
549  mov(ip, Operand(CodeObject()));
550  push(ip);  // Accessed from ExitFrame::code_slot.
551
552  // Save the frame pointer and the context in top.
553  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
554  str(fp, MemOperand(ip));
555  mov(ip, Operand(ExternalReference(Top::k_context_address)));
556  str(cp, MemOperand(ip));
557
  // Set up argc and the builtin function in callee-saved registers.
559  mov(r4, Operand(r0));
560  mov(r5, Operand(r1));
561}
562
563
564void MacroAssembler::InitializeNewString(Register string,
565                                         Register length,
566                                         Heap::RootListIndex map_index,
567                                         Register scratch1,
568                                         Register scratch2) {
569  mov(scratch1, Operand(length, LSL, kSmiTagSize));
570  LoadRoot(scratch2, map_index);
571  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
572  mov(scratch1, Operand(String::kEmptyHashField));
573  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
574  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
575}
576
577
578int MacroAssembler::ActivationFrameAlignment() {
579#if defined(V8_HOST_ARCH_ARM)
580  // Running on the real platform. Use the alignment as mandated by the local
581  // environment.
582  // Note: This will break if we ever start generating snapshots on one ARM
583  // platform for another ARM platform with a different alignment.
584  return OS::ActivationFrameAlignment();
585#else  // defined(V8_HOST_ARCH_ARM)
586  // If we are using the simulator then we should always align to the expected
587  // alignment. As the simulator is used to generate snapshots we do not know
588  // if the target platform will need alignment, so this is controlled from a
589  // flag.
590  return FLAG_sim_stack_alignment;
591#endif  // defined(V8_HOST_ARCH_ARM)
592}
593
594
595void MacroAssembler::LeaveExitFrame() {
596  // Clear top frame.
597  mov(r3, Operand(0, RelocInfo::NONE));
598  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
599  str(r3, MemOperand(ip));
600
601  // Restore current context from top and clear it in debug mode.
602  mov(ip, Operand(ExternalReference(Top::k_context_address)));
603  ldr(cp, MemOperand(ip));
604#ifdef DEBUG
605  str(r3, MemOperand(ip));
606#endif
607
608  // Pop the arguments, restore registers, and return.
609  mov(sp, Operand(fp));  // respect ABI stack constraint
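  // The ldm below reloads fp, sp and pc: sp receives the value saved from ip
  // in EnterExitFrame (caller sp past the arguments), which pops the
  // arguments, and loading pc returns to the caller.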
610  ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
611}
612
613
614void MacroAssembler::InvokePrologue(const ParameterCount& expected,
615                                    const ParameterCount& actual,
616                                    Handle<Code> code_constant,
617                                    Register code_reg,
618                                    Label* done,
619                                    InvokeFlag flag) {
620  bool definitely_matches = false;
621  Label regular_invoke;
622
  // Check whether the expected and actual argument counts match. If not,
  // set up registers according to the ArgumentsAdaptorTrampoline contract:
625  //  r0: actual arguments count
626  //  r1: function (passed through to callee)
627  //  r2: expected arguments count
628  //  r3: callee code entry
629
630  // The code below is made a lot easier because the calling code already sets
631  // up actual and expected registers according to the contract if values are
632  // passed in registers.
633  ASSERT(actual.is_immediate() || actual.reg().is(r0));
634  ASSERT(expected.is_immediate() || expected.reg().is(r2));
635  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
636
637  if (expected.is_immediate()) {
638    ASSERT(actual.is_immediate());
639    if (expected.immediate() == actual.immediate()) {
640      definitely_matches = true;
641    } else {
642      mov(r0, Operand(actual.immediate()));
643      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
644      if (expected.immediate() == sentinel) {
645        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
647        // like we have a match between expected and actual number of
648        // arguments.
649        definitely_matches = true;
650      } else {
651        mov(r2, Operand(expected.immediate()));
652      }
653    }
654  } else {
655    if (actual.is_immediate()) {
656      cmp(expected.reg(), Operand(actual.immediate()));
657      b(eq, &regular_invoke);
658      mov(r0, Operand(actual.immediate()));
659    } else {
660      cmp(expected.reg(), Operand(actual.reg()));
661      b(eq, &regular_invoke);
662    }
663  }
664
665  if (!definitely_matches) {
666    if (!code_constant.is_null()) {
667      mov(r3, Operand(code_constant));
668      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
669    }
670
671    Handle<Code> adaptor =
672        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
673    if (flag == CALL_FUNCTION) {
674      Call(adaptor, RelocInfo::CODE_TARGET);
675      b(done);
676    } else {
677      Jump(adaptor, RelocInfo::CODE_TARGET);
678    }
679    bind(&regular_invoke);
680  }
681}
682
683
684void MacroAssembler::InvokeCode(Register code,
685                                const ParameterCount& expected,
686                                const ParameterCount& actual,
687                                InvokeFlag flag) {
688  Label done;
689
690  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
691  if (flag == CALL_FUNCTION) {
692    Call(code);
693  } else {
694    ASSERT(flag == JUMP_FUNCTION);
695    Jump(code);
696  }
697
  // Continue here if InvokePrologue handles the invocation itself because of
  // mismatched parameter counts.
700  bind(&done);
701}
702
703
704void MacroAssembler::InvokeCode(Handle<Code> code,
705                                const ParameterCount& expected,
706                                const ParameterCount& actual,
707                                RelocInfo::Mode rmode,
708                                InvokeFlag flag) {
709  Label done;
710
711  InvokePrologue(expected, actual, code, no_reg, &done, flag);
712  if (flag == CALL_FUNCTION) {
713    Call(code, rmode);
714  } else {
715    Jump(code, rmode);
716  }
717
  // Continue here if InvokePrologue handles the invocation itself because of
  // mismatched parameter counts.
720  bind(&done);
721}
722
723
724void MacroAssembler::InvokeFunction(Register fun,
725                                    const ParameterCount& actual,
726                                    InvokeFlag flag) {
727  // Contract with called JS functions requires that function is passed in r1.
728  ASSERT(fun.is(r1));
729
730  Register expected_reg = r2;
731  Register code_reg = r3;
732
733  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
734  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
735  ldr(expected_reg,
736      FieldMemOperand(code_reg,
737                      SharedFunctionInfo::kFormalParameterCountOffset));
738  mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
739  ldr(code_reg,
740      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
741
742  ParameterCount expected(expected_reg);
743  InvokeCode(code_reg, expected, actual, flag);
744}
745
746
747void MacroAssembler::InvokeFunction(JSFunction* function,
748                                    const ParameterCount& actual,
749                                    InvokeFlag flag) {
750  ASSERT(function->is_compiled());
751
  // Get the function and set up the context.
753  mov(r1, Operand(Handle<JSFunction>(function)));
754  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
755
756  // Invoke the cached code.
757  Handle<Code> code(function->code());
758  ParameterCount expected(function->shared()->formal_parameter_count());
759  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
760}
761
762
763#ifdef ENABLE_DEBUGGER_SUPPORT
764void MacroAssembler::DebugBreak() {
765  ASSERT(allow_stub_calls());
766  mov(r0, Operand(0, RelocInfo::NONE));
767  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
768  CEntryStub ces(1);
769  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
770}
771#endif
772
773
774void MacroAssembler::PushTryHandler(CodeLocation try_location,
775                                    HandlerType type) {
776  // Adjust this code if not the case.
777  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
778  // The pc (return address) is passed in register lr.
779  if (try_location == IN_JAVASCRIPT) {
780    if (type == TRY_CATCH_HANDLER) {
781      mov(r3, Operand(StackHandler::TRY_CATCH));
782    } else {
783      mov(r3, Operand(StackHandler::TRY_FINALLY));
784    }
785    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
786           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
787           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
788    stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
789    // Save the current handler as the next handler.
790    mov(r3, Operand(ExternalReference(Top::k_handler_address)));
791    ldr(r1, MemOperand(r3));
792    ASSERT(StackHandlerConstants::kNextOffset == 0);
793    push(r1);
794    // Link this handler as the new current one.
795    str(sp, MemOperand(r3));
796  } else {
    // Must preserve r0-r4; r5-r7 are available.
798    ASSERT(try_location == IN_JS_ENTRY);
799    // The frame pointer does not point to a JS frame so we save NULL
800    // for fp. We expect the code throwing an exception to check fp
801    // before dereferencing it to restore the context.
802    mov(ip, Operand(0, RelocInfo::NONE));  // To save a NULL frame pointer.
803    mov(r6, Operand(StackHandler::ENTRY));
804    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
805           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
806           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
807    stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
808    // Save the current handler as the next handler.
809    mov(r7, Operand(ExternalReference(Top::k_handler_address)));
810    ldr(r6, MemOperand(r7));
811    ASSERT(StackHandlerConstants::kNextOffset == 0);
812    push(r6);
813    // Link this handler as the new current one.
814    str(sp, MemOperand(r7));
815  }
816}
817
818
819void MacroAssembler::PopTryHandler() {
820  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
821  pop(r1);
822  mov(ip, Operand(ExternalReference(Top::k_handler_address)));
823  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
824  str(r1, MemOperand(ip));
825}
826
827
828void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
829                                            Register scratch,
830                                            Label* miss) {
831  Label same_contexts;
832
833  ASSERT(!holder_reg.is(scratch));
834  ASSERT(!holder_reg.is(ip));
835  ASSERT(!scratch.is(ip));
836
837  // Load current lexical context from the stack frame.
838  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
839  // In debug mode, make sure the lexical context is set.
840#ifdef DEBUG
841  cmp(scratch, Operand(0, RelocInfo::NONE));
842  Check(ne, "we should not have an empty lexical context");
843#endif
844
845  // Load the global context of the current context.
846  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
847  ldr(scratch, FieldMemOperand(scratch, offset));
848  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
849
850  // Check the context is a global context.
851  if (FLAG_debug_code) {
852    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
855    push(holder_reg);  // Temporarily save holder on the stack.
856    // Read the first word and compare to the global_context_map.
857    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
858    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
859    cmp(holder_reg, ip);
860    Check(eq, "JSGlobalObject::global_context should be a global context.");
861    pop(holder_reg);  // Restore holder.
862  }
863
864  // Check if both contexts are the same.
865  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
866  cmp(scratch, Operand(ip));
867  b(eq, &same_contexts);
868
869  // Check the context is a global context.
870  if (FLAG_debug_code) {
871    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
874    push(holder_reg);  // Temporarily save holder on the stack.
875    mov(holder_reg, ip);  // Move ip to its holding place.
876    LoadRoot(ip, Heap::kNullValueRootIndex);
877    cmp(holder_reg, ip);
878    Check(ne, "JSGlobalProxy::context() should not be null.");
879
880    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
881    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
882    cmp(holder_reg, ip);
883    Check(eq, "JSGlobalObject::global_context should be a global context.");
    // Restoring ip is not needed; it is reloaded below.
885    pop(holder_reg);  // Restore holder.
886    // Restore ip to holder's context.
887    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
888  }
889
890  // Check that the security token in the calling global object is
891  // compatible with the security token in the receiving global
892  // object.
893  int token_offset = Context::kHeaderSize +
894                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
895
896  ldr(scratch, FieldMemOperand(scratch, token_offset));
897  ldr(ip, FieldMemOperand(ip, token_offset));
898  cmp(scratch, Operand(ip));
899  b(ne, miss);
900
901  bind(&same_contexts);
902}
903
904
905void MacroAssembler::AllocateInNewSpace(int object_size,
906                                        Register result,
907                                        Register scratch1,
908                                        Register scratch2,
909                                        Label* gc_required,
910                                        AllocationFlags flags) {
911  ASSERT(!result.is(scratch1));
912  ASSERT(!scratch1.is(scratch2));
913
914  // Make object size into bytes.
915  if ((flags & SIZE_IN_WORDS) != 0) {
916    object_size *= kPointerSize;
917  }
918  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
919
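  // New-space allocation is a bump-pointer scheme: load the current
  // allocation top, advance it by the object size, bail out to gc_required
  // if it would pass the limit, and store the new top back.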
920  // Load address of new object into result and allocation top address into
921  // scratch1.
922  ExternalReference new_space_allocation_top =
923      ExternalReference::new_space_allocation_top_address();
924  mov(scratch1, Operand(new_space_allocation_top));
925  if ((flags & RESULT_CONTAINS_TOP) == 0) {
926    ldr(result, MemOperand(scratch1));
927  } else if (FLAG_debug_code) {
    // Assert that result actually contains top on entry. scratch2 is used
    // immediately below, so this use of scratch2 does not cause a difference
    // in register content between debug and release mode.
931    ldr(scratch2, MemOperand(scratch1));
932    cmp(result, scratch2);
933    Check(eq, "Unexpected allocation top");
934  }
935
936  // Calculate new top and bail out if new space is exhausted. Use result
937  // to calculate the new top.
938  ExternalReference new_space_allocation_limit =
939      ExternalReference::new_space_allocation_limit_address();
940  mov(scratch2, Operand(new_space_allocation_limit));
941  ldr(scratch2, MemOperand(scratch2));
942  add(result, result, Operand(object_size));
943  cmp(result, Operand(scratch2));
944  b(hi, gc_required);
945  str(result, MemOperand(scratch1));
946
947  // Tag and adjust back to start of new object.
948  if ((flags & TAG_OBJECT) != 0) {
949    sub(result, result, Operand(object_size - kHeapObjectTag));
950  } else {
951    sub(result, result, Operand(object_size));
952  }
953}
954
955
956void MacroAssembler::AllocateInNewSpace(Register object_size,
957                                        Register result,
958                                        Register scratch1,
959                                        Register scratch2,
960                                        Label* gc_required,
961                                        AllocationFlags flags) {
962  ASSERT(!result.is(scratch1));
963  ASSERT(!scratch1.is(scratch2));
964
965  // Load address of new object into result and allocation top address into
966  // scratch1.
967  ExternalReference new_space_allocation_top =
968      ExternalReference::new_space_allocation_top_address();
969  mov(scratch1, Operand(new_space_allocation_top));
970  if ((flags & RESULT_CONTAINS_TOP) == 0) {
971    ldr(result, MemOperand(scratch1));
972  } else if (FLAG_debug_code) {
    // Assert that result actually contains top on entry. scratch2 is used
    // immediately below, so this use of scratch2 does not cause a difference
    // in register content between debug and release mode.
976    ldr(scratch2, MemOperand(scratch1));
977    cmp(result, scratch2);
978    Check(eq, "Unexpected allocation top");
979  }
980
981  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. When the object size is given in words, a
  // shift is required to get the number of bytes.
984  ExternalReference new_space_allocation_limit =
985      ExternalReference::new_space_allocation_limit_address();
986  mov(scratch2, Operand(new_space_allocation_limit));
987  ldr(scratch2, MemOperand(scratch2));
988  if ((flags & SIZE_IN_WORDS) != 0) {
989    add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
990  } else {
991    add(result, result, Operand(object_size));
992  }
993  cmp(result, Operand(scratch2));
994  b(hi, gc_required);
995
996  // Update allocation top. result temporarily holds the new top.
997  if (FLAG_debug_code) {
998    tst(result, Operand(kObjectAlignmentMask));
999    Check(eq, "Unaligned allocation in new space");
1000  }
1001  str(result, MemOperand(scratch1));
1002
1003  // Adjust back to start of new object.
1004  if ((flags & SIZE_IN_WORDS) != 0) {
1005    sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));
1006  } else {
1007    sub(result, result, Operand(object_size));
1008  }
1009
1010  // Tag object if requested.
1011  if ((flags & TAG_OBJECT) != 0) {
1012    add(result, result, Operand(kHeapObjectTag));
1013  }
1014}
1015
1016
1017void MacroAssembler::UndoAllocationInNewSpace(Register object,
1018                                              Register scratch) {
1019  ExternalReference new_space_allocation_top =
1020      ExternalReference::new_space_allocation_top_address();
1021
1022  // Make sure the object has no tag before resetting top.
1023  and_(object, object, Operand(~kHeapObjectTagMask));
1024#ifdef DEBUG
  // Check that the object being un-allocated is below the current top.
1026  mov(scratch, Operand(new_space_allocation_top));
1027  ldr(scratch, MemOperand(scratch));
1028  cmp(object, scratch);
1029  Check(lt, "Undo allocation of non allocated memory");
1030#endif
1031  // Write the address of the object to un-allocate as the current top.
1032  mov(scratch, Operand(new_space_allocation_top));
1033  str(object, MemOperand(scratch));
1034}
1035
1036
1037void MacroAssembler::AllocateTwoByteString(Register result,
1038                                           Register length,
1039                                           Register scratch1,
1040                                           Register scratch2,
1041                                           Register scratch3,
1042                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
1045  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1046  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
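  // Adding kObjectAlignmentMask and then clearing the low bits rounds the
  // total size (header included) up to the next object alignment boundary.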
1047  add(scratch1, scratch1,
1048      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1049  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1050
1051  // Allocate two-byte string in new space.
1052  AllocateInNewSpace(scratch1,
1053                     result,
1054                     scratch2,
1055                     scratch3,
1056                     gc_required,
1057                     TAG_OBJECT);
1058
1059  // Set the map, length and hash field.
1060  InitializeNewString(result,
1061                      length,
1062                      Heap::kStringMapRootIndex,
1063                      scratch1,
1064                      scratch2);
1065}
1066
1067
1068void MacroAssembler::AllocateAsciiString(Register result,
1069                                         Register length,
1070                                         Register scratch1,
1071                                         Register scratch2,
1072                                         Register scratch3,
1073                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
1076  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
1077  ASSERT(kCharSize == 1);
1078  add(scratch1, length,
1079      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
1080  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1081
1082  // Allocate ASCII string in new space.
1083  AllocateInNewSpace(scratch1,
1084                     result,
1085                     scratch2,
1086                     scratch3,
1087                     gc_required,
1088                     TAG_OBJECT);
1089
1090  // Set the map, length and hash field.
1091  InitializeNewString(result,
1092                      length,
1093                      Heap::kAsciiStringMapRootIndex,
1094                      scratch1,
1095                      scratch2);
1096}
1097
1098
1099void MacroAssembler::AllocateTwoByteConsString(Register result,
1100                                               Register length,
1101                                               Register scratch1,
1102                                               Register scratch2,
1103                                               Label* gc_required) {
1104  AllocateInNewSpace(ConsString::kSize,
1105                     result,
1106                     scratch1,
1107                     scratch2,
1108                     gc_required,
1109                     TAG_OBJECT);
1110
1111  InitializeNewString(result,
1112                      length,
1113                      Heap::kConsStringMapRootIndex,
1114                      scratch1,
1115                      scratch2);
1116}
1117
1118
1119void MacroAssembler::AllocateAsciiConsString(Register result,
1120                                             Register length,
1121                                             Register scratch1,
1122                                             Register scratch2,
1123                                             Label* gc_required) {
1124  AllocateInNewSpace(ConsString::kSize,
1125                     result,
1126                     scratch1,
1127                     scratch2,
1128                     gc_required,
1129                     TAG_OBJECT);
1130
1131  InitializeNewString(result,
1132                      length,
1133                      Heap::kConsAsciiStringMapRootIndex,
1134                      scratch1,
1135                      scratch2);
1136}
1137
1138
1139void MacroAssembler::CompareObjectType(Register object,
1140                                       Register map,
1141                                       Register type_reg,
1142                                       InstanceType type) {
1143  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
1144  CompareInstanceType(map, type_reg, type);
1145}
1146
1147
1148void MacroAssembler::CompareInstanceType(Register map,
1149                                         Register type_reg,
1150                                         InstanceType type) {
1151  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1152  cmp(type_reg, Operand(type));
1153}
1154
1155
1156void MacroAssembler::CheckMap(Register obj,
1157                              Register scratch,
1158                              Handle<Map> map,
1159                              Label* fail,
1160                              bool is_heap_object) {
1161  if (!is_heap_object) {
1162    BranchOnSmi(obj, fail);
1163  }
1164  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1165  mov(ip, Operand(map));
1166  cmp(scratch, ip);
1167  b(ne, fail);
1168}
1169
1170
1171void MacroAssembler::CheckMap(Register obj,
1172                              Register scratch,
1173                              Heap::RootListIndex index,
1174                              Label* fail,
1175                              bool is_heap_object) {
1176  if (!is_heap_object) {
1177    BranchOnSmi(obj, fail);
1178  }
1179  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1180  LoadRoot(ip, index);
1181  cmp(scratch, ip);
1182  b(ne, fail);
1183}
1184
1185
1186void MacroAssembler::TryGetFunctionPrototype(Register function,
1187                                             Register result,
1188                                             Register scratch,
1189                                             Label* miss) {
1190  // Check that the receiver isn't a smi.
1191  BranchOnSmi(function, miss);
1192
1193  // Check that the function really is a function.  Load map into result reg.
1194  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
1195  b(ne, miss);
1196
1197  // Make sure that the function has an instance prototype.
1198  Label non_instance;
1199  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
1200  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
1201  b(ne, &non_instance);
1202
1203  // Get the prototype or initial map from the function.
1204  ldr(result,
1205      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1206
1207  // If the prototype or initial map is the hole, don't return it and
1208  // simply miss the cache instead. This will allow us to allocate a
1209  // prototype object on-demand in the runtime system.
1210  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
1211  cmp(result, ip);
1212  b(eq, miss);
1213
1214  // If the function does not have an initial map, we're done.
1215  Label done;
1216  CompareObjectType(result, scratch, scratch, MAP_TYPE);
1217  b(ne, &done);
1218
1219  // Get the prototype from the initial map.
1220  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
1221  jmp(&done);
1222
1223  // Non-instance prototype: Fetch prototype from constructor field
1224  // in initial map.
1225  bind(&non_instance);
1226  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
1227
1228  // All done.
1229  bind(&done);
1230}
1231
1232
1233void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
1234  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
1235  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1236}
1237
1238
1239void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
1240  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
1241  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1242}
1243
1244
1245void MacroAssembler::IllegalOperation(int num_arguments) {
1246  if (num_arguments > 0) {
1247    add(sp, sp, Operand(num_arguments * kPointerSize));
1248  }
1249  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
1250}
1251
1252
1253void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // If the hash field contains an array index, pick it out. The assert
  // checks that the constants for the maximum number of digits for an array
  // index cached in the hash field and the number of bits reserved for it
  // do not conflict.
1258  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
1259         (1 << String::kArrayIndexValueBits));
1260  // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
1261  // the low kHashShift bits.
1262  STATIC_ASSERT(kSmiTag == 0);
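  // Extract the cached array index bits and re-tag the result as a Smi by
  // shifting left by kSmiTagSize (valid because kSmiTag is 0).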
1263  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
1264  mov(index, Operand(hash, LSL, kSmiTagSize));
1265}
1266
1267
1268void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
1269                                                       Register outHighReg,
1270                                                       Register outLowReg) {
1271  // ARMv7 VFP3 instructions to implement integer to double conversion.
1272  mov(r7, Operand(inReg, ASR, kSmiTagSize));
1273  vmov(s15, r7);
1274  vcvt_f64_s32(d7, s15);
1275  vmov(outLowReg, outHighReg, d7);
1276}
1277
1278
1279void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
1280                                               DwVfpRegister result,
1281                                               Register scratch1,
1282                                               Register scratch2,
1283                                               Register heap_number_map,
1284                                               SwVfpRegister scratch3,
1285                                               Label* not_number,
1286                                               ObjectToDoubleFlags flags) {
1287  Label done;
1288  if ((flags & OBJECT_NOT_SMI) == 0) {
1289    Label not_smi;
1290    BranchOnNotSmi(object, &not_smi);
1291    // Remove smi tag and convert to double.
1292    mov(scratch1, Operand(object, ASR, kSmiTagSize));
1293    vmov(scratch3, scratch1);
1294    vcvt_f64_s32(result, scratch3);
1295    b(&done);
1296    bind(&not_smi);
1297  }
1298  // Check for heap number and load double value from it.
1299  ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
1300  sub(scratch2, object, Operand(kHeapObjectTag));
1301  cmp(scratch1, heap_number_map);
1302  b(ne, not_number);
1303  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
1304    // If exponent is all ones the number is either a NaN or +/-Infinity.
1305    ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
1306    Sbfx(scratch1,
1307         scratch1,
1308         HeapNumber::kExponentShift,
1309         HeapNumber::kExponentBits);
1310    // All-one value sign extend to -1.
1311    cmp(scratch1, Operand(-1));
1312    b(eq, not_number);
1313  }
1314  vldr(result, scratch2, HeapNumber::kValueOffset);
1315  bind(&done);
1316}
1317
1318
1319void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
1320                                            DwVfpRegister value,
1321                                            Register scratch1,
1322                                            SwVfpRegister scratch2) {
1323  mov(scratch1, Operand(smi, ASR, kSmiTagSize));
1324  vmov(scratch2, scratch1);
1325  vcvt_f64_s32(value, scratch2);
1326}
1327
1328
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branches to 'not_int32' if the double is out of
// the 32-bit signed integer range.
1332void MacroAssembler::ConvertToInt32(Register source,
1333                                    Register dest,
1334                                    Register scratch,
1335                                    Register scratch2,
1336                                    Label *not_int32) {
1337  if (CpuFeatures::IsSupported(VFP3)) {
1338    CpuFeatures::Scope scope(VFP3);
1339    sub(scratch, source, Operand(kHeapObjectTag));
1340    vldr(d0, scratch, HeapNumber::kValueOffset);
1341    vcvt_s32_f64(s0, d0);
1342    vmov(dest, s0);
    // The signed vcvt instruction will saturate to the minimum (0x80000000)
    // or maximum (0x7fffffff) signed 32-bit integer when the double is out
    // of range. When subtracting one, the minimum signed integer becomes the
    // maximum signed integer.
1347    sub(scratch, dest, Operand(1));
1348    cmp(scratch, Operand(LONG_MAX - 1));
1349    // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
1350    b(ge, not_int32);
1351  } else {
1352    // This code is faster for doubles that are in the ranges -0x7fffffff to
1353    // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
1354    // the range of signed int32 values that are not Smis.  Jumps to the label
1355    // 'not_int32' if the double isn't in the range -0x80000000.0 to
1356    // 0x80000000.0 (excluding the endpoints).
1357    Label right_exponent, done;
1358    // Get exponent word.
1359    ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
1360    // Get exponent alone in scratch2.
1361    Ubfx(scratch2,
1362            scratch,
1363            HeapNumber::kExponentShift,
1364            HeapNumber::kExponentBits);
1365    // Load dest with zero.  We use this either for the final shift or
1366    // for the answer.
1367    mov(dest, Operand(0, RelocInfo::NONE));
    // Check whether the exponent matches a 32-bit signed int that is not a
    // Smi. A non-Smi integer is 1.xxx * 2^30, so the exponent is 30 (biased).
    // This is the exponent that we are fastest at and also the highest
    // exponent we can handle here.
1372    const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
1373    // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
1374    // split it up to avoid a constant pool entry.  You can't do that in general
1375    // for cmp because of the overflow flag, but we know the exponent is in the
1376    // range 0-2047 so there is no overflow.
1377    int fudge_factor = 0x400;
1378    sub(scratch2, scratch2, Operand(fudge_factor));
1379    cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
1380    // If we have a match of the int32-but-not-Smi exponent then skip some
1381    // logic.
1382    b(eq, &right_exponent);
1383    // If the exponent is higher than that then go to slow case.  This catches
1384    // numbers that don't fit in a signed int32, infinities and NaNs.
1385    b(gt, not_int32);
1386
1387    // We know the exponent is smaller than 30 (biased).  If it is less than
1388    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
1389    // it rounds to zero.
1390    const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
1391    sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
1392    // Dest already has a Smi zero.
1393    b(lt, &done);
1394
1395    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
1396    // get how much to shift down.
1397    rsb(dest, scratch2, Operand(30));
1398
1399    bind(&right_exponent);
1400    // Get the top bits of the mantissa.
1401    and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
1402    // Put back the implicit 1.
1403    orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
1404    // Shift up the mantissa bits to take up the space the exponent used to
1405    // take. We just orred in the implicit bit so that took care of one and
1406    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
1407    // distance.
1408    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1409    mov(scratch2, Operand(scratch2, LSL, shift_distance));
1410    // Put sign in zero flag.
1411    tst(scratch, Operand(HeapNumber::kSignMask));
1412    // Get the second half of the double. For some exponents we don't
1413    // actually need this because the bits get shifted out again, but
1414    // it's probably slower to test than just to do it.
1415    ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1416    // Shift down 22 bits to get the last 10 bits.
1417    orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
1418    // Move down according to the exponent.
1419    mov(dest, Operand(scratch, LSR, dest));
1420    // Fix sign if sign bit was set.
1421    rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1422    bind(&done);
1423  }
1424}
1425
1426
1427void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1428                                         Register src,
1429                                         int num_least_bits) {
1430  if (CpuFeatures::IsSupported(ARMv7)) {
1431    ubfx(dst, src, kSmiTagSize, num_least_bits);
1432  } else {
1433    mov(dst, Operand(src, ASR, kSmiTagSize));
1434    and_(dst, dst, Operand((1 << num_least_bits) - 1));
1435  }
1436}
1437
1438
1439void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
1440  // All parameters are on the stack.  r0 has the return value after call.
1441
1442  // If the expected number of arguments of the runtime function is
1443  // constant, we check that the actual number of arguments match the
1444  // expectation.
1445  if (f->nargs >= 0 && f->nargs != num_arguments) {
1446    IllegalOperation(num_arguments);
1447    return;
1448  }
1449
1450  // TODO(1236192): Most runtime routines don't need the number of
1451  // arguments passed in because it is constant. At some point we
1452  // should remove this need and make the runtime routine entry code
1453  // smarter.
1454  mov(r0, Operand(num_arguments));
1455  mov(r1, Operand(ExternalReference(f)));
1456  CEntryStub stub(1);
1457  CallStub(&stub);
1458}
1459
1460
1461void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
1462  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
1463}
1464
1465
1466void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1467                                           int num_arguments) {
1468  mov(r0, Operand(num_arguments));
1469  mov(r1, Operand(ext));
1470
1471  CEntryStub stub(1);
1472  CallStub(&stub);
1473}
1474
1475
1476void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1477                                               int num_arguments,
1478                                               int result_size) {
1479  // TODO(1236192): Most runtime routines don't need the number of
1480  // arguments passed in because it is constant. At some point we
1481  // should remove this need and make the runtime routine entry code
1482  // smarter.
1483  mov(r0, Operand(num_arguments));
1484  JumpToExternalReference(ext);
1485}
1486
1487
1488void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1489                                     int num_arguments,
1490                                     int result_size) {
1491  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
1492}
1493
1494
1495void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1496#if defined(__thumb__)
1497  // Thumb mode builtin.
1498  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
1499#endif
1500  mov(r1, Operand(builtin));
1501  CEntryStub stub(1);
1502  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1503}
1504
1505
1506void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
1507                                   InvokeJSFlags flags) {
1508  GetBuiltinEntry(r2, id);
1509  if (flags == CALL_JS) {
1510    Call(r2);
1511  } else {
1512    ASSERT(flags == JUMP_JS);
1513    Jump(r2);
1514  }
1515}
1516
1517
1518void MacroAssembler::GetBuiltinFunction(Register target,
1519                                        Builtins::JavaScript id) {
1520  // Load the builtins object into target register.
1521  ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
1522  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
1523  // Load the JavaScript builtin function from the builtins object.
1524  ldr(target, FieldMemOperand(target,
1525                              JSBuiltinsObject::OffsetOfFunctionWithId(id)));
1526}
1527
1528
1529void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
1530  ASSERT(!target.is(r1));
1531  GetBuiltinFunction(r1, id);
1532  // Load the code entry point from the builtins object.
1533  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1534}
1535
1536
1537void MacroAssembler::SetCounter(StatsCounter* counter, int value,
1538                                Register scratch1, Register scratch2) {
1539  if (FLAG_native_code_counters && counter->Enabled()) {
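    // The counter is backed by a word of memory whose address is given by the
    // external reference; store the new value directly into that word.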
1540    mov(scratch1, Operand(value));
1541    mov(scratch2, Operand(ExternalReference(counter)));
1542    str(scratch1, MemOperand(scratch2));
1543  }
1544}
1545
1546
1547void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1548                                      Register scratch1, Register scratch2) {
1549  ASSERT(value > 0);
1550  if (FLAG_native_code_counters && counter->Enabled()) {
1551    mov(scratch2, Operand(ExternalReference(counter)));
1552    ldr(scratch1, MemOperand(scratch2));
1553    add(scratch1, scratch1, Operand(value));
1554    str(scratch1, MemOperand(scratch2));
1555  }
1556}
1557
1558
1559void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1560                                      Register scratch1, Register scratch2) {
1561  ASSERT(value > 0);
1562  if (FLAG_native_code_counters && counter->Enabled()) {
1563    mov(scratch2, Operand(ExternalReference(counter)));
1564    ldr(scratch1, MemOperand(scratch2));
1565    sub(scratch1, scratch1, Operand(value));
1566    str(scratch1, MemOperand(scratch2));
1567  }
1568}
1569
1570
1571void MacroAssembler::Assert(Condition cc, const char* msg) {
1572  if (FLAG_debug_code)
1573    Check(cc, msg);
1574}
1575
1576
1577void MacroAssembler::AssertRegisterIsRoot(Register reg,
1578                                          Heap::RootListIndex index) {
1579  if (FLAG_debug_code) {
1580    LoadRoot(ip, index);
1581    cmp(reg, ip);
1582    Check(eq, "Register did not match expected root");
1583  }
1584}
1585
1586
1587void MacroAssembler::AssertFastElements(Register elements) {
1588  if (FLAG_debug_code) {
1589    ASSERT(!elements.is(ip));
1590    Label ok;
1591    push(elements);
1592    ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
1593    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
1594    cmp(elements, ip);
1595    b(eq, &ok);
1596    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
1597    cmp(elements, ip);
1598    b(eq, &ok);
1599    Abort("JSObject with fast elements map has slow elements");
1600    bind(&ok);
1601    pop(elements);
1602  }
1603}
1604
1605
1606void MacroAssembler::Check(Condition cc, const char* msg) {
1607  Label L;
1608  b(cc, &L);
1609  Abort(msg);
1610  // will not return here
1611  bind(&L);
1612}
1613
1614
1615void MacroAssembler::Abort(const char* msg) {
1616  Label abort_start;
1617  bind(&abort_start);
1618  // We want to pass the msg string like a smi to avoid GC
1619  // problems; however, msg is not guaranteed to be aligned
1620  // properly. Instead, we pass an aligned pointer that is
1621  // a proper v8 smi, but also pass the alignment difference
1622  // from the real pointer as a smi.
1623  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
1624  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
1625  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
1626#ifdef DEBUG
1627  if (msg != NULL) {
1628    RecordComment("Abort message: ");
1629    RecordComment(msg);
1630  }
1631#endif
1632  // Disable stub call restrictions to always allow calls to abort.
1633  set_allow_stub_calls(true);
1634
1635  mov(r0, Operand(p0));
1636  push(r0);
1637  mov(r0, Operand(Smi::FromInt(p1 - p0)));
1638  push(r0);
1639  CallRuntime(Runtime::kAbort, 2);
1640  // will not return here
1641  if (is_const_pool_blocked()) {
1642    // If the calling code cares about the exact number of
1643    // instructions generated, we insert padding here to keep the size
1644    // of the Abort macro constant.
1645    static const int kExpectedAbortInstructions = 10;
1646    int abort_instructions = InstructionsGeneratedSince(&abort_start);
1647    ASSERT(abort_instructions <= kExpectedAbortInstructions);
1648    while (abort_instructions++ < kExpectedAbortInstructions) {
1649      nop();
1650    }
1651  }
1652}
1653
1654
1655void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
1656  if (context_chain_length > 0) {
1657    // Move up the chain of contexts to the context containing the slot.
1658    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
1659    // Load the function context (which is the incoming, outer context).
1660    ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
1661    for (int i = 1; i < context_chain_length; i++) {
1662      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
1663      ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
1664    }
1665    // The context may be an intermediate context, not a function context.
1666    ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
1667  } else {  // Slot is in the current function context.
1668    // The context may be an intermediate context, not a function context.
1669    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
1670  }
1671}
1672
1673
1674void MacroAssembler::JumpIfNotBothSmi(Register reg1,
1675                                      Register reg2,
1676                                      Label* on_not_both_smi) {
1677  ASSERT_EQ(0, kSmiTag);
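  // The second tst only executes if the first left eq (reg1 is a smi); the
  // flags are ne on exit if either register has its tag bit set.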
1678  tst(reg1, Operand(kSmiTagMask));
1679  tst(reg2, Operand(kSmiTagMask), eq);
1680  b(ne, on_not_both_smi);
1681}
1682
1683
1684void MacroAssembler::JumpIfEitherSmi(Register reg1,
1685                                     Register reg2,
1686                                     Label* on_either_smi) {
1687  ASSERT_EQ(0, kSmiTag);
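  // The second tst only executes if the first left ne (reg1 is not a smi);
  // the flags end up eq if either register has a clear (smi) tag bit.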
1688  tst(reg1, Operand(kSmiTagMask));
1689  tst(reg2, Operand(kSmiTagMask), ne);
1690  b(eq, on_either_smi);
1691}
1692
1693
1694void MacroAssembler::AbortIfSmi(Register object) {
1695  ASSERT_EQ(0, kSmiTag);
1696  tst(object, Operand(kSmiTagMask));
1697  Assert(ne, "Operand is a smi");
1698}
1699
1700
1701void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
1702    Register first,
1703    Register second,
1704    Register scratch1,
1705    Register scratch2,
1706    Label* failure) {
1707  // Test that both first and second are sequential ASCII strings.
1708  // Assume that they are non-smis.
1709  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
1710  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
1711  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
1712  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
1713
1714  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
1715                                               scratch2,
1716                                               scratch1,
1717                                               scratch2,
1718                                               failure);
1719}
1720
1721void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
1722                                                         Register second,
1723                                                         Register scratch1,
1724                                                         Register scratch2,
1725                                                         Label* failure) {
1726  // Check that neither is a smi.
1727  ASSERT_EQ(0, kSmiTag);
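  // A smi has a zero tag bit, so if either operand is a smi the AND of the
  // two values also has a zero tag bit and the branch to failure is taken.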
1728  and_(scratch1, first, Operand(second));
1729  tst(scratch1, Operand(kSmiTagMask));
1730  b(eq, failure);
1731  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
1732                                             second,
1733                                             scratch1,
1734                                             scratch2,
1735                                             failure);
1736}
1737
1738
1739// Allocates a heap number or jumps to the gc_required label if the young
1740// space is full and a scavenge is needed.
1741void MacroAssembler::AllocateHeapNumber(Register result,
1742                                        Register scratch1,
1743                                        Register scratch2,
1744                                        Register heap_number_map,
1745                                        Label* gc_required) {
1746  // Allocate an object in the heap for the heap number and tag it as a heap
1747  // object.
1748  AllocateInNewSpace(HeapNumber::kSize,
1749                     result,
1750                     scratch1,
1751                     scratch2,
1752                     gc_required,
1753                     TAG_OBJECT);
1754
1755  // Store heap number map in the allocated object.
1756  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1757  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
1758}
1759
1760
1761void MacroAssembler::AllocateHeapNumberWithValue(Register result,
1762                                                 DwVfpRegister value,
1763                                                 Register scratch1,
1764                                                 Register scratch2,
1765                                                 Register heap_number_map,
1766                                                 Label* gc_required) {
1767  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
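  // Strip the heap object tag from the base register rather than folding it
  // into the offset, presumably because the vstr immediate offset has to be
  // a multiple of four.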
1768  sub(scratch1, result, Operand(kHeapObjectTag));
1769  vstr(value, scratch1, HeapNumber::kValueOffset);
1770}
1771
1772
1773// Copies a fixed number of fields of heap objects from src to dst.
1774void MacroAssembler::CopyFields(Register dst,
1775                                Register src,
1776                                RegList temps,
1777                                int field_count) {
1778  // At least one bit set in the first 15 registers.
1779  ASSERT((temps & ((1 << 15) - 1)) != 0);
1780  ASSERT((temps & dst.bit()) == 0);
1781  ASSERT((temps & src.bit()) == 0);
1782  // Primitive implementation using only one temporary register.
1783
1784  Register tmp = no_reg;
1785  // Find a temp register in temps list.
1786  for (int i = 0; i < 15; i++) {
1787    if ((temps & (1 << i)) != 0) {
1788      tmp.set_code(i);
1789      break;
1790    }
1791  }
1792  ASSERT(!tmp.is(no_reg));
1793
1794  for (int i = 0; i < field_count; i++) {
1795    ldr(tmp, FieldMemOperand(src, i * kPointerSize));
1796    str(tmp, FieldMemOperand(dst, i * kPointerSize));
1797  }
1798}
1799
1800
1801void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
1802                                       Register source,  // Input.
1803                                       Register scratch) {
1804  ASSERT(!zeros.is(source));
1805  ASSERT(!zeros.is(scratch));
1806  ASSERT(!scratch.is(ip));
1807  ASSERT(!source.is(ip));
1808  ASSERT(!zeros.is(ip));
1809#ifdef CAN_USE_ARMV5_INSTRUCTIONS
1810  clz(zeros, source);  // This instruction is only available on ARMv5 and up.
1811#else
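  // Software fallback: binary search over the top 16, 8, 4, 2 and 1 bits.
  // Whenever the bits just tested are all zero (eq), the count is increased
  // and the value shifted up so the next test looks at fresh bits.  Note that
  // a zero input produces 31 here, whereas clz would return 32.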
1812  mov(zeros, Operand(0, RelocInfo::NONE));
1813  Move(scratch, source);
1814  // Top 16.
1815  tst(scratch, Operand(0xffff0000));
1816  add(zeros, zeros, Operand(16), LeaveCC, eq);
1817  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
1818  // Top 8.
1819  tst(scratch, Operand(0xff000000));
1820  add(zeros, zeros, Operand(8), LeaveCC, eq);
1821  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
1822  // Top 4.
1823  tst(scratch, Operand(0xf0000000));
1824  add(zeros, zeros, Operand(4), LeaveCC, eq);
1825  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
1826  // Top 2.
1827  tst(scratch, Operand(0xc0000000));
1828  add(zeros, zeros, Operand(2), LeaveCC, eq);
1829  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
1830  // Top bit.
1831  tst(scratch, Operand(0x80000000u));
1832  add(zeros, zeros, Operand(1), LeaveCC, eq);
1833#endif
1834}
1835
1836
1837void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
1838    Register first,
1839    Register second,
1840    Register scratch1,
1841    Register scratch2,
1842    Label* failure) {
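  // The mask keeps only the string, representation and encoding bits, so both
  // instance types must equal the sequential ASCII string tag to pass.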
1843  int kFlatAsciiStringMask =
1844      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
1845  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
1846  and_(scratch1, first, Operand(kFlatAsciiStringMask));
1847  and_(scratch2, second, Operand(kFlatAsciiStringMask));
1848  cmp(scratch1, Operand(kFlatAsciiStringTag));
1849  // Ignore second test if first test failed.
1850  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
1851  b(ne, failure);
1852}
1853
1854
1855void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
1856                                                            Register scratch,
1857                                                            Label* failure) {
1858  int kFlatAsciiStringMask =
1859      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
1860  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
1861  and_(scratch, type, Operand(kFlatAsciiStringMask));
1862  cmp(scratch, Operand(kFlatAsciiStringTag));
1863  b(ne, failure);
1864}
1865
1866
1867void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
1868  int frame_alignment = ActivationFrameAlignment();
1869  // Up to four simple arguments are passed in registers r0..r3.
1870  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
1871  if (frame_alignment > kPointerSize) {
1872    // Make stack end at alignment and make room for num_arguments - 4 words
1873    // and the original value of sp.
1874    mov(scratch, sp);
1875    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
1876    ASSERT(IsPowerOf2(frame_alignment));
1877    and_(sp, sp, Operand(-frame_alignment));
1878    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
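    // The original sp is saved just above the argument slots so that
    // CallCFunction can restore it after the call returns.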
1879  } else {
1880    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
1881  }
1882}
1883
1884
1885void MacroAssembler::CallCFunction(ExternalReference function,
1886                                   int num_arguments) {
1887  mov(ip, Operand(function));
1888  CallCFunction(ip, num_arguments);
1889}
1890
1891
1892void MacroAssembler::CallCFunction(Register function, int num_arguments) {
1893  // Make sure that the stack is aligned before calling a C function unless
1894  // running in the simulator. The simulator has its own alignment check which
1895  // provides more information.
1896#if defined(V8_HOST_ARCH_ARM)
1897  if (FLAG_debug_code) {
1898    int frame_alignment = OS::ActivationFrameAlignment();
1899    int frame_alignment_mask = frame_alignment - 1;
1900    if (frame_alignment > kPointerSize) {
1901      ASSERT(IsPowerOf2(frame_alignment));
1902      Label alignment_as_expected;
1903      tst(sp, Operand(frame_alignment_mask));
1904      b(eq, &alignment_as_expected);
1905      // Don't use Check here, as it will call Runtime_Abort possibly
1906      // re-entering here.
1907      stop("Unexpected alignment");
1908      bind(&alignment_as_expected);
1909    }
1910  }
1911#endif
1912
1913  // Just call directly. The function called cannot cause a GC, or
1914  // allow preemption, so the return address in the link register
1915  // stays correct.
1916  Call(function);
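  // Restore the stack pointer: either reload the value saved by
  // PrepareCallCFunction (the aligned case) or pop the argument slots.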
1917  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
1918  if (OS::ActivationFrameAlignment() > kPointerSize) {
1919    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
1920  } else {
1921    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
1922  }
1923}
1924
1925
1926#ifdef ENABLE_DEBUGGER_SUPPORT
1927CodePatcher::CodePatcher(byte* address, int instructions)
1928    : address_(address),
1929      instructions_(instructions),
1930      size_(instructions * Assembler::kInstrSize),
1931      masm_(address, size_ + Assembler::kGap) {
1932  // Create a new macro assembler pointing to the address of the code to patch.
1933  // The size is adjusted with kGap in order for the assembler to generate size
1934  // bytes of instructions without failing with buffer size constraints.
1935  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
1936}
1937
1938
1939CodePatcher::~CodePatcher() {
1940  // Indicate that code has changed.
1941  CPU::FlushICache(address_, size_);
1942
1943  // Check that the code was patched as expected.
1944  ASSERT(masm_.pc_ == address_ + size_);
1945  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
1946}
1947
1948
1949void CodePatcher::Emit(Instr x) {
1950  masm()->emit(x);
1951}
1952
1953
1954void CodePatcher::Emit(Address addr) {
1955  masm()->emit(reinterpret_cast<Instr>(addr));
1956}
1957#endif  // ENABLE_DEBUGGER_SUPPORT
1958
1959
1960} }  // namespace v8::internal
1961
1962#endif  // V8_TARGET_ARCH_ARM
1963