1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/v8.h"
6
7#if V8_TARGET_ARCH_ARM64
8
9#include "src/bootstrapper.h"
10#include "src/codegen.h"
11#include "src/cpu-profiler.h"
12#include "src/debug.h"
13#include "src/isolate-inl.h"
14#include "src/runtime.h"
15
16namespace v8 {
17namespace internal {
18
19// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
20#define __
21
22
// Constructs a MacroAssembler emitting into |buffer| (|buffer_size| bytes).
// The current stack pointer defaults to jssp, and the scratch register lists
// default to DefaultTmpList() / DefaultFPTmpList().
MacroAssembler::MacroAssembler(Isolate* arg_isolate,
                               byte * buffer,
                               unsigned buffer_size)
    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
#if DEBUG
      allow_macro_instructions_(true),
#endif
      has_frame_(false),
      use_real_aborts_(true),
      sp_(jssp),
      tmp_list_(DefaultTmpList()),
      fptmp_list_(DefaultFPTmpList()) {
  // The isolate may be NULL here; only cache the undefined-value handle when
  // one is available.
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}
41
42
43CPURegList MacroAssembler::DefaultTmpList() {
44  return CPURegList(ip0, ip1);
45}
46
47
48CPURegList MacroAssembler::DefaultFPTmpList() {
49  return CPURegList(fp_scratch1, fp_scratch2);
50}
51
52
// Emits a logical operation (AND/ANDS/ORR/EOR/BIC/BICS, selected by |op|)
// of |rn| with a macro-level |operand| into |rd|. Operands that the
// instruction cannot encode directly (relocatable immediates, arbitrary
// immediates, extended registers) are materialized via scratch registers.
void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    // Relocatable immediate: load it into a scratch register first.
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();
    ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
      if (rd.Is32Bits()) {
        immediate &= kWRegMask;
      }
    }

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          // Flag-setting forms still need the instruction emitted below.
          break;
        default:
          UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          // Flag-setting forms still need the instruction emitted below.
          break;
        default:
          UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, immediate);
      if (rd.Is(csp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, temp, op);
        Mov(csp, temp);
        AssertStackConsistency();
      } else {
        Logical(rd, rn, temp, op);
      }
    }

  } else if (operand.IsExtendedRegister()) {
    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    ASSERT(operand.shift_amount() <= 4);
    ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    // Apply the extend/shift into a scratch register, then do the logical op.
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

  } else {
    // The operand can be encoded in the instruction.
    ASSERT(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}
151
152
// Synthesizes an arbitrary immediate move into |rd|, choosing the shortest
// of movz / movn / orr-immediate, and falling back to a movz/movn plus movk
// sequence for generic constants.
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  ASSERT(!rd.IsZero());

  // TODO(all) extend to support more immediates.
  //
  // Immediates on Aarch64 can be produced using an initial value, and zero to
  // three move keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half-words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  unsigned reg_size = rd.SizeInBits();
  unsigned n, imm_s, imm_r;
  if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(rd, imm);
  } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move inverted instruction. Movn can't
    // write to the stack pointer.
    movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
  } else {
    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    ASSERT((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            // movn takes the inverted halfword; the remaining bits are set.
            movn(temp, (~imm16) & 0xffffL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    // At least one halfword must differ from the ignored pattern, otherwise
    // one of the fast paths above would have matched.
    ASSERT(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
      AssertStackConsistency();
    }
  }
}
239
240
// Moves |operand| into |rd|, selecting the appropriate instruction for
// relocatable immediates, plain immediates, shifted registers, extended
// registers and register-to-register moves. |discard_mode| allows a
// same-W-register move to be elided when the caller does not require the
// upper 32 bits of the X register to be cleared.
void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(this)) {
    Ldr(dst, operand.immediate());

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.ImmediateValue());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If csp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    ASSERT(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}
295
296
// Bitwise-NOT move: rd = ~operand. Immediates are folded into a plain Mov of
// the inverted value; extended registers need a separate extend before the
// mvn instruction.
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  ASSERT(allow_macro_instructions_);

  if (operand.NeedsRelocation(this)) {
    // Load the relocatable immediate, then invert it in place.
    Ldr(rd, operand.immediate());
    mvn(rd, rd);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, ~operand.ImmediateValue());

  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, rd);

  } else {
    // Plain or shifted register: mvn encodes it directly.
    mvn(rd, operand);
  }
}
319
320
321unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
322  ASSERT((reg_size % 8) == 0);
323  int count = 0;
324  for (unsigned i = 0; i < (reg_size / 16); i++) {
325    if ((imm & 0xffff) == 0) {
326      count++;
327    }
328    imm >>= 16;
329  }
330  return count;
331}
332
333
334// The movz instruction can generate immediates containing an arbitrary 16-bit
335// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
336bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
337  ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
338  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
339}
340
341
342// The movn instruction can generate immediates containing an arbitrary 16-bit
343// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
344bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
345  return IsImmMovz(~imm, reg_size);
346}
347
348
349void MacroAssembler::ConditionalCompareMacro(const Register& rn,
350                                             const Operand& operand,
351                                             StatusFlags nzcv,
352                                             Condition cond,
353                                             ConditionalCompareOp op) {
354  ASSERT((cond != al) && (cond != nv));
355  if (operand.NeedsRelocation(this)) {
356    UseScratchRegisterScope temps(this);
357    Register temp = temps.AcquireX();
358    Ldr(temp, operand.immediate());
359    ConditionalCompareMacro(rn, temp, nzcv, cond, op);
360
361  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
362             (operand.IsImmediate() &&
363              IsImmConditionalCompare(operand.ImmediateValue()))) {
364    // The immediate can be encoded in the instruction, or the operand is an
365    // unshifted register: call the assembler.
366    ConditionalCompare(rn, operand, nzcv, cond, op);
367
368  } else {
369    // The operand isn't directly supported by the instruction: perform the
370    // operation on a temporary register.
371    UseScratchRegisterScope temps(this);
372    Register temp = temps.AcquireSameSizeAs(rn);
373    Mov(temp, operand);
374    ConditionalCompare(rn, temp, nzcv, cond, op);
375  }
376}
377
378
379void MacroAssembler::Csel(const Register& rd,
380                          const Register& rn,
381                          const Operand& operand,
382                          Condition cond) {
383  ASSERT(allow_macro_instructions_);
384  ASSERT(!rd.IsZero());
385  ASSERT((cond != al) && (cond != nv));
386  if (operand.IsImmediate()) {
387    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
388    // register.
389    int64_t imm = operand.ImmediateValue();
390    Register zr = AppropriateZeroRegFor(rn);
391    if (imm == 0) {
392      csel(rd, rn, zr, cond);
393    } else if (imm == 1) {
394      csinc(rd, rn, zr, cond);
395    } else if (imm == -1) {
396      csinv(rd, rn, zr, cond);
397    } else {
398      UseScratchRegisterScope temps(this);
399      Register temp = temps.AcquireSameSizeAs(rn);
400      Mov(temp, imm);
401      csel(rd, rn, temp, cond);
402    }
403  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
404    // Unshifted register argument.
405    csel(rd, rn, operand.reg(), cond);
406  } else {
407    // All other arguments.
408    UseScratchRegisterScope temps(this);
409    Register temp = temps.AcquireSameSizeAs(rn);
410    Mov(temp, operand);
411    csel(rd, rn, temp, cond);
412  }
413}
414
415
// Emits an add or sub (selected by |op|, optionally flag-setting per |S|) of
// |rn| and |operand| into |rd|, routing unencodable operands through a
// scratch register. A provably no-op x-register add/sub is elided entirely.
void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation(this)) {
    // Load the relocatable immediate into a scratch register and retry.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() &&
              !IsImmAddSub(operand.ImmediateValue()))      ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Unencodable cases: an immediate outside the add/sub range, a zero
    // register first operand with a non-shifted-register operand, or a ROR
    // shift (not supported by add/sub). Go via a scratch register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSub(rd, rn, temp, S, op);
  } else {
    // The operand is directly encodable.
    AddSub(rd, rn, operand, S, op);
  }
}
444
445
// Emits an add/sub-with-carry (adc/sbc family, selected by |op|) of |rn| and
// |operand| into |rd|. The instruction only accepts plain registers, so
// immediates, shifted registers and extended registers are materialized into
// a scratch register first.
void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    // Load the relocatable immediate and retry with a register operand.
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    ASSERT(operand.shift() != ROR);
    ASSERT(is_uintn(operand.shift_amount(),
          rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                             : kWRegSizeInBitsLog2));
    // Apply the shift separately, then use the plain-register form.
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    ASSERT(operand.shift_amount() <= 4);
    ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    // Apply the extend separately, then use the plain-register form.
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}
495
496
// Emits the load/store |op| of |rt| for the addressing mode described by
// |addr|, splitting the access into multiple instructions when the offset
// cannot be encoded in a single instruction.
void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes. Materialize the offset in a register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range: access first, then update
    // the base register manually.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range: update the base register
    // manually, then access with no offset.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}
527
528
529void MacroAssembler::Load(const Register& rt,
530                          const MemOperand& addr,
531                          Representation r) {
532  ASSERT(!r.IsDouble());
533
534  if (r.IsInteger8()) {
535    Ldrsb(rt, addr);
536  } else if (r.IsUInteger8()) {
537    Ldrb(rt, addr);
538  } else if (r.IsInteger16()) {
539    Ldrsh(rt, addr);
540  } else if (r.IsUInteger16()) {
541    Ldrh(rt, addr);
542  } else if (r.IsInteger32()) {
543    Ldr(rt.W(), addr);
544  } else {
545    ASSERT(rt.Is64Bits());
546    Ldr(rt, addr);
547  }
548}
549
550
551void MacroAssembler::Store(const Register& rt,
552                           const MemOperand& addr,
553                           Representation r) {
554  ASSERT(!r.IsDouble());
555
556  if (r.IsInteger8() || r.IsUInteger8()) {
557    Strb(rt, addr);
558  } else if (r.IsInteger16() || r.IsUInteger16()) {
559    Strh(rt, addr);
560  } else if (r.IsInteger32()) {
561    Str(rt.W(), addr);
562  } else {
563    ASSERT(rt.Is64Bits());
564    if (r.IsHeapObject()) {
565      AssertNotSmi(rt);
566    } else if (r.IsSmi()) {
567      AssertSmi(rt);
568    }
569    Str(rt, addr);
570  }
571}
572
573
// Returns true if a branch of type |b_type| to |label| cannot be emitted as
// a single instruction because the offset is out of range. As a side effect,
// still-unbound labels are recorded in unresolved_branches_ and the next
// veneer pool check point is pulled forward so the branch can be veneered
// before it goes out of range.
bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
    Label *label, ImmBranchType b_type) {
  bool need_longer_range = false;
  // There are two situations in which we care about the offset being out of
  // range:
  //  - The label is bound but too far away.
  //  - The label is not bound but linked, and the previous branch
  //    instruction in the chain is too far away.
  if (label->is_bound() || label->is_linked()) {
    need_longer_range =
      !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
  }
  if (!need_longer_range && !label->is_bound()) {
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    // Also maintain the next pool check.
    next_veneer_pool_check_ =
      Min(next_veneer_pool_check_,
          max_reachable_pc - kVeneerDistanceCheckMargin);
  }
  return need_longer_range;
}
598
599
// Computes the pc-relative address of |label| into |rd|. kAdrNear emits a
// single adr; kAdrFar emits a sequence that can reach beyond the adr
// immediate range.
void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());

  if (hint == kAdrNear) {
    adr(rd, label);
    return;
  }

  ASSERT(hint == kAdrFar);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX();
  ASSERT(!AreAliased(rd, scratch));

  if (label->is_bound()) {
    int label_offset = label->pos() - pc_offset();
    if (Instruction::IsValidPCRelOffset(label_offset)) {
      adr(rd, label);
    } else {
      // A bound label is always behind the pc, so use the most negative
      // encodable adr offset and add the remainder separately.
      ASSERT(label_offset <= 0);
      int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
      adr(rd, min_adr_offset);
      Add(rd, rd, label_offset - min_adr_offset);
    }
  } else {
    // Unbound label: emit a fixed-size sequence (adr + nops + movz + add).
    // NOTE(review): sized by PatchingAssembler::kAdrFarPatchableNInstrs, so
    // this sequence is presumably patched in place later if the target turns
    // out to be far away — confirm against the PatchingAssembler code.
    InstructionAccurateScope scope(
        this, PatchingAssembler::kAdrFarPatchableNInstrs);
    adr(rd, label);
    for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
      nop(ADR_FAR_NOP);
    }
    movz(scratch, 0);
    add(rd, rd, scratch);
  }
}
635
636
637void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
638  ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
639         (bit == -1 || type >= kBranchTypeFirstUsingBit));
640  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
641    B(static_cast<Condition>(type), label);
642  } else {
643    switch (type) {
644      case always:        B(label);              break;
645      case never:         break;
646      case reg_zero:      Cbz(reg, label);       break;
647      case reg_not_zero:  Cbnz(reg, label);      break;
648      case reg_bit_clear: Tbz(reg, bit, label);  break;
649      case reg_bit_set:   Tbnz(reg, bit, label); break;
650      default:
651        UNREACHABLE();
652    }
653  }
654}
655
656
657void MacroAssembler::B(Label* label, Condition cond) {
658  ASSERT(allow_macro_instructions_);
659  ASSERT((cond != al) && (cond != nv));
660
661  Label done;
662  bool need_extra_instructions =
663    NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
664
665  if (need_extra_instructions) {
666    b(&done, NegateCondition(cond));
667    B(label);
668  } else {
669    b(label, cond);
670  }
671  bind(&done);
672}
673
674
675void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
676  ASSERT(allow_macro_instructions_);
677
678  Label done;
679  bool need_extra_instructions =
680    NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
681
682  if (need_extra_instructions) {
683    tbz(rt, bit_pos, &done);
684    B(label);
685  } else {
686    tbnz(rt, bit_pos, label);
687  }
688  bind(&done);
689}
690
691
692void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
693  ASSERT(allow_macro_instructions_);
694
695  Label done;
696  bool need_extra_instructions =
697    NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
698
699  if (need_extra_instructions) {
700    tbnz(rt, bit_pos, &done);
701    B(label);
702  } else {
703    tbz(rt, bit_pos, label);
704  }
705  bind(&done);
706}
707
708
709void MacroAssembler::Cbnz(const Register& rt, Label* label) {
710  ASSERT(allow_macro_instructions_);
711
712  Label done;
713  bool need_extra_instructions =
714    NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
715
716  if (need_extra_instructions) {
717    cbz(rt, &done);
718    B(label);
719  } else {
720    cbnz(rt, label);
721  }
722  bind(&done);
723}
724
725
726void MacroAssembler::Cbz(const Register& rt, Label* label) {
727  ASSERT(allow_macro_instructions_);
728
729  Label done;
730  bool need_extra_instructions =
731    NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
732
733  if (need_extra_instructions) {
734    cbnz(rt, &done);
735    B(label);
736  } else {
737    cbz(rt, label);
738  }
739  bind(&done);
740}
741
742
743// Pseudo-instructions.
744
745
746void MacroAssembler::Abs(const Register& rd, const Register& rm,
747                         Label* is_not_representable,
748                         Label* is_representable) {
749  ASSERT(allow_macro_instructions_);
750  ASSERT(AreSameSizeAndType(rd, rm));
751
752  Cmp(rm, 1);
753  Cneg(rd, rm, lt);
754
755  // If the comparison sets the v flag, the input was the smallest value
756  // representable by rm, and the mathematical result of abs(rm) is not
757  // representable using two's complement.
758  if ((is_not_representable != NULL) && (is_representable != NULL)) {
759    B(is_not_representable, vs);
760    B(is_representable);
761  } else if (is_not_representable != NULL) {
762    B(is_not_representable, vs);
763  } else if (is_representable != NULL) {
764    B(is_representable, vc);
765  }
766}
767
768
769// Abstracted stack operations.
770
771
772void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
773                          const CPURegister& src2, const CPURegister& src3) {
774  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
775
776  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
777  int size = src0.SizeInBytes();
778
779  PushPreamble(count, size);
780  PushHelper(count, size, src0, src1, src2, src3);
781}
782
783
784void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
785                          const CPURegister& src2, const CPURegister& src3,
786                          const CPURegister& src4, const CPURegister& src5,
787                          const CPURegister& src6, const CPURegister& src7) {
788  ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
789
790  int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
791  int size = src0.SizeInBytes();
792
793  PushPreamble(count, size);
794  PushHelper(4, size, src0, src1, src2, src3);
795  PushHelper(count - 4, size, src4, src5, src6, src7);
796}
797
798
799void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
800                         const CPURegister& dst2, const CPURegister& dst3) {
801  // It is not valid to pop into the same register more than once in one
802  // instruction, not even into the zero register.
803  ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
804  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
805  ASSERT(dst0.IsValid());
806
807  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
808  int size = dst0.SizeInBytes();
809
810  PopHelper(count, size, dst0, dst1, dst2, dst3);
811  PopPostamble(count, size);
812}
813
814
// Emits pushes for all queued registers, batching same-size-and-type
// registers in groups of up to four. When |preamble_directive| is
// WITH_PREAMBLE, the total queued size is claimed from the stack first.
void MacroAssembler::PushPopQueue::PushQueued(
    PreambleDirective preamble_directive) {
  if (queued_.empty()) return;

  if (preamble_directive == WITH_PREAMBLE) {
    masm_->PushPreamble(size_);
  }

  int count = queued_.size();
  int index = 0;
  while (index < count) {
    // PushHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);
  }

  // The queue has been fully emitted; reset it for reuse.
  queued_.clear();
}
841
842
// Emits pops for all queued registers, batching same-size-and-type registers
// in groups of up to four, then releases the total queued stack size.
void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

  int count = queued_.size();
  int index = 0;
  while (index < count) {
    // PopHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);
  }

  // Release the stack space claimed by the matching PushQueued preamble.
  masm_->PopPostamble(size_);
  queued_.clear();
}
865
866
867void MacroAssembler::PushCPURegList(CPURegList registers) {
868  int size = registers.RegisterSizeInBytes();
869
870  PushPreamble(registers.Count(), size);
871  // Push up to four registers at a time because if the current stack pointer is
872  // csp and reg_size is 32, registers must be pushed in blocks of four in order
873  // to maintain the 16-byte alignment for csp.
874  while (!registers.IsEmpty()) {
875    int count_before = registers.Count();
876    const CPURegister& src0 = registers.PopHighestIndex();
877    const CPURegister& src1 = registers.PopHighestIndex();
878    const CPURegister& src2 = registers.PopHighestIndex();
879    const CPURegister& src3 = registers.PopHighestIndex();
880    int count = count_before - registers.Count();
881    PushHelper(count, size, src0, src1, src2, src3);
882  }
883}
884
885
886void MacroAssembler::PopCPURegList(CPURegList registers) {
887  int size = registers.RegisterSizeInBytes();
888
889  // Pop up to four registers at a time because if the current stack pointer is
890  // csp and reg_size is 32, registers must be pushed in blocks of four in
891  // order to maintain the 16-byte alignment for csp.
892  while (!registers.IsEmpty()) {
893    int count_before = registers.Count();
894    const CPURegister& dst0 = registers.PopLowestIndex();
895    const CPURegister& dst1 = registers.PopLowestIndex();
896    const CPURegister& dst2 = registers.PopLowestIndex();
897    const CPURegister& dst3 = registers.PopLowestIndex();
898    int count = count_before - registers.Count();
899    PopHelper(count, size, dst0, dst1, dst2, dst3);
900  }
901  PopPostamble(registers.Count(), size);
902}
903
904
// Push 'count' copies of 'src' onto the stack, where 'count' is a
// compile-time constant.
void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PushPreamble(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    // For large counts, emit a compact counted loop that pushes two copies
    // per iteration instead of fully unrolling the pushes.
    Label loop;
    __ Mov(temp, count / 2);
    __ Bind(&loop);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);
    __ B(ne, &loop);

    // If count was odd, one push remains; the unrolled code below emits it.
    count %= 2;
  }

  // Push up to four registers at a time if possible because if the current
  // stack pointer is csp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for csp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  ASSERT(count == 0);
}
941
942
// Push 'count' copies of 'src' onto the stack, where 'count' is only known
// at run time (held in a register). 'count' itself is not modified; a
// same-sized scratch register is used as the loop counter.
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Label loop, done;

    // temp = count - 1; a negative result means count was zero.
    Subs(temp, count, 1);
    B(mi, &done);

    // Push all registers individually, to save code size.
    Bind(&loop);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
    B(pl, &loop);

    Bind(&done);
  } else {
    Label loop, leftover2, leftover1, done;

    // temp = count - 4; a negative result means fewer than four remain.
    Subs(temp, count, 4);
    B(mi, &leftover2);

    // Push groups of four first.
    Bind(&loop);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    B(pl, &loop);

    // Push groups of two. Bit 1 of the original count tells whether a pair
    // remains after the groups of four.
    Bind(&leftover2);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    // Push the last one (if required), indicated by bit 0 of count.
    Bind(&leftover1);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Bind(&done);
  }
}
987
988
// Emit the actual store instructions for a push of up to four registers.
// 'size' is the size in bytes of each register; unused slots must be NoReg.
void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  ASSERT(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
  switch (count) {
    case 1:
      ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      ASSERT(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      ASSERT(src3.IsNone());
      // Reserve all three slots at once, then fill the remaining (highest
      // address) slot with a plain store.
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using csp, whilst maintaining 16-byte alignment for csp
      // at all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      UNREACHABLE();
  }
}
1027
1028
// Emit the actual load instructions for a pop of up to four registers.
// 'size' is the size in bytes of each register; unused slots must be NoReg.
void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  ASSERT(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      ASSERT(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      ASSERT(dst3.IsNone());
      // Load the topmost (highest address) slot first, then load the lower
      // pair and release all three slots in one post-indexed access.
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using csp, whilst maintaining 16-byte alignment
      // for csp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      UNREACHABLE();
  }
}
1068
1069
1070void MacroAssembler::PushPreamble(Operand total_size) {
1071  if (csp.Is(StackPointer())) {
1072    // If the current stack pointer is csp, then it must be aligned to 16 bytes
1073    // on entry and the total size of the specified registers must also be a
1074    // multiple of 16 bytes.
1075    if (total_size.IsImmediate()) {
1076      ASSERT((total_size.ImmediateValue() % 16) == 0);
1077    }
1078
1079    // Don't check access size for non-immediate sizes. It's difficult to do
1080    // well, and it will be caught by hardware (or the simulator) anyway.
1081  } else {
1082    // Even if the current stack pointer is not the system stack pointer (csp),
1083    // the system stack pointer will still be modified in order to comply with
1084    // ABI rules about accessing memory below the system stack pointer.
1085    BumpSystemStackPointer(total_size);
1086  }
1087}
1088
1089
1090void MacroAssembler::PopPostamble(Operand total_size) {
1091  if (csp.Is(StackPointer())) {
1092    // If the current stack pointer is csp, then it must be aligned to 16 bytes
1093    // on entry and the total size of the specified registers must also be a
1094    // multiple of 16 bytes.
1095    if (total_size.IsImmediate()) {
1096      ASSERT((total_size.ImmediateValue() % 16) == 0);
1097    }
1098
1099    // Don't check access size for non-immediate sizes. It's difficult to do
1100    // well, and it will be caught by hardware (or the simulator) anyway.
1101  } else if (emit_debug_code()) {
1102    // It is safe to leave csp where it is when unwinding the JavaScript stack,
1103    // but if we keep it matching StackPointer, the simulator can detect memory
1104    // accesses in the now-free part of the stack.
1105    SyncSystemStackPointer();
1106  }
1107}
1108
1109
// Store 'src' at [StackPointer() + offset]. The offset must be non-negative:
// writing below the stack pointer is not allowed.
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    ASSERT(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    // For a dynamic offset, emit a runtime check that 0 <= offset instead.
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Str(src, MemOperand(StackPointer(), offset));
}
1120
1121
// Load 'dst' from [StackPointer() + offset]. The offset must be
// non-negative: reading below the stack pointer is not allowed.
void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    ASSERT(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    // For a dynamic offset, emit a runtime check that 0 <= offset instead.
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}
1132
1133
1134void MacroAssembler::PokePair(const CPURegister& src1,
1135                              const CPURegister& src2,
1136                              int offset) {
1137  ASSERT(AreSameSizeAndType(src1, src2));
1138  ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1139  Stp(src1, src2, MemOperand(StackPointer(), offset));
1140}
1141
1142
1143void MacroAssembler::PeekPair(const CPURegister& dst1,
1144                              const CPURegister& dst2,
1145                              int offset) {
1146  ASSERT(AreSameSizeAndType(dst1, dst2));
1147  ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1148  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1149}
1150
1151
// Save all AAPCS64 callee-saved registers (x19-x30 and d8-d15) onto the
// stack using pre-indexed store-pair instructions. The mirror of
// PopCalleeSavedRegisters.
void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  ASSERT(csp.Is(StackPointer()));

  // Each stp below reuses this pre-indexed operand, decrementing csp by one
  // pair (16 bytes) per store, so csp stays 16-byte aligned throughout.
  MemOperand tos(csp, -2 * kXRegSize, PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);    // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}
1174
1175
// Restore all AAPCS64 callee-saved registers (x19-x30 and d8-d15) from the
// stack, in exactly the reverse order of PushCalleeSavedRegisters.
void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  ASSERT(csp.Is(StackPointer()));

  // Each ldp below reuses this post-indexed operand, incrementing csp by one
  // pair (16 bytes) per load, so csp stays 16-byte aligned throughout.
  MemOperand tos(csp, 2 * kXRegSize, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);    // x28 = jssp
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}
1198
1199
// Debug-only consistency checks on the stack pointers: csp must be properly
// aligned, and (when jssp is the current stack pointer) csp must not be
// above it.
void MacroAssembler::AssertStackConsistency() {
  // Avoid emitting code when !use_real_abort() since non-real aborts cause too
  // much code to be generated.
  if (emit_debug_code() && use_real_aborts()) {
    if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
      // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true.  We
      // can't check the alignment of csp without using a scratch register (or
      // clobbering the flags), but the processor (or simulator) will abort if
      // it is not properly aligned during a load.
      ldr(xzr, MemOperand(csp, 0));
    }
    if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
      Label ok;
      // Check that csp <= StackPointer(), preserving all registers and NZCV.
      // The trick: temporarily store (csp - StackPointer()) in StackPointer()
      // itself; 'sub' (unlike 'subs') leaves NZCV untouched, and cbz/tbnz
      // don't read the flags.
      sub(StackPointer(), csp, StackPointer());
      cbz(StackPointer(), &ok);                 // Ok if csp == StackPointer().
      tbnz(StackPointer(), kXSignBit, &ok);     // Ok if csp < StackPointer().

      // Avoid generating AssertStackConsistency checks for the Push in Abort.
      { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
        Abort(kTheCurrentStackPointerIsBelowCsp);
      }

      bind(&ok);
      // Restore StackPointer(). The subtraction is self-inverse:
      // csp - (csp - sp) == sp.
      sub(StackPointer(), csp, StackPointer());
    }
  }
}
1229
1230
// Debug-only check that FPCR holds the mode V8 expects. If 'fpcr' is NoReg,
// the current FPCR value is read into a scratch register first; otherwise
// 'fpcr' must already hold a copy of FPCR.
void MacroAssembler::AssertFPCRState(Register fpcr) {
  if (emit_debug_code()) {
    Label unexpected_mode, done;
    UseScratchRegisterScope temps(this);
    if (fpcr.IsNone()) {
      fpcr = temps.AcquireX();
      Mrs(fpcr, FPCR);
    }

    // Settings overridden by ConfigureFPCR():
    //   - Assert that default-NaN mode is set.
    Tbz(fpcr, DN_offset, &unexpected_mode);

    // Settings left to their default values:
    //   - Assert that flush-to-zero is not set.
    Tbnz(fpcr, FZ_offset, &unexpected_mode);
    //   - Assert that the rounding mode is nearest-with-ties-to-even.
    STATIC_ASSERT(FPTieEven == 0);
    Tst(fpcr, RMode_mask);
    B(eq, &done);

    Bind(&unexpected_mode);
    Abort(kUnexpectedFPCRMode);

    Bind(&done);
  }
}
1258
1259
// Put FPCR into the state V8 requires: default-NaN mode enabled. The write
// to FPCR is skipped when the bit is already set, since writing a system
// register can be expensive.
void MacroAssembler::ConfigureFPCR() {
  UseScratchRegisterScope temps(this);
  Register fpcr = temps.AcquireX();
  Mrs(fpcr, FPCR);

  // If necessary, enable default-NaN mode. The default values of the other FPCR
  // options should be suitable, and AssertFPCRState will verify that.
  Label no_write_required;
  Tbnz(fpcr, DN_offset, &no_write_required);

  Orr(fpcr, fpcr, DN_mask);
  Msr(FPCR, fpcr);

  Bind(&no_write_required);
  // NOTE(review): on the no-write path 'fpcr' still holds the pre-Orr value,
  // but DN was already set there, so the assert is valid either way.
  AssertFPCRState(fpcr);
}
1276
1277
// Copy 'src' to 'dst', replacing any NaN with the default (canonical) NaN.
// Relies on FPCR being in the state established by ConfigureFPCR.
void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
                                     const FPRegister& src) {
  AssertFPCRState();

  // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
  // for NaNs, which become the default NaN. We use fsub rather than fadd
  // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  Fsub(dst, src, fp_zero);
}
1287
1288
// Load the root-list entry 'index' into 'destination' via the root register.
void MacroAssembler::LoadRoot(CPURegister destination,
                              Heap::RootListIndex index) {
  // TODO(jbramley): Most root values are constants, and can be synthesized
  // without a load. Refer to the ARM back end for details.
  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
}
1295
1296
// Store 'source' into the root-list entry 'index' via the root register.
void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  Str(source, MemOperand(root, index << kPointerSizeLog2));
}
1301
1302
// Load the 'true' and 'false' root values with a single load-pair, relying
// on their root-list slots being adjacent (checked by the STATIC_ASSERT).
void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,
      MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
}
1309
1310
// Load a handle's heap object into 'result'. New-space objects can move, so
// they are referenced indirectly through a Cell (whose value the GC keeps up
// to date); old-space objects are embedded directly.
void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Mov(result, Operand(cell));
    Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  } else {
    Mov(result, Operand(object));
  }
}
1322
1323
// Load the descriptor array of 'map' into 'descriptors'.
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
1328
1329
// Extract the number-of-own-descriptors field from 'map' into 'dst'.
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
1334
1335
// Extract the enum-length field of 'map' into 'dst' as an untagged integer.
// The And suffices because the field starts at bit 0 (see STATIC_ASSERT).
void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Map::EnumLengthBits::kMask);
}
1341
1342
// Extract the enum-length field of 'map' into 'dst' as a smi.
void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  EnumLengthUntagged(dst, map);
  SmiTag(dst, dst);
}
1347
1348
// Check that 'object' and every object on its prototype chain (up to
// 'null_value') has a valid enum cache and no elements, so a for-in loop can
// use the cache. Jumps to 'call_runtime' if any check fails; otherwise falls
// through. Clobbers all four scratch registers.
void MacroAssembler::CheckEnumCache(Register object,
                                    Register null_value,
                                    Register scratch0,
                                    Register scratch1,
                                    Register scratch2,
                                    Register scratch3,
                                    Label* call_runtime) {
  ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
                     scratch3));

  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;

  Mov(current_object, object);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  Register map = scratch2;
  Register enum_length = scratch3;
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  EnumLengthUntagged(enum_length, map);
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  // The receiver's own enum-length check above is weaker than the per-link
  // check in the loop, so enter the loop past that check.
  B(&start);

  Bind(&next);
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

  Bind(&start);

  // Check that there are no elements. Register current_object contains the
  // current JS object we've reached through the prototype chain.
  Label no_elements;
  Ldr(current_object, FieldMemOperand(current_object,
                                      JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Bind(&no_elements);
  // Advance to the prototype and loop until the null sentinel is reached.
  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
  Cmp(current_object, null_value);
  B(ne, &next);
}
1405
1406
// Test whether an AllocationMemento immediately follows 'receiver' in new
// space. Jumps to 'no_memento_found' if the candidate address is outside the
// (start, allocation-top) range; otherwise falls through with the condition
// flags set by the final Cmp against the allocation-memento map, for the
// caller to branch on (eq = memento found).
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // scratch1 = address just past where a trailing memento would end.
  Add(scratch1, receiver,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
  Cmp(scratch1, new_space_start);
  B(lt, no_memento_found);

  Mov(scratch2, new_space_allocation_top);
  Ldr(scratch2, MemOperand(scratch2));
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);

  // Load the candidate memento's map and compare against the expected map;
  // the resulting flags are this function's "return value".
  Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
  Cmp(scratch1,
      Operand(isolate()->factory()->allocation_memento_map()));
}
1430
1431
// Jump to the handler entry for the given (code 'object', handler 'state')
// pair, passing 'exception' in x0. Used by Throw and ThrowUncatchable.
void MacroAssembler::JumpToHandlerEntry(Register exception,
                                        Register object,
                                        Register state,
                                        Register scratch1,
                                        Register scratch2) {
  // Handler expects argument in x0.
  ASSERT(exception.Is(x0));

  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
  Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  // The handler-table index is the state with its kind bits stripped off.
  STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
  Lsr(scratch2, state, StackHandler::kKindWidth);
  Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  // Entry address = code start + untagged offset.
  Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
  Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
  Br(scratch1);
}
1451
1452
// Branch to 'branch' if 'object' is (cond == eq) or is not (cond == ne) in
// new space, determined by masking the address and comparing against the
// new-space start.
void MacroAssembler::InNewSpace(Register object,
                                Condition cond,
                                Label* branch) {
  ASSERT(cond == eq || cond == ne);
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  And(temp, object, ExternalReference::new_space_mask(isolate()));
  Cmp(temp, ExternalReference::new_space_start(isolate()));
  B(cond, branch);
}
1463
1464
// Throw 'value': unwind to the topmost stack handler, restore its context
// and frame pointer, and jump to its handler entry. Does not return.
void MacroAssembler::Throw(Register value,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Register scratch4) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The handler expects the exception in x0.
  ASSERT(value.Is(x0));

  // Drop the stack pointer to the top of the top handler.
  ASSERT(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                          isolate())));
  Ldr(jssp, MemOperand(scratch1));
  // Restore the next handler.
  Pop(scratch2);
  Str(scratch2, MemOperand(scratch1));

  // Get the code object and state.  Restore the context and frame pointer.
  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  Label not_js_frame;
  Cbz(cp, &not_js_frame);
  Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  Bind(&not_js_frame);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}
1505
1506
// Throw 'value' past all JS handlers: unwind the handler chain until the
// JS_ENTRY handler is found, then jump to its handler entry. Does not
// return.
void MacroAssembler::ThrowUncatchable(Register value,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      Register scratch4) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The handler expects the exception in x0.
  ASSERT(value.Is(x0));

  // Drop the stack pointer to the top of the top stack handler.
  ASSERT(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                          isolate())));
  Ldr(jssp, MemOperand(scratch1));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  B(&check_kind);
  Bind(&fetch_next);
  Peek(jssp, StackHandlerConstants::kNextOffset);

  Bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  Peek(scratch2, StackHandlerConstants::kStateOffset);
  // Any set kind bit means "not JS_ENTRY"; keep unwinding.
  TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);

  // Set the top handler address to next handler past the top ENTRY handler.
  Pop(scratch2);
  Str(scratch2, MemOperand(scratch1));

  // Get the code object and state.  Clear the context and frame pointer (0 was
  // saved in the handler).
  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}
1552
1553
// Replace 'smi' with its absolute value, jumping to 'slow' on overflow
// (abs of the most negative value cannot be represented).
void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
  ASSERT(smi.Is64Bits());
  Abs(smi, smi, slow);
}
1558
1559
// Debug-only check that 'object' is a smi; aborts with 'reason' otherwise.
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(eq, reason);
  }
}
1567
1568
// Debug-only check that 'object' is not a smi; aborts with 'reason'
// otherwise.
void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, reason);
  }
}
1576
1577
// Debug-only check that 'object' is a Name (symbol or string); aborts
// otherwise.
void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAName);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, LAST_NAME_TYPE);
    Check(ls, kOperandIsNotAName);
  }
}
1590
1591
// Debug-only check that 'object' is either the undefined value or an
// AllocationSite; aborts otherwise. Clobbers 'scratch'.
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
    Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    Bind(&done_checking);
  }
}
1604
1605
// Debug-only check that 'object' is a string; aborts otherwise.
void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsASmiAndNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    // String instance types are all below FIRST_NONSTRING_TYPE.
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}
1618
1619
// Call the given code stub, recording 'ast_id' with the call site.
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
1624
1625
// Tail-call the given code stub (jump, not call; no return address pushed).
void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
1629
1630
// Call the C++ runtime function 'f' with 'num_arguments' arguments already
// on the stack, going through the CEntry stub.
void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All arguments must be on the stack before this function is called.
  // x0 holds the return value after the call.

  // Check that the number of arguments matches what the function expects.
  // If f->nargs is -1, the function can accept a variable number of arguments.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // Place the necessary arguments.
  Mov(x0, num_arguments);
  Mov(x1, ExternalReference(f, isolate()));

  CEntryStub stub(isolate(), 1, save_doubles);
  CallStub(&stub);
}
1648
1649
// Byte offset of ref0 relative to ref1. Used below to address the
// handle-scope fields relative to the 'next' field.
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}
1653
1654
// Call an API function (possibly via the profiler thunk), manage the
// HandleScope around the call, handle scheduled exceptions, then leave the
// exit frame and return. 'function_address' must be in x1 or x2;
// 'stack_space' is dropped on return; 'spill_offset' locates four spill
// slots for callee-saved registers; the result is loaded from
// 'return_value_operand'; 'context_restore_operand', if non-NULL, restores
// cp before leaving the frame.
void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    int stack_space,
    int spill_offset,
    MemOperand return_value_operand,
    MemOperand* context_restore_operand) {
  ASM_LOCATION("CallApiFunctionAndReturn");
  // Offsets of the handle-scope limit/level fields, relative to 'next'.
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);

  ASSERT(function_address.is(x1) || function_address.is(x2));

  // If profiling is enabled, route the call through the thunk so the
  // profiler can observe it; otherwise call the function directly. The
  // target ends up in x3 either way.
  Label profiler_disabled;
  Label end_profiler_check;
  Mov(x10, ExternalReference::is_profiling_address(isolate()));
  Ldrb(w10, MemOperand(x10));
  Cbz(w10, &profiler_disabled);
  Mov(x3, thunk_ref);
  B(&end_profiler_check);

  Bind(&profiler_disabled);
  Mov(x3, function_address);
  Bind(&end_profiler_check);

  // Save the callee-save registers we are going to use.
  // TODO(all): Is this necessary? ARM doesn't do it.
  STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
  Poke(x19, (spill_offset + 0) * kXRegSize);
  Poke(x20, (spill_offset + 1) * kXRegSize);
  Poke(x21, (spill_offset + 2) * kXRegSize);
  Poke(x22, (spill_offset + 3) * kXRegSize);

  // Allocate HandleScope in callee-save registers.
  // We will need to restore the HandleScope after the call to the API function,
  // by allocating it in callee-save registers they will be preserved by C code.
  Register handle_scope_base = x22;
  Register next_address_reg = x19;
  Register limit_reg = x20;
  Register level_reg = w21;

  Mov(handle_scope_base, next_address);
  Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  Add(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));

  // Optionally log the transition into external (C++) code.
  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    Mov(x0, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on stack (could have moved after GC).
  // DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate());
  stub.GenerateCall(this, x3);

  // Optionally log the transition back from external (C++) code.
  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    Mov(x0, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  Ldr(x0, return_value_operand);
  Bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  if (emit_debug_code()) {
    // The level must not have changed across the call.
    Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
    Cmp(w1, level_reg);
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  Sub(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  // If the callee grew the handle scope (limit changed), the extensions must
  // be deleted before leaving.
  Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
  Cmp(limit_reg, x1);
  B(ne, &delete_allocated_handles);

  Bind(&leave_exit_frame);
  // Restore callee-saved registers.
  Peek(x19, (spill_offset + 0) * kXRegSize);
  Peek(x20, (spill_offset + 1) * kXRegSize);
  Peek(x21, (spill_offset + 2) * kXRegSize);
  Peek(x22, (spill_offset + 3) * kXRegSize);

  // Check if the function scheduled an exception.
  Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
  Ldr(x5, MemOperand(x5));
  JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
  Bind(&exception_handled);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    Ldr(cp, *context_restore_operand);
  }

  LeaveExitFrame(false, x1, !restore_context);
  Drop(stack_space);
  Ret();

  // Out-of-line: turn the scheduled exception into a real throw via the
  // runtime, then resume the normal return path.
  Bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallExternalReference(
        ExternalReference(
            Runtime::kHiddenPromoteScheduledException, isolate()), 0);
  }
  B(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  Bind(&delete_allocated_handles);
  Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  // Save the return value in a callee-save register.
  Register saved_result = x19;
  Mov(saved_result, x0);
  Mov(x0, ExternalReference::isolate_address(isolate()));
  CallCFunction(
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
  Mov(x0, saved_result);
  B(&leave_exit_frame);
}
1798
1799
// Call the C++ runtime function identified by |ext| through the CEntryStub.
// x0 receives the argument count and x1 the external reference, as the stub
// expects. Clobbers x0 and x1.
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Mov(x0, num_arguments);
  Mov(x1, ext);

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}
1808
1809
// Tail-jump to the C++ runtime function |builtin| through the CEntryStub.
// The argument count is expected to already be in x0 (see
// TailCallExternalReference); x1 is clobbered with the reference.
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  Mov(x1, builtin);
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
1815
1816
// Load the JSFunction for the JavaScript builtin |id| into |target|.
// |target| is used as a scratch register along the way (global object, then
// builtins object, then the function).
void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  Ldr(target, GlobalObjectMemOperand());
  Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  Ldr(target, FieldMemOperand(target,
                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
1826
1827
// Load the code entry point of builtin |id| into |target|, and its JSFunction
// into |function|. The two registers must not alias.
void MacroAssembler::GetBuiltinEntry(Register target,
                                     Register function,
                                     Builtins::JavaScript id) {
  ASSERT(!AreAliased(target, function));
  GetBuiltinFunction(function, id);
  // Load the code entry point from the builtins object.
  Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}
1836
1837
// Invoke the JavaScript builtin |id|, either as a call (CALL_FUNCTION,
// wrapped by |call_wrapper|) or as a tail jump (JUMP_FUNCTION).
// Clobbers x1 (the builtin's JSFunction) and x2 (its code entry point).
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
  // You can't call a builtin without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Get the builtin entry in x2 and setup the function object in x1.
  GetBuiltinEntry(x2, x1, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(x2));
    Call(x2);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(x2);
  }
}
1856
1857
// Tail-call the C++ runtime function |ext| with |num_arguments| arguments
// already on the stack. |result_size| is unused on this architecture.
// Clobbers x0 (argument count) and, via JumpToExternalReference, x1.
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Mov(x0, num_arguments);
  JumpToExternalReference(ext);
}
1868
1869
// Convenience wrapper: tail-call the runtime function |fid| by building its
// ExternalReference and delegating to TailCallExternalReference.
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}
1877
1878
// Initialize the map, length and hash fields of a freshly allocated string.
// |string| is the tagged string object, |length| the untagged character
// count, and |map_index| selects the string map to install.
// scratch1 (smi-tagged length) and scratch2 (map, then hash) are clobbered.
void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  ASSERT(!AreAliased(string, length, scratch1, scratch2));
  LoadRoot(scratch2, map_index);
  SmiTag(scratch1, length);
  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));

  // scratch2 is free again after the map store; reuse it for the hash field.
  Mov(scratch2, String::kEmptyHashField);
  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
}
1893
1894
// Return the stack alignment (in bytes) required when calling out to C code:
// the host OS's alignment when running natively, or a flag-controlled value
// when generating code under the simulator.
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM64
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM64
}
1910
1911
// Call a C function with |num_of_reg_args| integer register arguments and no
// double arguments. Delegates to the three-argument overload.
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args) {
  CallCFunction(function, num_of_reg_args, 0);
}
1916
1917
// Call the C function referenced by |function|: materialize its address into
// a scratch register, then delegate to the register-based overload.
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);
  CallCFunction(temp, num_of_reg_args, num_of_double_args);
}
1926
1927
// Call the C function whose address is in |function|. Arguments are assumed
// to already be in the AAPCS64 argument registers. If the current stack
// pointer is jssp rather than csp, an aligned csp is derived for the call and
// jssp is restored afterwards (with a debug-mode sanity check on csp).
void MacroAssembler::CallCFunction(Register function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  ASSERT(has_frame());
  // We can pass 8 integer arguments in registers. If we need to pass more than
  // that, we'll need to implement support for passing them on the stack.
  ASSERT(num_of_reg_args <= 8);

  // If we're passing doubles, we're limited to the following prototypes
  // (defined by ExternalReference::Type):
  //  BUILTIN_COMPARE_CALL:  int f(double, double)
  //  BUILTIN_FP_FP_CALL:    double f(double, double)
  //  BUILTIN_FP_CALL:       double f(double)
  //  BUILTIN_FP_INT_CALL:   double f(double, int)
  if (num_of_double_args > 0) {
    ASSERT(num_of_reg_args <= 1);
    ASSERT((num_of_double_args + num_of_reg_args) <= 2);
  }


  // If the stack pointer is not csp, we need to derive an aligned csp from the
  // current stack pointer.
  const Register old_stack_pointer = StackPointer();
  if (!csp.Is(old_stack_pointer)) {
    AssertStackConsistency();

    int sp_alignment = ActivationFrameAlignment();
    // The ABI mandates at least 16-byte alignment.
    ASSERT(sp_alignment >= 16);
    ASSERT(IsPowerOf2(sp_alignment));

    // The current stack pointer is a callee saved register, and is preserved
    // across the call.
    ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));

    // Align and synchronize the system stack pointer with jssp.
    Bic(csp, old_stack_pointer, sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Call directly. The function called cannot cause a GC, or allow preemption,
  // so the return address in the link register stays correct.
  Call(function);

  if (!csp.Is(old_stack_pointer)) {
    if (emit_debug_code()) {
      // Because the stack pointer must be aligned on a 16-byte boundary, the
      // aligned csp can be up to 12 bytes below the jssp. This is the case
      // where we only pushed one W register on top of an aligned jssp.
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireX();
      ASSERT(ActivationFrameAlignment() == 16);
      Sub(temp, csp, old_stack_pointer);
      // We want temp <= 0 && temp >= -12.
      Cmp(temp, 0);
      Ccmp(temp, -12, NFlag, le);
      Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
    }
    SetStackPointer(old_stack_pointer);
  }
}
1989
1990
// Indirect jump to the address in |target|.
void MacroAssembler::Jump(Register target) {
  Br(target);
}
1994
1995
// Jump to an absolute address, materialized into a scratch register with the
// given relocation mode.
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, Operand(target, rmode));
  Br(temp);
}
2002
2003
// Jump to a raw address. Code targets must use the Handle<Code> overload so
// the GC can relocate them.
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode);
}
2008
2009
// Jump to a code object. The raw location is embedded and later fixed up via
// the CODE_TARGET relocation.
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
}
2015
2016
// Call the address in |target|. Pools are blocked so the call site has a
// fixed size, verified against CallSize(Register) in debug builds.
void MacroAssembler::Call(Register target) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  Blr(target);

#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
#endif
}
2030
2031
// PC-relative call to a label in the current code object. Pools are blocked
// so the call site has a fixed size, verified in debug builds.
void MacroAssembler::Call(Label* target) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  Bl(target);

#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
#endif
}
2045
2046
// MacroAssembler::CallSize is sensitive to changes in this function, as it
// requires to know how many instructions are used to branch to the target.
// Call a raw address. NONE64 targets are materialized with a fixed
// movz/movk sequence (no relocation); all other modes load the address from
// the constant pool so the GC/patcher can update it.
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
  BlockPoolsScope scope(this);
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif
  // Statement positions are expected to be recorded when the target
  // address is loaded.
  positions_recorder()->WriteRecordedPositions();

  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
  ASSERT(rmode != RelocInfo::NONE32);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  if (rmode == RelocInfo::NONE64) {
    // Addresses are 48 bits so we never need to load the upper 16 bits.
    uint64_t imm = reinterpret_cast<uint64_t>(target);
    // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
    ASSERT(((imm >> 48) & 0xffff) == 0);
    movz(temp, (imm >> 0) & 0xffff, 0);
    movk(temp, (imm >> 16) & 0xffff, 16);
    movk(temp, (imm >> 32) & 0xffff, 32);
  } else {
    Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
  }
  Blr(temp);
#ifdef DEBUG
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
#endif
}
2081
2082
// Call a code object. A non-empty |ast_id| is recorded with the call and
// upgrades the relocation mode to CODE_TARGET_WITH_ID.
void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id) {
#ifdef DEBUG
  Label start_call;
  Bind(&start_call);
#endif

  if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }

  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode);

#ifdef DEBUG
  // Check the size of the code generated.
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
#endif
}
2104
2105
2106int MacroAssembler::CallSize(Register target) {
2107  USE(target);
2108  return kInstructionSize;
2109}
2110
2111
2112int MacroAssembler::CallSize(Label* target) {
2113  USE(target);
2114  return kInstructionSize;
2115}
2116
2117
2118int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2119  USE(target);
2120
2121  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2122  ASSERT(rmode != RelocInfo::NONE32);
2123
2124  if (rmode == RelocInfo::NONE64) {
2125    return kCallSizeWithoutRelocation;
2126  } else {
2127    return kCallSizeWithRelocation;
2128  }
2129}
2130
2131
2132int MacroAssembler::CallSize(Handle<Code> code,
2133                             RelocInfo::Mode rmode,
2134                             TypeFeedbackId ast_id) {
2135  USE(code);
2136  USE(ast_id);
2137
2138  // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2139  ASSERT(rmode != RelocInfo::NONE32);
2140
2141  if (rmode == RelocInfo::NONE64) {
2142    return kCallSizeWithoutRelocation;
2143  } else {
2144    return kCallSizeWithRelocation;
2145  }
2146}
2147
2148
2149
2150
2151
// Compare |object|'s map against the HeapNumber map and branch to
// |on_heap_number| on a match and/or |on_not_heap_number| on a mismatch
// (either label, but not both, may be NULL). |object| must not be a smi.
// If |heap_number_map| is NoReg, the map is loaded into a scratch register.
void MacroAssembler::JumpForHeapNumber(Register object,
                                       Register heap_number_map,
                                       Label* on_heap_number,
                                       Label* on_not_heap_number) {
  ASSERT(on_heap_number || on_not_heap_number);
  AssertNotSmi(object);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  // Load the HeapNumber map if it is not passed.
  if (heap_number_map.Is(NoReg)) {
    heap_number_map = temps.AcquireX();
    LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  } else {
    AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  }

  ASSERT(!AreAliased(temp, heap_number_map));

  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  Cmp(temp, heap_number_map);

  if (on_heap_number) {
    B(eq, on_heap_number);
  }
  if (on_not_heap_number) {
    B(ne, on_not_heap_number);
  }
}
2182
2183
2184void MacroAssembler::JumpIfHeapNumber(Register object,
2185                                      Label* on_heap_number,
2186                                      Register heap_number_map) {
2187  JumpForHeapNumber(object,
2188                    heap_number_map,
2189                    on_heap_number,
2190                    NULL);
2191}
2192
2193
2194void MacroAssembler::JumpIfNotHeapNumber(Register object,
2195                                         Label* on_not_heap_number,
2196                                         Register heap_number_map) {
2197  JumpForHeapNumber(object,
2198                    heap_number_map,
2199                    NULL,
2200                    on_not_heap_number);
2201}
2202
2203
// Look up |object| (a smi or heap number) in the number-to-string cache.
// On a hit, the cached string is left in |result|; on a miss, control
// transfers to |not_found|. All scratch registers are clobbered.
void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));

  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
                                      FixedArray::kLengthOffset));
  Asr(mask, mask, 1);  // Divide length by two.
  Sub(mask, mask, 1);  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;

  JumpIfSmi(object, &is_smi);
  // Not a smi: it must be a heap number, otherwise bail out to not_found.
  CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
           DONT_DO_SMI_CHECK);

  // Hash the double: xor of the two 32-bit halves of its bit pattern.
  STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
  Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
  Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
  Eor(scratch1, scratch1, scratch2);
  And(scratch1, scratch1, mask);

  // Calculate address of entry in string cache: each entry consists of two
  // pointer sized fields.
  Add(scratch1, number_string_cache,
      Operand(scratch1, LSL, kPointerSizeLog2 + 1));

  // A smi key in the cache means this entry doesn't hold a heap number.
  Register probe = mask;
  Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  // Compare the double values; any NaN comparison fails (ne) to not_found.
  Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
  Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
  Fcmp(d0, d1);
  B(ne, not_found);
  B(&load_result_from_cache);

  Bind(&is_smi);
  Register scratch = scratch1;
  And(scratch, mask, Operand::UntagSmi(object));
  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  Add(scratch, number_string_cache,
      Operand(scratch, LSL, kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  Cmp(object, probe);
  B(ne, not_found);

  // Get the result from the cache.
  Bind(&load_result_from_cache);
  Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
                   scratch1, scratch2);
}
2276
2277
// Try to represent |value| exactly as an integer: convert to int and back,
// then compare. Branches to |on_successful_conversion| (eq) and/or
// |on_failed_conversion| (ne); either label may be NULL. |as_int| receives
// the truncated integer regardless of the outcome.
// NOTE(review): Fcmp treats -0.0 == 0.0, so -0.0 counts as a successful
// conversion here; callers needing -0 detection must check separately
// (cf. JumpIfMinusZero).
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
                                             FPRegister value,
                                             FPRegister scratch_d,
                                             Label* on_successful_conversion,
                                             Label* on_failed_conversion) {
  // Convert to an int and back again, then compare with the original value.
  Fcvtzs(as_int, value);
  Scvtf(scratch_d, as_int);
  Fcmp(value, scratch_d);

  if (on_successful_conversion) {
    B(on_successful_conversion, eq);
  }
  if (on_failed_conversion) {
    B(on_failed_conversion, ne);
  }
}
2295
2296
// Set the overflow (V) flag iff |input| is -0.0.
void MacroAssembler::TestForMinusZero(DoubleRegister input) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  // The bit pattern of floating point -0.0 is 0x8000000000000000, the most
  // negative int64 value, so subtracting 1 (cmp) will cause overflow.
  Fmov(temp, input);
  Cmp(temp, 1);
}
2305
2306
// Branch to |on_negative_zero| iff |input| is -0.0 (overflow flag set by
// TestForMinusZero).
void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
                                     Label* on_negative_zero) {
  TestForMinusZero(input);
  B(vs, on_negative_zero);
}
2312
2313
// Branch to |on_negative_zero| iff |input| holds the raw bit pattern of
// -0.0 (0x8000000000000000) in an integer register.
void MacroAssembler::JumpIfMinusZero(Register input,
                                     Label* on_negative_zero) {
  ASSERT(input.Is64Bits());
  // Floating point value is in an integer register. Detect -0.0 by subtracting
  // 1 (cmp), which will cause overflow.
  Cmp(input, 1);
  B(vs, on_negative_zero);
}
2322
2323
// Clamp the signed 32-bit value in |input| to the range [0..255] and write
// the result to |output|, branch-free via two conditional selects.
void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
  // Clamp the value to [0..255].
  Cmp(input.W(), Operand(input.W(), UXTB));
  // If input < input & 0xff, it must be < 0, so saturate to 0.
  Csel(output.W(), wzr, input.W(), lt);
  // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
  Csel(output.W(), output.W(), 255, le);
}
2332
2333
// In-place variant: clamp |in_out| to [0..255].
void MacroAssembler::ClampInt32ToUint8(Register in_out) {
  ClampInt32ToUint8(in_out, in_out);
}
2337
2338
// Clamp the double in |input| to [0..255], rounding to nearest, and write
// the integer result to |output|. |dbl_scratch| is clobbered.
// NOTE(review): a NaN input appears to come out as 0 via Fcvtnu — confirm
// against callers' expectations.
void MacroAssembler::ClampDoubleToUint8(Register output,
                                        DoubleRegister input,
                                        DoubleRegister dbl_scratch) {
  // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
  //   - Inputs lower than 0 (including -infinity) produce 0.
  //   - Inputs higher than 255 (including +infinity) produce 255.
  // Also, it seems that PIXEL types use round-to-nearest rather than
  // round-towards-zero.

  // Squash +infinity before the conversion, since Fcvtnu will normally
  // convert it to 0.
  Fmov(dbl_scratch, 255);
  Fmin(dbl_scratch, dbl_scratch, input);

  // Convert double to unsigned integer. Values less than zero become zero.
  // Values greater than 255 have already been clamped to 255.
  Fcvtnu(output, dbl_scratch);
}
2357
2358
2359void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
2360                                               Register src,
2361                                               unsigned count,
2362                                               Register scratch1,
2363                                               Register scratch2,
2364                                               Register scratch3,
2365                                               Register scratch4,
2366                                               Register scratch5) {
2367  // Untag src and dst into scratch registers.
2368  // Copy src->dst in a tight loop.
2369  ASSERT(!AreAliased(dst, src,
2370                     scratch1, scratch2, scratch3, scratch4, scratch5));
2371  ASSERT(count >= 2);
2372
2373  const Register& remaining = scratch3;
2374  Mov(remaining, count / 2);
2375
2376  const Register& dst_untagged = scratch1;
2377  const Register& src_untagged = scratch2;
2378  Sub(dst_untagged, dst, kHeapObjectTag);
2379  Sub(src_untagged, src, kHeapObjectTag);
2380
2381  // Copy fields in pairs.
2382  Label loop;
2383  Bind(&loop);
2384  Ldp(scratch4, scratch5,
2385      MemOperand(src_untagged, kXRegSize* 2, PostIndex));
2386  Stp(scratch4, scratch5,
2387      MemOperand(dst_untagged, kXRegSize* 2, PostIndex));
2388  Sub(remaining, remaining, 1);
2389  Cbnz(remaining, &loop);
2390
2391  // Handle the leftovers.
2392  if (count & 1) {
2393    Ldr(scratch4, MemOperand(src_untagged));
2394    Str(scratch4, MemOperand(dst_untagged));
2395  }
2396}
2397
2398
2399void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
2400                                                   Register src,
2401                                                   unsigned count,
2402                                                   Register scratch1,
2403                                                   Register scratch2,
2404                                                   Register scratch3,
2405                                                   Register scratch4) {
2406  // Untag src and dst into scratch registers.
2407  // Copy src->dst in an unrolled loop.
2408  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
2409
2410  const Register& dst_untagged = scratch1;
2411  const Register& src_untagged = scratch2;
2412  sub(dst_untagged, dst, kHeapObjectTag);
2413  sub(src_untagged, src, kHeapObjectTag);
2414
2415  // Copy fields in pairs.
2416  for (unsigned i = 0; i < count / 2; i++) {
2417    Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2418    Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2419  }
2420
2421  // Handle the leftovers.
2422  if (count & 1) {
2423    Ldr(scratch3, MemOperand(src_untagged));
2424    Str(scratch3, MemOperand(dst_untagged));
2425  }
2426}
2427
2428
// Copy |count| tagged fields from |src| to |dst| one field at a time, fully
// unrolled. Fallback used by CopyFields when only one caller-supplied
// scratch register is available. All scratch registers are clobbered:
// scratch1/scratch2 hold the untagged dst/src pointers, scratch3 the data.
void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
                                              Register src,
                                              unsigned count,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3) {
  // Untag src and dst into scratch registers.
  // Copy src->dst in an unrolled loop.
  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));

  const Register& dst_untagged = scratch1;
  const Register& src_untagged = scratch2;
  Sub(dst_untagged, dst, kHeapObjectTag);
  Sub(src_untagged, src, kHeapObjectTag);

  // Copy fields one by one.
  for (unsigned i = 0; i < count; i++) {
    Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
    Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
  }
}
2450
2451
// Copy |count| tagged fields from |src| to |dst|. |temps| supplies scratch
// registers (all are clobbered, and more are drawn from the macro
// assembler's scratch pool); the strategy (loop vs. unrolled) is chosen from
// |count| and the number of registers available.
void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
                                unsigned count) {
  // One of two methods is used:
  //
  // For high 'count' values where many scratch registers are available:
  //    Untag src and dst into scratch registers.
  //    Copy src->dst in a tight loop.
  //
  // For low 'count' values or where few scratch registers are available:
  //    Untag src and dst into scratch registers.
  //    Copy src->dst in an unrolled loop.
  //
  // In both cases, fields are copied in pairs if possible, and left-overs are
  // handled separately.
  ASSERT(!AreAliased(dst, src));
  ASSERT(!temps.IncludesAliasOf(dst));
  ASSERT(!temps.IncludesAliasOf(src));
  ASSERT(!temps.IncludesAliasOf(xzr));

  if (emit_debug_code()) {
    Cmp(dst, src);
    Check(ne, kTheSourceAndDestinationAreTheSame);
  }

  // The value of 'count' at which a loop will be generated (if there are
  // enough scratch registers).
  static const unsigned kLoopThreshold = 8;

  UseScratchRegisterScope masm_temps(this);
  if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
    CopyFieldsLoopPairsHelper(dst, src, count,
                              Register(temps.PopLowestIndex()),
                              Register(temps.PopLowestIndex()),
                              Register(temps.PopLowestIndex()),
                              masm_temps.AcquireX(),
                              masm_temps.AcquireX());
  } else if (temps.Count() >= 2) {
    CopyFieldsUnrolledPairsHelper(dst, src, count,
                                  Register(temps.PopLowestIndex()),
                                  Register(temps.PopLowestIndex()),
                                  masm_temps.AcquireX(),
                                  masm_temps.AcquireX());
  } else if (temps.Count() == 1) {
    CopyFieldsUnrolledHelper(dst, src, count,
                             Register(temps.PopLowestIndex()),
                             masm_temps.AcquireX(),
                             masm_temps.AcquireX());
  } else {
    // At least one caller-supplied scratch register is required.
    UNREACHABLE();
  }
}
2503
2504
// Copy |length| bytes from |src| to |dst|. The buffers must not overlap
// (checked in debug code). |src|, |dst|, |length| and |scratch| are all
// clobbered. |hint| selects whether a 16-byte-at-a-time bulk loop is emitted
// ahead of the byte-wise tail loop.
void MacroAssembler::CopyBytes(Register dst,
                               Register src,
                               Register length,
                               Register scratch,
                               CopyHint hint) {
  UseScratchRegisterScope temps(this);
  Register tmp1 = temps.AcquireX();
  Register tmp2 = temps.AcquireX();
  ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
  ASSERT(!AreAliased(src, dst, csp));

  if (emit_debug_code()) {
    // Check copy length.
    Cmp(length, 0);
    Assert(ge, kUnexpectedNegativeValue);

    // Check src and dst buffers don't overlap.
    Add(scratch, src, length);  // Calculate end of src buffer.
    Cmp(scratch, dst);
    Add(scratch, dst, length);  // Calculate end of dst buffer.
    Ccmp(scratch, src, ZFlag, gt);
    Assert(le, kCopyBuffersOverlap);
  }

  Label short_copy, short_loop, bulk_loop, done;

  if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
    // Bulk-copy whole 16-byte pairs, leaving length % 16 bytes for the
    // byte-wise tail loop below.
    Register bulk_length = scratch;
    int pair_size = 2 * kXRegSize;
    int pair_mask = pair_size - 1;

    Bic(bulk_length, length, pair_mask);
    Cbz(bulk_length, &short_copy);
    Bind(&bulk_loop);
    Sub(bulk_length, bulk_length, pair_size);
    Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
    Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
    Cbnz(bulk_length, &bulk_loop);

    And(length, length, pair_mask);
  }

  // Copy any remaining bytes one at a time.
  Bind(&short_copy);
  Cbz(length, &done);
  Bind(&short_loop);
  Sub(length, length, 1);
  Ldrb(tmp1, MemOperand(src, 1, PostIndex));
  Strb(tmp1, MemOperand(dst, 1, PostIndex));
  Cbnz(length, &short_loop);


  Bind(&done);
}
2558
2559
// Store |filler| into |field_count| consecutive pointer-sized fields
// starting at |dst|. Stores are done in pairs where possible; a zero count
// is a no-op. |dst| is preserved (the moving pointer lives in a scratch
// register).
void MacroAssembler::FillFields(Register dst,
                                Register field_count,
                                Register filler) {
  ASSERT(!dst.Is(csp));
  UseScratchRegisterScope temps(this);
  Register field_ptr = temps.AcquireX();
  Register counter = temps.AcquireX();
  Label done;

  // Decrement count. If the result < zero, count was zero, and there's nothing
  // to do. If count was one, flags are set to fail the gt condition at the end
  // of the pairs loop.
  Subs(counter, field_count, 1);
  B(lt, &done);

  // There's at least one field to fill, so do this unconditionally.
  Str(filler, MemOperand(dst, kPointerSize, PostIndex));

  // If the bottom bit of counter is set, there are an even number of fields to
  // fill, so pull the start pointer back by one field, allowing the pairs loop
  // to overwrite the field that was stored above.
  And(field_ptr, counter, 1);
  Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));

  // Store filler to memory in pairs.
  Label entry, loop;
  B(&entry);
  Bind(&loop);
  Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
  Subs(counter, counter, 2);
  Bind(&entry);
  B(gt, &loop);

  Bind(&done);
}
2595
2596
// Branch to |failure| unless both |first| and |second| are sequential ASCII
// strings. |smi_check| controls whether smi inputs are routed to |failure|
// or merely asserted against in debug code. scratch1/scratch2 are clobbered
// with the instance types.
void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure,
    SmiCheckType smi_check) {

  if (smi_check == DO_SMI_CHECK) {
    JumpIfEitherSmi(first, second, failure);
  } else if (emit_debug_code()) {
    ASSERT(smi_check == DONT_DO_SMI_CHECK);
    Label not_smi;
    JumpIfEitherSmi(first, second, NULL, &not_smi);

    // At least one input is a smi, but the flags indicated a smi check wasn't
    // needed.
    Abort(kUnexpectedSmi);

    Bind(&not_smi);
  }

  // Test that both first and second are sequential ASCII strings.
  Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}
2631
2632
// Branch to 'failure' unless both instance types in 'first' and 'second'
// denote sequential ASCII strings. Clobbers scratch1 and scratch2.
// NOTE(review): this uses ASCII_STRING_TYPE as the expected tag whereas the
// sibling helpers below spell it as
// (kStringTag | kOneByteStringTag | kSeqStringTag) — presumably the same
// value; confirm before unifying.
void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // scratch1 is written before 'second' is read, so they must not alias.
  // (scratch2 may alias 'first', which is consumed by the first And.)
  ASSERT(!AreAliased(scratch1, second));
  ASSERT(!AreAliased(scratch1, scratch2));
  static const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  And(scratch1, first, kFlatAsciiStringMask);
  And(scratch2, second, kFlatAsciiStringMask);
  // Compare the first masked type; only if it matched (eq), conditionally
  // compare the second. On a first mismatch, Ccmp sets flags to NoFlag
  // (Z clear), so the 'ne' branch below is taken either way.
  Cmp(scratch1, kFlatAsciiStringTag);
  Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
  B(ne, failure);
}
2650
2651
// Branch to 'failure' unless the instance type in 'type' denotes a
// sequential ASCII string. Clobbers 'scratch'.
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  // Mask out everything except string-ness, encoding and representation,
  // then require the exact seq-one-byte-string combination.
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch, type, kFlatAsciiStringMask);
  Cmp(scratch, kFlatAsciiStringTag);
  B(ne, failure);
}
2663
2664
// Branch to 'failure' unless both instance types denote sequential ASCII
// strings. Unlike the 'Either' variant above, no aliasing at all is allowed
// between inputs and scratches, so 'first' and 'second' are preserved.
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  ASSERT(!AreAliased(first, second, scratch1, scratch2));
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  And(scratch1, first, kFlatAsciiStringMask);
  And(scratch2, second, kFlatAsciiStringMask);
  // Cmp + conditional Cmp: if the first comparison fails, Ccmp forces
  // NoFlag (Z clear) so the 'ne' branch fires without testing the second.
  Cmp(scratch1, kFlatAsciiStringTag);
  Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
  B(ne, failure);
}
2682
2683
// Branch to 'not_unique_name' unless 'type' is a unique name: either an
// internalized string or a symbol. 'type' holds an instance type.
void MacroAssembler::JumpIfNotUniqueName(Register type,
                                         Label* not_unique_name) {
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
  //   continue
  // } else {
  //   goto not_unique_name
  // }
  // Tst sets Z iff type is an internalized string (both tags are zero).
  // If that failed (ne), Ccmp compares against SYMBOL_TYPE; if it succeeded,
  // ZFlag is forced so the branch below is not taken.
  Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
  Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
  B(ne, not_unique_name);
}
2696
2697
// Common entry sequence for invoking JS code: compares the expected and
// actual argument counts and, on mismatch, routes the call through the
// ArgumentsAdaptorTrampoline. On a (possible) match, falls through to
// 'regular_invoke' so the caller can emit the direct call.
//
//  expected/actual        - argument counts, each either an immediate or a
//                           register (actual must be x0, expected x2).
//  code_constant/code_reg - the code to invoke; exactly one is provided
//                           (code_reg must be x3 when used).
//  done                   - bound by the caller after the direct call site;
//                           jumped to after an adapted CALL_FUNCTION call.
//  definitely_mismatches  - out-param, set true when the counts are known at
//                           compile time to differ (the adaptor handles
//                           everything and no fall-through call is needed).
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag,
                                    bool* definitely_mismatches,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  x0: actual arguments count.
  //  x1: function (passed through to callee).
  //  x2: expected arguments count.

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(x0));
  ASSERT(expected.is_immediate() || expected.reg().is(x2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;

    } else {
      Mov(x0, actual.immediate());
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        // Set up x2 for the argument adaptor.
        Mov(x2, expected.immediate());
      }
    }

  } else {  // expected is a register.
    Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
                                              : Operand(actual.reg());
    // If actual == expected perform a regular invocation.
    Cmp(expected.reg(), actual_op);
    B(eq, &regular_invoke);
    // Otherwise set up x0 for the argument adaptor.
    Mov(x0, actual_op);
  }

  // If the argument counts may mismatch, generate a call to the argument
  // adaptor.
  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      // Materialize the code entry point from the code object in x3.
      Mov(x3, Operand(code_constant));
      Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        // If the arg counts don't match, no extra code is emitted by
        // MAsm::InvokeCode and we can just fall through.
        B(done);
      }
    } else {
      // Tail-call (JUMP_FUNCTION): the adaptor jumps on to the callee.
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
  }
  Bind(&regular_invoke);
}
2779
2780
// Invoke the code at 'code' (an entry-point register) with the given
// argument counts, going through InvokePrologue to adapt arguments if
// needed. 'flag' selects call vs. tail-jump.
void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;

  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                 &definitely_mismatches, call_wrapper);

  // If we are certain that actual != expected, then we know InvokePrologue
  // will have handled the call through the argument adaptor mechanism, so no
  // direct call is emitted here.
  // NOTE(review): the original mentioned "call kind in x5"; nothing in this
  // function touches x5 — likely a stale comment, confirm against callers.
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      ASSERT(flag == JUMP_FUNCTION);
      Jump(code);
    }
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  Bind(&done);
}
2813
2814
// Invoke a JSFunction held in 'function' (must be x1), loading the expected
// argument count from its SharedFunctionInfo and its code entry point from
// the function object. Sets up cp from the function's context.
void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  ASSERT(function.is(x1));

  // x2 and x3 are mandated by the InvokePrologue register contract.
  Register expected_reg = x2;
  Register code_reg = x3;

  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
  // The number of arguments is stored as an int32_t, and -1 is a marker
  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
  // extension to correctly handle it.
  Ldr(expected_reg, FieldMemOperand(function,
                                    JSFunction::kSharedFunctionInfoOffset));
  Ldrsw(expected_reg,
        FieldMemOperand(expected_reg,
                        SharedFunctionInfo::kFormalParameterCountOffset));
  Ldr(code_reg,
      FieldMemOperand(function, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
2844
2845
// Invoke a JSFunction held in 'function' (must be x1) with a caller-supplied
// expected argument count. Sets up cp from the function's context and calls
// indirectly through the function's code field.
void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  ASSERT(function.Is(x1));

  // x3 is mandated by the InvokePrologue register contract.
  Register code_reg = x3;

  // Set up the context.
  Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));

  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
2869
2870
// Invoke a compile-time-known JSFunction: materialize it into x1 and
// delegate to the register-based overload above.
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // Contract with called JS functions requires that function is passed in x1.
  // (See FullCodeGenerator::Generate().)
  __ LoadObject(x1, function);
  InvokeFunction(x1, expected, actual, flag, call_wrapper);
}
2881
2882
// Attempt an inline double-to-int64 conversion. On success, 'result'
// holds the converted value and control branches to 'done'; on saturation
// (input out of int64 range) control falls through so the caller can emit
// a slow path.
void MacroAssembler::TryConvertDoubleToInt64(Register result,
                                             DoubleRegister double_input,
                                             Label* done) {
  // Try to convert with an FPU convert instruction. It's trivial to compute
  // the modulo operation on an integer register so we convert to a 64-bit
  // integer.
  //
  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
  // when the double is out of range. NaNs and infinities will be converted to 0
  // (as ECMA-262 requires).
  Fcvtzs(result.X(), double_input);

  // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
  // representable using a double, so if the result is one of those then we know
  // that saturation occurred, and we need to manually handle the conversion.
  //
  // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
  // 1 will cause signed overflow.
  // Cmp(x, 1) overflows (V set) only for INT64_MIN; the conditional compare
  // with -1 runs when no overflow occurred (vc) and sets V for INT64_MAX.
  Cmp(result.X(), 1);
  Ccmp(result.X(), -1, VFlag, vc);

  // No overflow on either check: the conversion succeeded.
  B(vc, done);
}
2906
2907
// Truncate 'double_input' to an int32 in 'result', ECMA-262 ToInt32 style.
// Fast path is an inline FPU conversion; the out-of-range slow path calls
// DoubleToIStub with the input passed on the stack.
void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;
  ASSERT(jssp.Is(StackPointer()));

  // Try to convert the double to an int64. If successful, the bottom 32 bits
  // contain our truncated int32 result.
  TryConvertDoubleToInt64(result, double_input, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  Push(lr);
  Push(double_input);  // Put input on stack.

  DoubleToIStub stub(isolate(),
                     jssp,
                     result,
                     0,       // input is at jssp[0]
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber

  Drop(1, kDoubleSize);  // Drop the double input on the stack.
  Pop(lr);

  Bind(&done);
}
2934
2935
// Truncate the value of the HeapNumber in 'object' to an int32 in 'result'.
// Fast path loads the double and converts inline; the slow path calls
// DoubleToIStub reading the value directly from the heap number.
void MacroAssembler::TruncateHeapNumberToI(Register result,
                                           Register object) {
  Label done;
  ASSERT(!result.is(object));
  ASSERT(jssp.Is(StackPointer()));

  Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

  // Try to convert the double to an int64. If successful, the bottom 32 bits
  // contain our truncated int32 result.
  TryConvertDoubleToInt64(result, fp_scratch, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  Push(lr);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,   // is_truncating
                     true);  // skip_fastpath
  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber
  Pop(lr);

  Bind(&done);
}
2961
2962
// Emit the frame-setup prologue used by code stubs: push lr, fp, cp and a
// STUB frame-type marker, then point fp at the saved fp slot.
void MacroAssembler::StubPrologue() {
  ASSERT(StackPointer().Is(jssp));
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  __ Mov(temp, Smi::FromInt(StackFrame::STUB));
  // Compiled stubs don't age, and so they don't need the predictable code
  // ageing sequence.
  __ Push(lr, fp, cp, temp);
  __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
2973
2974
// Emit the function prologue. When pre-aging is requested, emit the
// code-age sequence (a call to the pre-aged stub) instead of the patchable
// frame-setup sequence.
void MacroAssembler::Prologue(bool code_pre_aging) {
  if (code_pre_aging) {
    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
    __ EmitCodeAgeSequence(stub);
  } else {
    __ EmitFrameSetupForCodeAgePatching();
  }
}
2983
2984
// Build a typed internal frame: push lr, fp, cp, a smi-encoded frame type
// and the code object, then point fp at the saved fp slot.
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  ASSERT(jssp.Is(StackPointer()));
  UseScratchRegisterScope temps(this);
  Register type_reg = temps.AcquireX();
  Register code_reg = temps.AcquireX();

  Push(lr, fp, cp);
  Mov(type_reg, Smi::FromInt(type));
  Mov(code_reg, Operand(CodeObject()));
  Push(type_reg, code_reg);
  // jssp[4] : lr
  // jssp[3] : fp
  // jssp[2] : cp
  // jssp[1] : type
  // jssp[0] : code object

  // Adjust FP to point to saved FP.
  Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
}
3004
3005
// Tear down a frame built by EnterFrame. The frame type is not needed for
// the teardown; fp alone locates the saved fp/lr pair.
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  ASSERT(jssp.Is(StackPointer()));
  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  Mov(jssp, fp);
  AssertStackConsistency();
  Pop(fp, lr);
}
3014
3015
// Save the caller-saved FP registers on the stack as part of building an
// exit frame; restored by ExitFrameRestoreFPRegs.
void MacroAssembler::ExitFramePreserveFPRegs() {
  PushCPURegList(kCallerSavedFP);
}
3019
3020
// Restore the caller-saved FP registers saved by ExitFramePreserveFPRegs,
// reading them fp-relative rather than popping (see comment below).
void MacroAssembler::ExitFrameRestoreFPRegs() {
  // Read the registers from the stack without popping them. The stack pointer
  // will be reset as part of the unwinding process.
  CPURegList saved_fp_regs = kCallerSavedFP;
  ASSERT(saved_fp_regs.Count() % 2 == 0);

  // Walk downwards from the last exit-frame field, loading register pairs in
  // the reverse of the order PushCPURegList stored them.
  int offset = ExitFrameConstants::kLastExitFrameField;
  while (!saved_fp_regs.IsEmpty()) {
    const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
    const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
    offset -= 2 * kDRegSize;
    Ldp(dst1, dst0, MemOperand(fp, offset));
  }
}
3035
3036
// Build an exit frame for calling out to C++:
//  - pushes lr/fp, a slot reserved for SPOffset and the code object;
//  - records fp and cp in the isolate's top-frame slots;
//  - optionally saves caller-saved FP registers;
//  - reserves 'extra_space' pointer-sized slots plus a return-address slot;
//  - switches the active stack pointer from jssp to csp (aligned).
// 'scratch' is clobbered. See the ASCII frame diagrams inline.
void MacroAssembler::EnterExitFrame(bool save_doubles,
                                    const Register& scratch,
                                    int extra_space) {
  ASSERT(jssp.Is(StackPointer()));

  // Set up the new stack frame.
  Mov(scratch, Operand(CodeObject()));
  Push(lr, fp);
  Mov(fp, StackPointer());
  // xzr fills the SPOffset slot, which is patched at the end of this function.
  Push(xzr, scratch);
  //          fp[8]: CallerPC (lr)
  //    fp -> fp[0]: CallerFP (old fp)
  //          fp[-8]: Space reserved for SPOffset.
  //  jssp -> fp[-16]: CodeObject()
  STATIC_ASSERT((2 * kPointerSize) ==
                ExitFrameConstants::kCallerSPDisplacement);
  STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
  STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
  STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);

  // Save the frame pointer and context pointer in the top frame.
  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                         isolate())));
  Str(fp, MemOperand(scratch));
  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                         isolate())));
  Str(cp, MemOperand(scratch));

  STATIC_ASSERT((-2 * kPointerSize) ==
                ExitFrameConstants::kLastExitFrameField);
  if (save_doubles) {
    ExitFramePreserveFPRegs();
  }

  // Reserve space for the return address and for user requested memory.
  // We do this before aligning to make sure that we end up correctly
  // aligned with the minimum of wasted space.
  Claim(extra_space + 1, kXRegSize);
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: Space reserved for SPOffset.
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
  //         jssp[8]: Extra space reserved for caller (if extra_space != 0).
  // jssp -> jssp[0]: Space reserved for the return address.

  // Align and synchronize the system stack pointer with jssp.
  AlignAndSetCSPForFrame();
  ASSERT(csp.Is(StackPointer()));

  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: Space reserved for SPOffset.
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
  //         csp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //  csp -> csp[0]: Space reserved for the return address.

  // ExitFrame::GetStateForFramePointer expects to find the return address at
  // the memory address immediately below the pointer stored in SPOffset.
  // It is not safe to derive much else from SPOffset, because the size of the
  // padding can vary.
  Add(scratch, csp, kXRegSize);
  Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
3104
3105
// Leave the current exit frame (built by EnterExitFrame): optionally
// restore saved FP registers and cp, clear the isolate's top-frame slots,
// pop the frame and switch the active stack pointer back to jssp.
// 'scratch' is clobbered.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
                                    const Register& scratch,
                                    bool restore_context) {
  ASSERT(csp.Is(StackPointer()));

  if (restore_doubles) {
    ExitFrameRestoreFPRegs();
  }

  // Restore the context pointer from the top frame.
  if (restore_context) {
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                           isolate())));
    Ldr(cp, MemOperand(scratch));
  }

  if (emit_debug_code()) {
    // Also emit debug code to clear the cp in the top frame.
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
                                           isolate())));
    Str(xzr, MemOperand(scratch));
  }
  // Clear the frame pointer from the top frame.
  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                         isolate())));
  Str(xzr, MemOperand(scratch));

  // Pop the exit frame.
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[...]: The rest of the frame.
  Mov(jssp, fp);
  SetStackPointer(jssp);
  AssertStackConsistency();
  Pop(fp, lr);
}
3143
3144
// Store 'value' into the stats counter, if native code counters are enabled
// and this counter is active. Clobbers both scratch registers.
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch1, value);
    Mov(scratch2, ExternalReference(counter));
    Str(scratch1, MemOperand(scratch2));
  }
}
3153
3154
// Add 'value' (must be non-zero) to the stats counter via a load-modify-store,
// if native code counters are enabled. Clobbers both scratch registers.
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value != 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch2, ExternalReference(counter));
    Ldr(scratch1, MemOperand(scratch2));
    Add(scratch1, scratch1, value);
    Str(scratch1, MemOperand(scratch2));
  }
}
3165
3166
// Subtract 'value' from the stats counter; implemented as an increment by
// the negated amount.
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  IncrementCounter(counter, -value, scratch1, scratch2);
}
3171
3172
// Load into 'dst' the context 'context_chain_length' hops up the context
// chain from cp (0 means the current context).
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    Mov(dst, cp);
  }
}
3187
3188
// Emit a call into the runtime's DebugBreak handler via CEntryStub
// (zero runtime arguments; x0/x1 follow the CEntry calling convention).
void MacroAssembler::DebugBreak() {
  Mov(x0, 0);
  Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
  CEntryStub ces(isolate(), 1);
  ASSERT(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
3196
3197
3198void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3199                                    int handler_index) {
3200  ASSERT(jssp.Is(StackPointer()));
3201  // Adjust this code if the asserts don't hold.
3202  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3203  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3204  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3205  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3206  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3207  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3208
3209  // For the JSEntry handler, we must preserve the live registers x0-x4.
3210  // (See JSEntryStub::GenerateBody().)
3211
3212  unsigned state =
3213      StackHandler::IndexField::encode(handler_index) |
3214      StackHandler::KindField::encode(kind);
3215
3216  // Set up the code object and the state for pushing.
3217  Mov(x10, Operand(CodeObject()));
3218  Mov(x11, state);
3219
3220  // Push the frame pointer, context, state, and code object.
3221  if (kind == StackHandler::JS_ENTRY) {
3222    ASSERT(Smi::FromInt(0) == 0);
3223    Push(xzr, xzr, x11, x10);
3224  } else {
3225    Push(fp, cp, x11, x10);
3226  }
3227
3228  // Link the current handler as the next handler.
3229  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3230  Ldr(x10, MemOperand(x11));
3231  Push(x10);
3232  // Set this new handler as the current one.
3233  Str(jssp, MemOperand(x11));
3234}
3235
3236
// Unlink the topmost try-handler: pop the 'next' pointer, drop the rest of
// the handler frame, and store 'next' back as the isolate's current handler.
void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  Pop(x10);
  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
  Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
  Str(x10, MemOperand(x11));
}
3244
3245
3246void MacroAssembler::Allocate(int object_size,
3247                              Register result,
3248                              Register scratch1,
3249                              Register scratch2,
3250                              Label* gc_required,
3251                              AllocationFlags flags) {
3252  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
3253  if (!FLAG_inline_new) {
3254    if (emit_debug_code()) {
3255      // Trash the registers to simulate an allocation failure.
3256      // We apply salt to the original zap value to easily spot the values.
3257      Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3258      Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3259      Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3260    }
3261    B(gc_required);
3262    return;
3263  }
3264
3265  UseScratchRegisterScope temps(this);
3266  Register scratch3 = temps.AcquireX();
3267
3268  ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
3269  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3270
3271  // Make object size into bytes.
3272  if ((flags & SIZE_IN_WORDS) != 0) {
3273    object_size *= kPointerSize;
3274  }
3275  ASSERT(0 == (object_size & kObjectAlignmentMask));
3276
3277  // Check relative positions of allocation top and limit addresses.
3278  // The values must be adjacent in memory to allow the use of LDP.
3279  ExternalReference heap_allocation_top =
3280      AllocationUtils::GetAllocationTopReference(isolate(), flags);
3281  ExternalReference heap_allocation_limit =
3282      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3283  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3284  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3285  ASSERT((limit - top) == kPointerSize);
3286
3287  // Set up allocation top address and object size registers.
3288  Register top_address = scratch1;
3289  Register allocation_limit = scratch2;
3290  Mov(top_address, Operand(heap_allocation_top));
3291
3292  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3293    // Load allocation top into result and the allocation limit.
3294    Ldp(result, allocation_limit, MemOperand(top_address));
3295  } else {
3296    if (emit_debug_code()) {
3297      // Assert that result actually contains top on entry.
3298      Ldr(scratch3, MemOperand(top_address));
3299      Cmp(result, scratch3);
3300      Check(eq, kUnexpectedAllocationTop);
3301    }
3302    // Load the allocation limit. 'result' already contains the allocation top.
3303    Ldr(allocation_limit, MemOperand(top_address, limit - top));
3304  }
3305
3306  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3307  // the same alignment on ARM64.
3308  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3309
3310  // Calculate new top and bail out if new space is exhausted.
3311  Adds(scratch3, result, object_size);
3312  Ccmp(scratch3, allocation_limit, CFlag, cc);
3313  B(hi, gc_required);
3314  Str(scratch3, MemOperand(top_address));
3315
3316  // Tag the object if requested.
3317  if ((flags & TAG_OBJECT) != 0) {
3318    ObjectTag(result, result);
3319  }
3320}
3321
3322
3323void MacroAssembler::Allocate(Register object_size,
3324                              Register result,
3325                              Register scratch1,
3326                              Register scratch2,
3327                              Label* gc_required,
3328                              AllocationFlags flags) {
3329  if (!FLAG_inline_new) {
3330    if (emit_debug_code()) {
3331      // Trash the registers to simulate an allocation failure.
3332      // We apply salt to the original zap value to easily spot the values.
3333      Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3334      Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3335      Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3336    }
3337    B(gc_required);
3338    return;
3339  }
3340
3341  UseScratchRegisterScope temps(this);
3342  Register scratch3 = temps.AcquireX();
3343
3344  ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
3345  ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
3346         scratch1.Is64Bits() && scratch2.Is64Bits());
3347
3348  // Check relative positions of allocation top and limit addresses.
3349  // The values must be adjacent in memory to allow the use of LDP.
3350  ExternalReference heap_allocation_top =
3351      AllocationUtils::GetAllocationTopReference(isolate(), flags);
3352  ExternalReference heap_allocation_limit =
3353      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3354  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3355  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3356  ASSERT((limit - top) == kPointerSize);
3357
3358  // Set up allocation top address and object size registers.
3359  Register top_address = scratch1;
3360  Register allocation_limit = scratch2;
3361  Mov(top_address, heap_allocation_top);
3362
3363  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3364    // Load allocation top into result and the allocation limit.
3365    Ldp(result, allocation_limit, MemOperand(top_address));
3366  } else {
3367    if (emit_debug_code()) {
3368      // Assert that result actually contains top on entry.
3369      Ldr(scratch3, MemOperand(top_address));
3370      Cmp(result, scratch3);
3371      Check(eq, kUnexpectedAllocationTop);
3372    }
3373    // Load the allocation limit. 'result' already contains the allocation top.
3374    Ldr(allocation_limit, MemOperand(top_address, limit - top));
3375  }
3376
3377  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3378  // the same alignment on ARM64.
3379  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3380
3381  // Calculate new top and bail out if new space is exhausted
3382  if ((flags & SIZE_IN_WORDS) != 0) {
3383    Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
3384  } else {
3385    Adds(scratch3, result, object_size);
3386  }
3387
3388  if (emit_debug_code()) {
3389    Tst(scratch3, kObjectAlignmentMask);
3390    Check(eq, kUnalignedAllocationInNewSpace);
3391  }
3392
3393  Ccmp(scratch3, allocation_limit, CFlag, cc);
3394  B(hi, gc_required);
3395  Str(scratch3, MemOperand(top_address));
3396
3397  // Tag the object if requested.
3398  if ((flags & TAG_OBJECT) != 0) {
3399    ObjectTag(result, result);
3400  }
3401}
3402
3403
// Roll back the most recent new-space allocation by resetting the allocation
// top to the (untagged) address of 'object'. Only valid if 'object' was the
// last object allocated and nothing has been allocated since.
// Clobbers 'scratch' and untags 'object' in place.
void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  Bic(object, object, kHeapObjectTagMask);
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  Mov(scratch, new_space_allocation_top);
  Ldr(scratch, MemOperand(scratch));
  Cmp(object, scratch);
  Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
  // Write the address of the object to un-allocate as the current top.
  Mov(scratch, new_space_allocation_top);
  Str(object, MemOperand(scratch));
}
3422
3423
// Allocate a sequential two-byte string of 'length' characters in new space
// and set up its map, length and hash field. 'length' is an untagged character
// count (it is doubled to get the byte size). Jumps to gc_required if the
// young space is exhausted. All registers must be distinct.
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  Add(scratch1, length, length);  // Length in bytes, not chars.
  Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
  Bic(scratch1, scratch1, kObjectAlignmentMask);  // Round down to alignment.

  // Allocate two-byte string in new space.
  Allocate(scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3453
3454
// Allocate a sequential one-byte (ASCII) string of 'length' characters in new
// space and set up its map, length and hash field. 'length' is an untagged
// character count; with one byte per character it is also the payload size.
// Jumps to gc_required if the young space is exhausted.
void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  STATIC_ASSERT(kCharSize == 1);
  Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
  Bic(scratch1, scratch1, kObjectAlignmentMask);  // Round down to alignment.

  // Allocate ASCII string in new space.
  Allocate(scratch1,
           result,
           scratch2,
           scratch3,
           gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3484
3485
3486void MacroAssembler::AllocateTwoByteConsString(Register result,
3487                                               Register length,
3488                                               Register scratch1,
3489                                               Register scratch2,
3490                                               Label* gc_required) {
3491  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3492           TAG_OBJECT);
3493
3494  InitializeNewString(result,
3495                      length,
3496                      Heap::kConsStringMapRootIndex,
3497                      scratch1,
3498                      scratch2);
3499}
3500
3501
3502void MacroAssembler::AllocateAsciiConsString(Register result,
3503                                             Register length,
3504                                             Register scratch1,
3505                                             Register scratch2,
3506                                             Label* gc_required) {
3507  Allocate(ConsString::kSize,
3508           result,
3509           scratch1,
3510           scratch2,
3511           gc_required,
3512           TAG_OBJECT);
3513
3514  InitializeNewString(result,
3515                      length,
3516                      Heap::kConsAsciiStringMapRootIndex,
3517                      scratch1,
3518                      scratch2);
3519}
3520
3521
// Allocate a two-byte sliced string in new space and set up its map, length
// and hash field. Jumps to gc_required if the young space is exhausted.
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  ASSERT(!AreAliased(result, length, scratch1, scratch2));
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kSlicedStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3537
3538
// Allocate a one-byte (ASCII) sliced string in new space and set up its map,
// length and hash field. Jumps to gc_required if the young space is exhausted.
void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  ASSERT(!AreAliased(result, length, scratch1, scratch2));
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kSlicedAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}
3554
3555
// Allocates a heap number or jumps to the gc_required label if the young space
// is full and a scavenge is needed.
// If 'value' is a valid (64-bit) register, it is stored into the new heap
// number's value slot; if 'heap_number_map' is valid it must already hold the
// heap number map (this skips the LoadRoot). The result is tagged on exit.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Label* gc_required,
                                        Register scratch1,
                                        Register scratch2,
                                        CPURegister value,
                                        CPURegister heap_number_map) {
  ASSERT(!value.IsValid() || value.Is64Bits());
  UseScratchRegisterScope temps(this);

  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           NO_ALLOCATION_FLAGS);

  // Prepare the heap number map.
  if (!heap_number_map.IsValid()) {
    // If we have a valid value register, use the same type of register to store
    // the map so we can use STP to store both in one instruction.
    if (value.IsValid() && value.IsFPRegister()) {
      heap_number_map = temps.AcquireD();
    } else {
      heap_number_map = scratch1;
    }
    LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  }
  if (emit_debug_code()) {
    Register map;
    if (heap_number_map.IsFPRegister()) {
      // The map is held in an FP register; move it to a core register so
      // AssertRegisterIsRoot can compare it.
      map = scratch1;
      Fmov(map, DoubleRegister(heap_number_map));
    } else {
      map = Register(heap_number_map);
    }
    AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
  }

  // Store the heap number map and the value in the allocated object.
  if (value.IsSameSizeAndType(heap_number_map)) {
    // Map and value slots are adjacent, so a single STP can write both.
    STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
                  HeapNumber::kValueOffset);
    Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
  } else {
    Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
    if (value.IsValid()) {
      Str(value, MemOperand(result, HeapNumber::kValueOffset));
    }
  }
  ObjectTag(result, result);
}
3607
3608
// Compare the instance type of 'object' against 'type' and branch to
// if_cond_pass when the comparison satisfies 'cond'. 'map' receives the
// object's map and 'type_reg' its instance type.
void MacroAssembler::JumpIfObjectType(Register object,
                                      Register map,
                                      Register type_reg,
                                      InstanceType type,
                                      Label* if_cond_pass,
                                      Condition cond) {
  CompareObjectType(object, map, type_reg, type);
  B(cond, if_cond_pass);
}
3618
3619
// Branch to if_not_object when 'object' does not have instance type 'type'.
// Convenience wrapper around JumpIfObjectType with an 'ne' condition.
void MacroAssembler::JumpIfNotObjectType(Register object,
                                         Register map,
                                         Register type_reg,
                                         InstanceType type,
                                         Label* if_not_object) {
  JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
}
3627
3628
// Sets condition flags based on comparison, and returns type in type_reg.
// Loads the object's map into 'map' as a side effect.
void MacroAssembler::CompareObjectType(Register object,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}
3637
3638
// Sets condition flags based on comparison, and returns type in type_reg.
// Reads the instance type byte out of 'map' and compares it against 'type'.
void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  Cmp(type_reg, type);
}
3646
3647
// Load the map of 'obj' into 'scratch' and compare it against the handle
// 'map', setting the condition flags.
void MacroAssembler::CompareMap(Register obj,
                                Register scratch,
                                Handle<Map> map) {
  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMap(scratch, map);
}
3654
3655
// Compare a register already holding a map ('obj_map') against the handle
// 'map', setting the condition flags.
void MacroAssembler::CompareMap(Register obj_map,
                                Handle<Map> map) {
  Cmp(obj_map, Operand(map));
}
3660
3661
// Branch to 'fail' unless 'obj' has exactly the map 'map'. Optionally performs
// a smi check first (smis have no map and would also fail).
void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }

  CompareMap(obj, scratch, map);
  B(ne, fail);
}
3674
3675
// Branch to 'fail' unless the map of 'obj' equals the root-list map at
// 'index'. Optionally performs a smi check first.
void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  JumpIfNotRoot(scratch, index, fail);
}
3687
3688
// Branch to 'fail' unless 'obj_map' (a register already holding a map) equals
// the handle 'map'. Optionally performs a smi check on the register first.
void MacroAssembler::CheckMap(Register obj_map,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj_map, fail);
  }

  CompareMap(obj_map, map);
  B(ne, fail);
}
3700
3701
3702void MacroAssembler::DispatchMap(Register obj,
3703                                 Register scratch,
3704                                 Handle<Map> map,
3705                                 Handle<Code> success,
3706                                 SmiCheckType smi_check_type) {
3707  Label fail;
3708  if (smi_check_type == DO_SMI_CHECK) {
3709    JumpIfSmi(obj, &fail);
3710  }
3711  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3712  Cmp(scratch, Operand(map));
3713  B(ne, &fail);
3714  Jump(success, RelocInfo::CODE_TARGET);
3715  Bind(&fail);
3716}
3717
3718
// Set the condition flags by testing the bit field byte of object's map
// against 'mask' (Tst: flags reflect map->bit_field() & mask).
void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  Tst(temp, mask);
}
3726
3727
3728void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3729  // Load the map's "bit field 2".
3730  __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3731  // Retrieve elements_kind from bit field 2.
3732  DecodeField<Map::ElementsKindBits>(result);
3733}
3734
3735
// Load the prototype of the JSFunction in 'function' into 'result', branching
// to 'miss' if 'function' is not a function, has no instance prototype yet
// (the hole), or — when action == kMissOnBoundFunction — is a bound function.
// Handles both the direct-prototype and initial-map cases, plus the
// non-instance-prototype case (prototype stored in the constructor field).
void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss,
                                             BoundFunctionAction action) {
  ASSERT(!AreAliased(function, result, scratch));

  // Check that the receiver isn't a smi.
  JumpIfSmi(function, miss);

  // Check that the function really is a function. Load map into result reg.
  JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);

  if (action == kMissOnBoundFunction) {
    Register scratch_w = scratch.W();
    Ldr(scratch,
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    // On 64-bit platforms, compiler hints field is not a smi. See definition of
    // kCompilerHintsOffset in src/objects.h.
    Ldr(scratch_w,
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
    Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
  }

  // Make sure that the function has an instance prototype.
  Label non_instance;
  Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);

  // Get the prototype or initial map from the function.
  Ldr(result,
      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and simply
  // miss the cache instead. This will allow us to allocate a prototype object
  // on-demand in the runtime system.
  JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);

  // Get the prototype from the initial map.
  Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  B(&done);

  // Non-instance prototype: fetch prototype from constructor field in initial
  // map.
  Bind(&non_instance);
  Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  Bind(&done);
}
3790
3791
// Compare 'obj' against the root-list entry at 'index', setting the condition
// flags. Uses a scratch register to hold the loaded root value.
void MacroAssembler::CompareRoot(const Register& obj,
                                 Heap::RootListIndex index) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  ASSERT(!AreAliased(obj, temp));
  LoadRoot(temp, index);
  Cmp(obj, temp);
}
3800
3801
// Branch to if_equal when 'obj' equals the root-list entry at 'index'.
void MacroAssembler::JumpIfRoot(const Register& obj,
                                Heap::RootListIndex index,
                                Label* if_equal) {
  CompareRoot(obj, index);
  B(eq, if_equal);
}
3808
3809
// Branch to if_not_equal when 'obj' differs from the root-list entry at
// 'index'.
void MacroAssembler::JumpIfNotRoot(const Register& obj,
                                   Heap::RootListIndex index,
                                   Label* if_not_equal) {
  CompareRoot(obj, index);
  B(ne, if_not_equal);
}
3816
3817
// Compare lhs against rhs and branch to if_true when 'cond' holds, otherwise
// to if_false. Either label may equal 'fall_through', in which case the
// corresponding branch is omitted and control falls through; the cases below
// emit the minimal number of branches for each combination.
void MacroAssembler::CompareAndSplit(const Register& lhs,
                                     const Operand& rhs,
                                     Condition cond,
                                     Label* if_true,
                                     Label* if_false,
                                     Label* fall_through) {
  if ((if_true == if_false) && (if_false == fall_through)) {
    // Fall through.
  } else if (if_true == if_false) {
    // Both outcomes go to the same place: unconditional branch, no compare.
    B(if_true);
  } else if (if_false == fall_through) {
    // Only the true case needs a branch.
    CompareAndBranch(lhs, rhs, cond, if_true);
  } else if (if_true == fall_through) {
    // Only the false case needs a branch; invert the condition.
    CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
  } else {
    // Both cases branch away.
    CompareAndBranch(lhs, rhs, cond, if_true);
    B(if_false);
  }
}
3837
3838
// Test 'reg' against 'bit_pattern' and branch to if_any_set when any tested
// bit is set, otherwise to if_all_clear. Either label may equal
// 'fall_through' to omit the corresponding branch; each case emits the
// minimal number of instructions.
void MacroAssembler::TestAndSplit(const Register& reg,
                                  uint64_t bit_pattern,
                                  Label* if_all_clear,
                                  Label* if_any_set,
                                  Label* fall_through) {
  if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
    // Fall through.
  } else if (if_all_clear == if_any_set) {
    // Both outcomes go to the same place: unconditional branch, no test.
    B(if_all_clear);
  } else if (if_all_clear == fall_through) {
    TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
  } else if (if_any_set == fall_through) {
    TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
  } else {
    // Both cases branch away.
    TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
    B(if_all_clear);
  }
}
3857
3858
// Branch to 'fail' unless 'map' has one of the fast elements kinds
// (FAST_SMI_ELEMENTS .. FAST_HOLEY_ELEMENTS). Relies on the enum values being
// the lowest range of bit field 2, checked by the static asserts below.
void MacroAssembler::CheckFastElements(Register map,
                                       Register scratch,
                                       Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
  B(hi, fail);
}
3870
3871
// Branch to 'fail' unless 'map' has a fast *object* elements kind
// (FAST_ELEMENTS or FAST_HOLEY_ELEMENTS) — i.e. strictly above the smi-only
// kinds and not above the fast-holey range.
void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  // If cond==ls, set cond=hi, otherwise compare.
  Ccmp(scratch,
       Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
  B(hi, fail);
}
3886
3887
// Note: The ARM version of this clobbers elements_reg, but this version does
// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
//
// Store the number in value_reg (a smi or a heap number) as a canonicalized
// double into the FixedDoubleArray at elements_reg[key_reg]. Branches to
// 'fail' if value_reg holds any other kind of heap object.
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register key_reg,
                                                 Register elements_reg,
                                                 Register scratch1,
                                                 FPRegister fpscratch1,
                                                 Label* fail,
                                                 int elements_offset) {
  ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
  Label store_num;

  // Speculatively convert the smi to a double - all smis can be exactly
  // represented as a double.
  SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);

  // If value_reg is a smi, we're done.
  JumpIfSmi(value_reg, &store_num);

  // Ensure that the object is a heap number.
  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
           fail, DONT_DO_SMI_CHECK);

  // Not a smi: load the double payload instead of the speculative conversion.
  Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));

  // Canonicalize NaNs.
  CanonicalizeNaN(fpscratch1);

  // Store the result.
  Bind(&store_num);
  Add(scratch1, elements_reg,
      Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
  Str(fpscratch1,
      FieldMemOperand(scratch1,
                      FixedDoubleArray::kHeaderSize - elements_offset));
}
3924
3925
3926bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3927  return has_frame_ || !stub->SometimesSetsUpAFrame();
3928}
3929
3930
// Extract the cached array index from a string hash field into 'index' as a
// smi. The caller must have verified that the hash field contains an index.
void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // If the hash field contains an array index pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it does not
  // conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  DecodeField<String::ArrayIndexValueBits>(index, hash);
  SmiTag(index, index);
}
3941
3942
// Debug-mode checks for SeqStringSetChar: aborts (via Check) unless 'string'
// is a non-smi object whose map matches 'encoding_mask' (sequential string of
// the expected encoding) and 'index' is within [0, string length).
void MacroAssembler::EmitSeqStringSetCharCheck(
    Register string,
    Register index,
    SeqStringSetCharCheckIndexType index_type,
    Register scratch,
    uint32_t encoding_mask) {
  ASSERT(!AreAliased(string, index, scratch));

  if (index_type == kIndexIsSmi) {
    AssertSmi(index);
  }

  // Check that string is an object.
  AssertNotSmi(string, kNonObject);

  // Check that string has an appropriate map.
  Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
  Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
  Cmp(scratch, encoding_mask);
  Check(eq, kUnexpectedStringType);

  // Compare index against the (smi) length; untag the length only when the
  // index itself is untagged so both sides use the same representation.
  Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
  Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
  Check(lt, kIndexIsTooLarge);

  // A zero smi has the same bit pattern as the integer zero, so the signed
  // comparison below is valid for both representations.
  ASSERT_EQ(0, Smi::FromInt(0));
  Cmp(index, 0);
  Check(ge, kIndexIsNegative);
}
3974
3975
// Security check for accessing a global proxy: branch to 'miss' unless the
// current lexical context and the holder's native context either are the same
// context or carry the same security token.
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss) {
  ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
  Label same_contexts;

  // Load current lexical context from the stack frame.
  Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Cmp(scratch1, 0);
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  Ldr(scratch1, FieldMemOperand(scratch1, offset));
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    // Read the first word and compare to the global_context_map.
    Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
    CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
    Check(eq, kExpectedNativeContext);
  }

  // Check if both contexts are the same.
  Ldr(scratch2, FieldMemOperand(holder_reg,
                                JSGlobalProxy::kNativeContextOffset));
  Cmp(scratch1, scratch2);
  B(&same_contexts, eq);

  // Check the context is a native context.
  if (emit_debug_code()) {
    // We're short on scratch registers here, so use holder_reg as a scratch.
    Push(holder_reg);
    Register scratch3 = holder_reg;

    CompareRoot(scratch2, Heap::kNullValueRootIndex);
    Check(ne, kExpectedNonNullContext);

    Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
    CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
    Check(eq, kExpectedNativeContext);
    Pop(holder_reg);
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
  Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
  Cmp(scratch1, scratch2);
  B(miss, ne);

  Bind(&same_contexts);
}
4039
4040
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
// code-stub-hydrogen.cc
void MacroAssembler::GetNumberHash(Register key, Register scratch) {
  ASSERT(!AreAliased(key, scratch));

  // Xor original key with a seed.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  Eor(key, key, Operand::UntagSmi(scratch));

  // The algorithm uses 32-bit integer values.
  key = key.W();
  scratch = scratch.W();

  // Compute the hash code from the untagged key.  This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  Mvn(scratch, key);
  Add(key, scratch, Operand(key, LSL, 15));
  // hash = hash ^ (hash >> 12);
  Eor(key, key, Operand(key, LSR, 12));
  // hash = hash + (hash << 2);
  Add(key, key, Operand(key, LSL, 2));
  // hash = hash ^ (hash >> 4);
  Eor(key, key, Operand(key, LSR, 4));
  // hash = hash * 2057;  (i.e. hash + (hash << 3) + (hash << 11))
  Mov(scratch, Operand(key, LSL, 11));
  Add(key, key, Operand(key, LSL, 3));
  Add(key, key, scratch);
  // hash = hash ^ (hash >> 16);
  Eor(key, key, Operand(key, LSR, 16));
}
4074
4075
// Probe the SeededNumberDictionary in 'elements' for the smi key 'key' and
// load the associated value into 'result'. Performs a bounded number of
// unrolled probes (kNumberDictionaryProbes) and branches to 'miss' when the
// key is absent or the entry is not a normal property.
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register scratch0,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3) {
  ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));

  Label done;

  SmiUntag(scratch0, key);
  GetNumberHash(scratch0, scratch1);

  // Compute the capacity mask.
  Ldrsw(scratch1,
        UntagSmiFieldMemOperand(elements,
                                SeededNumberDictionary::kCapacityOffset));
  Sub(scratch1, scratch1, 1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
    } else {
      Mov(scratch2, scratch0);
    }
    And(scratch2, scratch2, scratch1);

    // Scale the index by multiplying by the element size.
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
    Ldr(scratch3,
        FieldMemOperand(scratch2,
                        SeededNumberDictionary::kElementsStartOffset));
    Cmp(key, scratch3);
    if (i != (kNumberDictionaryProbes - 1)) {
      B(eq, &done);
    } else {
      // Last probe: not finding the key here means it is absent.
      B(ne, miss);
    }
  }

  Bind(&done);
  // Check that the value is a normal property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  Ldr(result, FieldMemOperand(scratch2, kValueOffset));
}
4136
4137
4138void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
4139                                         Register address,
4140                                         Register scratch1,
4141                                         SaveFPRegsMode fp_mode,
4142                                         RememberedSetFinalAction and_then) {
4143  ASSERT(!AreAliased(object, address, scratch1));
4144  Label done, store_buffer_overflow;
4145  if (emit_debug_code()) {
4146    Label ok;
4147    JumpIfNotInNewSpace(object, &ok);
4148    Abort(kRememberedSetPointerInNewSpace);
4149    bind(&ok);
4150  }
4151  UseScratchRegisterScope temps(this);
4152  Register scratch2 = temps.AcquireX();
4153
4154  // Load store buffer top.
4155  Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
4156  Ldr(scratch1, MemOperand(scratch2));
4157  // Store pointer to buffer and increment buffer top.
4158  Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
4159  // Write back new top of buffer.
4160  Str(scratch1, MemOperand(scratch2));
4161  // Call stub on end of buffer.
4162  // Check for end of buffer.
4163  ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
4164         (1 << (14 + kPointerSizeLog2)));
4165  if (and_then == kFallThroughAtEnd) {
4166    Tbz(scratch1, (14 + kPointerSizeLog2), &done);
4167  } else {
4168    ASSERT(and_then == kReturnAtEnd);
4169    Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
4170    Ret();
4171  }
4172
4173  Bind(&store_buffer_overflow);
4174  Push(lr);
4175  StoreBufferOverflowStub store_buffer_overflow_stub =
4176      StoreBufferOverflowStub(isolate(), fp_mode);
4177  CallStub(&store_buffer_overflow_stub);
4178  Pop(lr);
4179
4180  Bind(&done);
4181  if (and_then == kReturnAtEnd) {
4182    Ret();
4183  }
4184}
4185
4186
// Restore the registers saved by PushSafepointRegisters and drop the padding
// slots that were claimed for the unsaved registers.
void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  PopXRegList(kSafepointSavedRegisters);
  Drop(num_unsaved);
}
4192
4193
// Push the safepoint-saved register set onto the stack.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
  // adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  Claim(num_unsaved);
  PushXRegList(kSafepointSavedRegisters);
}
4202
4203
// Push the safepoint registers followed by all allocatable FP (D) registers.
void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                            FPRegister::kAllocatableFPRegisters));
}
4209
4210
// Pop the allocatable FP (D) registers, then the safepoint registers —
// mirroring PushSafepointRegistersAndDoubles in reverse order.
void MacroAssembler::PopSafepointRegistersAndDoubles() {
  PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                           FPRegister::kAllocatableFPRegisters));
  PopSafepointRegisters();
}
4216
4217
4218int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
4219  // Make sure the safepoint registers list is what we expect.
4220  ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
4221
4222  // Safepoint registers are stored contiguously on the stack, but not all the
4223  // registers are saved. The following registers are excluded:
4224  //  - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
4225  //    the macro assembler.
4226  //  - x28 (jssp) because JS stack pointer doesn't need to be included in
4227  //    safepoint registers.
4228  //  - x31 (csp) because the system stack pointer doesn't need to be included
4229  //    in safepoint registers.
4230  //
4231  // This function implements the mapping of register code to index into the
4232  // safepoint register slots.
4233  if ((reg_code >= 0) && (reg_code <= 15)) {
4234    return reg_code;
4235  } else if ((reg_code >= 18) && (reg_code <= 27)) {
4236    // Skip ip0 and ip1.
4237    return reg_code - 2;
4238  } else if ((reg_code == 29) || (reg_code == 30)) {
4239    // Also skip jssp.
4240    return reg_code - 3;
4241  } else {
4242    // This register has no safepoint register slot.
4243    UNREACHABLE();
4244    return -1;
4245  }
4246}
4247
4248
// Branch to 'if_any_set' if any of the bits in 'mask' are set in the flags
// word of the memory page containing 'object'. Clobbers 'scratch'.
void MacroAssembler::CheckPageFlagSet(const Register& object,
                                      const Register& scratch,
                                      int mask,
                                      Label* if_any_set) {
  // Clear the low bits to obtain the page-aligned base address.
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAnySet(scratch, mask, if_any_set);
}
4257
4258
// Branch to 'if_all_clear' if none of the bits in 'mask' are set in the flags
// word of the memory page containing 'object'. Clobbers 'scratch'.
void MacroAssembler::CheckPageFlagClear(const Register& object,
                                        const Register& scratch,
                                        int mask,
                                        Label* if_all_clear) {
  // Clear the low bits to obtain the page-aligned base address.
  And(scratch, object, ~Page::kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}
4267
4268
// Emit a write barrier for a store of 'value' into the field at 'offset'
// (in bytes, relative to the start of 'object'). Computes the field address
// into 'scratch' and delegates to RecordWrite. Clobbers 'scratch'; with
// emit_debug_code() it also clobbers 'value'.
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register scratch,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  // Compute the untagged address of the field.
  Add(scratch, object, offset - kHeapObjectTag);
  if (emit_debug_code()) {
    // Verify the computed address is pointer-aligned.
    Label ok;
    Tst(scratch, (1 << kPointerSizeLog2) - 1);
    B(eq, &ok);
    Abort(kUnalignedCellInWriteBarrier);
    Bind(&ok);
  }

  // The smi check (if any) was already done above, so omit it here.
  RecordWrite(object,
              scratch,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  Bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
    Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
  }
}
4319
4320
4321// Will clobber: object, map, dst.
4322// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4323void MacroAssembler::RecordWriteForMap(Register object,
4324                                       Register map,
4325                                       Register dst,
4326                                       LinkRegisterStatus lr_status,
4327                                       SaveFPRegsMode fp_mode) {
4328  ASM_LOCATION("MacroAssembler::RecordWrite");
4329  ASSERT(!AreAliased(object, map));
4330
4331  if (emit_debug_code()) {
4332    UseScratchRegisterScope temps(this);
4333    Register temp = temps.AcquireX();
4334
4335    CompareMap(map, temp, isolate()->factory()->meta_map());
4336    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4337  }
4338
4339  if (!FLAG_incremental_marking) {
4340    return;
4341  }
4342
4343  if (emit_debug_code()) {
4344    UseScratchRegisterScope temps(this);
4345    Register temp = temps.AcquireX();
4346
4347    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4348    Cmp(temp, map);
4349    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4350  }
4351
4352  // Count number of write barriers in generated code.
4353  isolate()->counters()->write_barriers_static()->Increment();
4354  // TODO(mstarzinger): Dynamic counter missing.
4355
4356  // First, check if a write barrier is even needed. The tests below
4357  // catch stores of smis and stores into the young generation.
4358  Label done;
4359
4360  // A single check of the map's pages interesting flag suffices, since it is
4361  // only set during incremental collection, and then it's also guaranteed that
4362  // the from object's page's interesting flag is also set.  This optimization
4363  // relies on the fact that maps can never be in new space.
4364  CheckPageFlagClear(map,
4365                     map,  // Used as scratch.
4366                     MemoryChunk::kPointersToHereAreInterestingMask,
4367                     &done);
4368
4369  // Record the actual write.
4370  if (lr_status == kLRHasNotBeenSaved) {
4371    Push(lr);
4372  }
4373  Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
4374  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
4375                       fp_mode);
4376  CallStub(&stub);
4377  if (lr_status == kLRHasNotBeenSaved) {
4378    Pop(lr);
4379  }
4380
4381  Bind(&done);
4382
4383  // Clobber clobbered registers when running with the debug-code flag
4384  // turned on to provoke errors.
4385  if (emit_debug_code()) {
4386    Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
4387    Mov(map, Operand(BitCast<int64_t>(kZapValue + 16)));
4388  }
4389}
4390
4391
// Emit a generic write barrier for a store of 'value' to 'address', a slot
// inside 'object'.
//
// Will clobber: object, address, value.
// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  ASM_LOCATION("MacroAssembler::RecordWrite");
  ASSERT(!AreAliased(object, value));

  if (emit_debug_code()) {
    // Verify that 'address' currently holds 'value', i.e. the store has
    // already been performed by the caller.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, MemOperand(address));
    Cmp(temp, value);
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  }

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  // TODO(mstarzinger): Dynamic counter missing.

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    // Skip the barrier if the value's page is not interesting.
    CheckPageFlagClear(value,
                       value,  // Used as scratch.
                       MemoryChunk::kPointersToHereAreInterestingMask,
                       &done);
  }
  // Skip the barrier if the object's page is not interesting.
  CheckPageFlagClear(object,
                     value,  // Used as scratch.
                     MemoryChunk::kPointersFromHereAreInterestingMask,
                     &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(lr);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr);
  }

  Bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
    Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
  }
}
4462
4463
// Debug-mode check that the two low bits of 'reg' do not hold the impossible
// mark-bit pattern. Emits nothing unless emit_debug_code() is on.
void MacroAssembler::AssertHasValidColor(const Register& reg) {
  if (emit_debug_code()) {
    // The bit sequence is backward. The first character in the string
    // represents the least significant bit.
    ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

    // Valid unless bit 0 is clear and bit 1 is set (the "01" pattern above).
    Label color_is_valid;
    Tbnz(reg, 0, &color_is_valid);
    Tbz(reg, 1, &color_is_valid);
    Abort(kUnexpectedColorFound);
    Bind(&color_is_valid);
  }
}
4477
4478
// Compute the location of the mark bits for the object at 'addr_reg':
// 'bitmap_reg' receives the address of the bitmap cell, and 'shift_reg' the
// bit offset of the object's color within that cell.
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register shift_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
  ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
  // addr_reg is divided into fields:
  // |63        page base        20|19    high      8|7   shift   3|2  0|
  // 'high' gives the index of the cell holding color bits for the object.
  // 'shift' gives the offset in the cell for this object's color.
  const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  // Extract the 'high' field (cell index within the page's bitmap).
  Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
  // Page base address, then offset to the cell (cells are scaled by
  // kBytesPerCellLog2).
  Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
  Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
  // bitmap_reg:
  // |63        page base        20|19 zeros 15|14      high      3|2  0|
  Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
}
4498
4499
// Branch to 'has_color' if the mark bits of 'object' match the color given by
// (first_bit, second_bit). Falls through otherwise. Clobbers both scratches.
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register shift_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  // See mark-compact.h for color definitions.
  ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));

  GetMarkBits(object, bitmap_scratch, shift_scratch);
  Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  // Shift the bitmap down to get the color of the object in bits [1:0].
  Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);

  AssertHasValidColor(bitmap_scratch);

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);

  // Check for the color.
  if (first_bit == 0) {
    // Checking for white.
    ASSERT(second_bit == 0);
    // We only need to test the first bit.
    Tbz(bitmap_scratch, 0, has_color);
  } else {
    Label other_color;
    // Checking for grey or black.
    Tbz(bitmap_scratch, 0, &other_color);
    if (second_bit == 0) {
      Tbz(bitmap_scratch, 1, has_color);
    } else {
      Tbnz(bitmap_scratch, 1, has_color);
    }
    Bind(&other_color);
  }

  // Fall through if it does not have the right color.
}
4542
4543
// Branch to 'if_deprecated' if 'map' has its Deprecated bit set. Emits no code
// for maps that can never be deprecated. Clobbers 'scratch'.
void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    Mov(scratch, Operand(map));
    Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
    TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
  }
}
4553
4554
// Branch to 'on_black' if 'object' is marked black. Clobbers both scratches.
void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
}
4562
4563
// Branch to 'found' if any map in the prototype chain of 'object' (including
// the object's own map) has DICTIONARY_ELEMENTS. Clobbers both scratches.
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
    Register object,
    Register scratch0,
    Register scratch1,
    Label* found) {
  ASSERT(!AreAliased(object, scratch0, scratch1));
  Factory* factory = isolate()->factory();
  Register current = scratch0;
  Label loop_again;

  // Scratch contains elements pointer.
  Mov(current, object);

  // Loop based on the map going up the prototype chain.
  Bind(&loop_again);
  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
  // Continue with the prototype until the chain terminates in null.
  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
}
4586
4587
// Given 'ldr_location', the address of a load-literal (ldr) instruction,
// compute into 'result' the address of the literal that the instruction loads.
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  ASSERT(!result.Is(ldr_location));
  // Bit position and width of the signed literal offset field in an
  // ldr-literal instruction encoding.
  const uint32_t kLdrLitOffset_lsb = 5;
  const uint32_t kLdrLitOffset_width = 19;
  Ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    // Verify the instruction really is an ldr-literal.
    And(result, result, LoadLiteralFMask);
    Cmp(result, LoadLiteralFixed);
    Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
    // The instruction was clobbered. Reload it.
    Ldr(result, MemOperand(ldr_location));
  }
  // Extract the signed offset (in words) and add it, scaled to bytes, to the
  // instruction's address.
  Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
  Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
}
4604
4605
// If 'value' is white (unmarked), mark it black when it is a data object
// (heap number or non-cons/non-slice string), updating the page's live-bytes
// counter; otherwise branch to 'value_is_white_and_not_data'. Clobbers all
// four scratch registers.
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register shift_scratch,
    Register load_scratch,
    Register length_scratch,
    Label* value_is_white_and_not_data) {
  ASSERT(!AreAliased(
      value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);

  // Load the mark bits of 'value' into bits [1:0] of load_scratch.
  GetMarkBits(value, bitmap_scratch, shift_scratch);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Lsr(load_scratch, load_scratch, shift_scratch);

  AssertHasValidColor(load_scratch);

  // If the value is black or grey we don't need to do anything.
  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  Label done;
  Tbnz(load_scratch, 0, &done);

  // Value is white.  We check whether it is data that doesn't need scanning.
  Register map = load_scratch;  // Holds map while checking type.
  Label is_data_object;

  // Check for heap-number.
  Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  Mov(length_scratch, HeapNumber::kSize);
  JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);

  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  TestAndBranchIfAnySet(instance_type,
                        kIsIndirectStringMask | kIsNotStringMask,
                        value_is_white_and_not_data);

  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  Mov(length_scratch, ExternalString::kSize);
  TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);

  // Sequential string, either ASCII or UC16.
  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
                                                String::kLengthOffset));
  // load_scratch is 1 for ASCII (encoding bit set), 0 for UC16, used as the
  // per-character shift below.
  Tst(instance_type, kStringEncodingMask);
  Cset(load_scratch, eq);
  Lsl(length_scratch, length_scratch, load_scratch);
  // Add the header and round up to the object alignment.
  Add(length_scratch,
      length_scratch,
      SeqString::kHeaderSize + kObjectAlignmentMask);
  Bic(length_scratch, length_scratch, kObjectAlignmentMask);

  Bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  Register mask = shift_scratch;
  Mov(load_scratch, 1);
  Lsl(mask, load_scratch, shift_scratch);

  // Set the mark bit in the bitmap cell.
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Orr(load_scratch, load_scratch, mask);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  // Add the object's size to the page's live-bytes counter.
  Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Add(load_scratch, load_scratch, length_scratch);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  Bind(&done);
}
4697
4698
// Emit a Check (abort if 'cond' does not hold) only when debug code is
// enabled; emits nothing otherwise.
void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
  if (emit_debug_code()) {
    Check(cond, reason);
  }
}
4704
4705
4706
// Debug-code-only variant of CheckRegisterIsClear: abort if 'reg' is nonzero.
void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
  if (emit_debug_code()) {
    CheckRegisterIsClear(reg, reason);
  }
}
4712
4713
// Debug-code-only check that 'reg' holds the root value at 'index'; aborts
// with 'reason' otherwise.
void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index,
                                          BailoutReason reason) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, reason);
  }
}
4722
4723
// Debug-code-only check that 'elements' is a fast elements backing store:
// its map must be one of FixedArray, FixedDoubleArray or FixedCOWArray.
void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Label ok;
    Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
    JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    Bind(&ok);
  }
}
4737
4738
// Debug-code-only check that 'object' is a (non-smi) string.
void MacroAssembler::AssertIsString(const Register& object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    // A smi has its tag bit clear, so 'ne' here means "is not a smi".
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsNotAString);
    // Instance type must be below FIRST_NONSTRING_TYPE.
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}
4751
4752
// Abort with 'reason' unless condition 'cond' holds. Always emitted,
// regardless of the debug-code flag.
void MacroAssembler::Check(Condition cond, BailoutReason reason) {
  Label ok;
  B(cond, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}
4760
4761
// Abort with 'reason' unless 'reg' is zero. Always emitted, regardless of the
// debug-code flag.
void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
  Label ok;
  Cbz(reg, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}
4769
4770
// Emit code that aborts execution with the given bailout reason. Generated
// code never returns from this sequence. Depending on use_real_aborts(), the
// abort is routed either through Runtime::kAbort or directly through Printf.
void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  // With --trap-on-abort, emit just a breakpoint so a debugger stops here.
  if (FLAG_trap_on_abort) {
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we have
  // some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    // Pass the reason to the runtime as a smi argument on the stack.
    Mov(x0, Smi::FromInt(reason));
    Push(x0);

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      CallRuntime(Runtime::kAbort, 1);
    } else {
      CallRuntime(Runtime::kAbort, 1);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    {
      BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  // Restore the saved assembler state (dead code in generated terms, but keeps
  // the MacroAssembler's bookkeeping consistent for subsequent emission).
  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}
4832
4833
// If 'map_in_out' matches the native context's cached JSArray map for
// 'expected_kind', replace it with the cached map for 'transitioned_kind';
// otherwise branch to 'no_map_match'. Clobbers both scratches.
void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  Ldr(scratch1, GlobalObjectMemOperand());
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(scratch2, FieldMemOperand(scratch1, offset));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
}
4856
4857
// Load the global function at the given native-context 'index' into
// 'function'.
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  Ldr(function, GlobalObjectMemOperand());
  // Load the native context from the global or builtins object.
  Ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  Ldr(function, ContextMemOperand(function, index));
}
4867
4868
// Load the initial map of the global function 'function' into 'map'.
// 'scratch' is only used for the debug-mode sanity check.
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    // Verify the loaded value really is a map (its own map is the meta map).
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    B(&ok);
    Bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    Bind(&ok);
  }
}
4883
4884
// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
// Moves up to four arguments into the AAPCS64 varargs registers, embeds the
// format string in the instruction stream, and calls printf. Caller-saved
// registers are NOT preserved.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // The provided arguments, and their proper procedure-call standard registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};

  // Trimmed down below when the first NoReg argument is found.
  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);      // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedFP;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      // The first NoReg argument ends the argument list.
      ASSERT(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    ASSERT(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      ASSERT(pcs[i].IsFPRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        // Promote a single-precision argument to double precision.
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    // Branch around the embedded data so execution never falls into it.
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(arg_count, pcs);
}
5014
5015
// Emit the actual call to printf. On the simulator this is an hlt pseudo-call
// followed by encoded metadata (argument count and type pattern); on real
// hardware it is a direct call to the C library's printf.
void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
  // A call to printf needs special handling for the simulator, since the system
  // printf function will use a different instruction set and the procedure-call
  // standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(arg_count);          // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        // W or X register argument, depending on size.
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        // FP arguments must already have been promoted to D registers.
        ASSERT(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
      // Pack each argument's pattern into its own bit field.
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);   // kPrintfArgPatternListOffset
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}
5044
5045
// Print a printf-style formatted message with up to four register arguments,
// preserving all caller-saved registers (integer and FP) and the NZCV flags.
// Intended for debugging: it is deliberately self-contained.
void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    ASSERT(!csp.Aliases(arg0));
    ASSERT(!csp.Aliases(arg1));
    ASSERT(!csp.Aliases(arg2));
    ASSERT(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      // The pushes above moved the stack pointer; report the pre-push value.
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    // xzr pads the push to a 16-byte (two X register) pair.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  // Restore the caller's scratch register lists.
  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}
5123
5124
// Emit the patchable 'young' frame-setup sequence at the current position.
// The instruction-accurate scope pins the sequence to the exact fixed length
// that the code-age patching machinery expects.
void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  ASSERT(jssp.Is(StackPointer()));
  // Delegate to the static overload that emits the actual instructions.
  EmitFrameSetupForCodeAgePatching(this);
}
5134
5135
5136
// Emit the 'old' (aged) code sequence that calls into the given code age
// stub. It occupies exactly the same number of bytes as the young sequence,
// so the two can be patched over one another.
void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  ASSERT(jssp.Is(StackPointer()));
  // Delegate to the static overload that emits the actual instructions.
  EmitCodeAgeSequence(this, stub);
}
5143
5144
5145#undef __
5146#define __ assm->
5147
5148
// Static helper: emit the young frame-setup instructions through 'assm'.
// The total size is asserted to be kNoCodeAgeSequenceLength so the sequence
// can later be overwritten in place by EmitCodeAgeSequence.
void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  // Store x1, cp, fp and lr into the four freshly reserved slots.
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}
5165
5166
// Static helper: emit the aged sequence through 'assm' — load the stub entry
// point from the literal emitted at kCodeAgeStubEntryOffset, put this
// sequence's start address in x0, and branch to the stub.
void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  // With a NULL stub, only the fixed prefix is emitted — no stub address
  // literal and no full-length assertion.
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
  }
}
5189
5190
5191bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
5192  bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
5193  ASSERT(is_young ||
5194         isolate->code_aging_helper()->IsOld(sequence));
5195  return is_young;
5196}
5197
5198
// Compute result = dividend / divisor, truncated towards zero, without using
// a division instruction. Uses the multiply-by-magic-number technique
// (division by invariant integers); MultiplierAndShift supplies the magic
// multiplier and shift for the given divisor.
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  ASSERT(!AreAliased(result, dividend));
  ASSERT(result.Is32Bits() && dividend.Is32Bits());
  MultiplierAndShift ms(divisor);
  Mov(result, ms.multiplier());
  // 32x32 -> 64-bit signed multiply, then take the high 32 bits.
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  // Correct for the cases where the multiplier's sign disagrees with the
  // divisor's sign (the magic multiplier may have wrapped).
  if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
  if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
  if (ms.shift() > 0) Asr(result, result, ms.shift());
  // Round towards zero by adding the dividend's sign bit (dividend >> 31).
  Add(result, result, Operand(dividend, LSR, 31));
}
5213
5214
5215#undef __
5216
5217
5218UseScratchRegisterScope::~UseScratchRegisterScope() {
5219  available_->set_list(old_available_);
5220  availablefp_->set_list(old_availablefp_);
5221}
5222
5223
5224Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
5225  int code = AcquireNextAvailable(available_).code();
5226  return Register::Create(code, reg.SizeInBits());
5227}
5228
5229
5230FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
5231  int code = AcquireNextAvailable(availablefp_).code();
5232  return FPRegister::Create(code, reg.SizeInBits());
5233}
5234
5235
5236CPURegister UseScratchRegisterScope::AcquireNextAvailable(
5237    CPURegList* available) {
5238  CHECK(!available->IsEmpty());
5239  CPURegister result = available->PopLowestIndex();
5240  ASSERT(!AreAliased(result, xzr, csp));
5241  return result;
5242}
5243
5244
// Claim the specific register 'reg' from 'available'. "Unsafe" because the
// caller names the register rather than letting the scope pick a free one;
// the register (or an alias of it) must currently be available.
CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  ASSERT(available->IncludesAliasOf(reg));
  // Remove it from the pool so it cannot be handed out again in this scope.
  available->Remove(reg);
  return reg;
}
5251
5252
5253#define __ masm->
5254
5255
// Emit inline data describing a patchable smi check: which register was
// tested and how far back the check instruction is. Decoded by the
// InlineSmiCheckInfo constructor below.
void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  // Block pool emission so no constant/veneer pool can be inserted between
  // the smi check and this data word, which would corrupt the delta.
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    ASSERT(smi_check->is_bound());
    ASSERT(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    ASSERT(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}
5276
5277
// Decode the inline data written by InlineSmiCheckInfo::Emit at 'info',
// recovering the tested register and the address of the smi check
// instruction. Defaults (NoReg, NULL) mean "no patch site".
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  ASSERT(inline_data->IsInlineData());
  // The release-mode check mirrors the assert so malformed input degrades to
  // "no patch site" rather than decoding garbage.
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    ASSERT(is_uint32(payload));
    // A zero payload is the "no patch site" marker written by Emit.
    if (payload != 0) {
      int reg_code = RegisterBits::decode(payload);
      reg_ = Register::XRegFromCode(reg_code);
      uint64_t smi_check_delta = DeltaBits::decode(payload);
      ASSERT(smi_check_delta != 0);
      // The smi check sits 'delta' instructions before the inline data.
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}
5296
5297
5298#undef __
5299
5300
5301} }  // namespace v8::internal
5302
5303#endif  // V8_TARGET_ARCH_ARM64
5304