assembler-arm.cc revision 50ef84f5fad2def87d3fbc737bec4a32711fdef4
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "arm/assembler-arm-inl.h"
#include "serialize.h"

namespace v8 {
namespace internal {

// Safe default is no features.
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::enabled_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;


#ifdef __arm__
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  answer |= 1u << ARMv7;
#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
  // If the compiler is allowed to use VFP then we can use VFP too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  answer |= 1u << VFP3;
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
#ifdef CAN_USE_VFP_INSTRUCTIONS
  answer |= 1u << VFP3;
#endif  // def CAN_USE_VFP_INSTRUCTIONS
  return answer;
}
#endif  // def __arm__


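// Determine supported CPU features: simulator builds take them from flags,
// serializer builds use only platform- and compiler-implied features, and
// real hardware is probed at runtime.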
void CpuFeatures::Probe() {
#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
  if (FLAG_enable_vfp3) {
    supported_ |= 1u << VFP3;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
  }
#else  // def __arm__
  if (Serializer::enabled()) {
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    supported_ |= CpuFeaturesImpliedByCompiler();
    return;  // No features if we might serialize.
  }

  if (OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if
    // runtime detection of VFP returns true.
    supported_ |= 1u << VFP3;
    found_by_runtime_probing_ |= 1u << VFP3;
  }

  if (OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }
#endif  // def __arm__
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on ARM means that it is a movw/movt instruction.  We don't
  // generate those yet.
  return false;
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify that all objects referred to by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));
  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}


MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}


// -----------------------------------------------------------------------------
// Implementation of Assembler.

// Instruction encoding bits.
enum {
  H   = 1 << 5,   // halfword (or byte)
  S6  = 1 << 6,   // signed (or unsigned)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  B4  = 1 << 4,
  B5  = 1 << 5,
  B6  = 1 << 6,
  B7  = 1 << 7,
  B8  = 1 << 8,
  B9  = 1 << 9,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask     = 15 << 12,  // in str instruction
  CondMask   = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask  = (1 << 24) - 1,
  Off12Mask  = (1 << 12) - 1,
  // Reserved condition.
  nv = 15 << 28
};


// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kALUMask = 0x6f * B21;
const Instr kAddPattern = 0x4 * B21;
const Instr kSubPattern = 0x2 * B21;
const Instr kBicPattern = 0xe * B21;
const Instr kAndPattern = 0x0 * B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
static const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | fp.code() * B16;
static const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | fp.code() * B16;
static const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | fp.code() * B16;
static const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | fp.code() * B16;
static const Instr kLdrStrInstrTypeMask = 0xffff0000;
static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
static const Instr kLdrStrOffsetMask = 0x00000fff;

// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;

Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}


Assembler::~Assembler() {
  ASSERT(const_pool_blocked_nesting_ == 0);
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}


void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}


bool Assembler::IsNop(Instr instr, int type) {
  // Check for mov rx, rx.
  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
  return instr == (al | 13*B21 | type*B12 | type);
}


bool Assembler::IsBranch(Instr instr) {
  return (instr & (B27 | B25)) == (B27 | B25);
}


int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign-extend it, and multiply
  // it by 4 to get the offset in bytes.
  return ((instr & Imm24Mask) << 8) >> 6;
}


bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}


int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & Off12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~Off12Mask) | offset;
}


bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~Off12Mask) | offset;
}


bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  ASSERT(offset >= 0);
  ASSERT(is_uint12(offset));
  // Set the offset.
  return (instr & ~Off12Mask) | offset;
}


Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = ((instr & kRdMask) >> kRdShift);
  return reg;
}


bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned).
const int kEndOfChain = -4;


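// Returns the position targeted by the branch or emitted label constant at
// pos. While a label is unbound this is the next link in its chain,
// terminated by kEndOfChain.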
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}


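// Patch the branch or emitted label constant at pos to refer to target_pos.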
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~Imm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        int cond = instr & CondMask;
        const char* b;
        const char* c;
        if (cond == nv) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


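// Append the link chain of appendix to the end of L's chain, then mark
// appendix unused.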
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}


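// Encode a 16-bit immediate in the movw/movt layout: the high four bits go
// in bits 16-19, the low twelve bits in bits 0-11.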
static Instr EncodeMovwImmediate(uint32_t immediate) {
  ASSERT(immediate < 0x10000);
  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}


// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
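  // A shifter-operand immediate is an 8-bit value rotated right by an even
  // amount (2*rot); try all 16 possible rotations.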
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if (imm8 <= 0xff) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == kAddPattern ||
          alu_insn == kSubPattern) {
        if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == kAndPattern ||
                 alu_insn == kBicPattern) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}


// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
static bool MustUseConstantPool(RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    return Serializer::enabled();
  } else if (rmode == RelocInfo::NONE) {
    return false;
  }
  return true;
}


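// Returns true if the operand can be encoded in a single instruction: either
// a (possibly shifted) register, or an immediate that fits the shifter
// encoding and needs no constant pool entry.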
bool Operand::is_single_instruction() const {
  if (rm_.is_valid()) return true;
  if (MustUseConstantPool(rmode_)) return false;
  uint32_t dummy1, dummy2;
  return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
}


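// Emit a data-processing instruction (addressing mode 1). Immediates that
// cannot be encoded as a shifter operand are materialized via the constant
// pool or movw/movt, using ip as a scratch register where necessary.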
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseConstantPool(x.rmode_) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn must not be ip, or it will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        if (MustUseConstantPool(x.rmode_) ||
            !CpuFeatures::IsSupported(ARMv7)) {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(rd, MemOperand(pc, 0), cond);
        } else {
          // Will probably use movw; will certainly not use the constant pool.
          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
        }
      } else {
        // If this is not a mov or mvn instruction we may still be able to
        // avoid a constant pool entry by using mvn or movw.
        if (!MustUseConstantPool(x.rmode_) &&
            (instr & kMovMvnMask) != kMovMvnPattern) {
          mov(ip, x, LeaveCC, cond);
        } else {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(ip, MemOperand(pc, 0), cond);
        }
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
}


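// Emit a word or byte load/store (addressing mode 2) with a 12-bit immediate
// offset or a (possibly scaled) register offset. Offsets that cannot be
// encoded are first loaded into ip.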
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // The immediate offset cannot be encoded, so load it into register ip
      // first. rn (and rd in a load) must not be ip, or it will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


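// Emit a halfword, signed-byte, or doubleword load/store (addressing mode 3)
// with an 8-bit split immediate offset or an unscaled register offset; scaled
// register offsets are loaded into ip first.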
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // The immediate offset cannot be encoded, so load it into register ip
      // first. rn (and rd in a load) must not be ip, or it will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // A scaled register offset is not supported, so load the index into ip
    // first. rn (and rd in a load) must not be ip, or it will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


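// Emit a load/store multiple instruction (addressing mode 4).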
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


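// Emit a coprocessor load/store (addressing mode 5) with an 8-bit immediate
// word offset.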
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; this differs from addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}


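// Returns the branch offset (relative to pc + kPcLoadDelta) to encode for a
// branch to label L, linking L to this branch site if it is not yet bound.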
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  return target_pos - (pc_offset() + kPcLoadDelta);
}


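// Write a label constant (the label's position relative to the Code object)
// at at_offset, linking L to that location while the label is unbound.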
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}


// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}


void Assembler::bl(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
}


void Assembler::blx(int branch_offset) {  // v5 and above
  WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
}


void Assembler::blx(Register target, Condition cond) {  // v5 and above
  WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}


void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}


// Data-processing instructions.

void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | 0*B21 | s, src1, dst, src2);
}


void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 1*B21 | s, src1, dst, src2);
}


void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 2*B21 | s, src1, dst, src2);
}


void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 3*B21 | s, src1, dst, src2);
}


void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_peephole_optimization) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}


void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 5*B21 | s, src1, dst, src2);
}


void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 6*B21 | s, src1, dst, src2);
}


void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 7*B21 | s, src1, dst, src2);
}


void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 8*B21 | S, src1, r0, src2);
}


void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 9*B21 | S, src1, r0, src2);
}


void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 10*B21 | S, src1, r0, src2);
}


void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 11*B21 | S, src1, r0, src2);
}


void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 12*B21 | s, src1, dst, src2);
}


void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int)
  // pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}


void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  ASSERT(immediate < 0x10000);
  mov(reg, Operand(immediate), LeaveCC, cond);
}


void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}


void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 14*B21 | s, src1, dst, src2);
}


void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 15*B21 | s, r0, dst, src);
}


// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | B4 | src.code());
}


// Saturating instructions.

// Unsigned saturate.
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  ASSERT(src.rs_.is(no_reg));

  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}


// Bitfield manipulation instructions.

// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register.
//   ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}


// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
//   sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}


// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
//   bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}


// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
//   bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}


// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}


void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseConstantPool(src.rmode_) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}


// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(ry), pop(rx)
  //   str(ry, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(rx, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated if ry = rx.
  // If ry != rx, a register copy from ry to rx is inserted
  // after eliminating the push and the pop instructions.
  if (can_peephole_optimize(2)) {
    Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);

    if (IsPush(push_instr) && IsPop(pop_instr)) {
      if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
        // For consecutive push and pop on different registers,
        // we delete both the push & pop and insert a register move.
        // push ry, pop rx --> mov rx, ry
        Register reg_pushed, reg_popped;
        reg_pushed = GetRd(push_instr);
        reg_popped = GetRd(pop_instr);
        pc_ -= 2 * kInstrSize;
        // Insert a mov instruction, which is better than a pair of push & pop.
        mov(reg_popped, reg_pushed);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
                 pc_offset());
        }
      } else {
        // For consecutive push and pop on the same register,
        // both the push and the pop can be deleted.
        pc_ -= 2 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(2)) {
    Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);

    if ((IsStrRegFpOffset(str_instr) &&
         IsLdrRegFpOffset(ldr_instr)) ||
       (IsStrRegFpNegOffset(str_instr) &&
         IsLdrRegFpNegOffset(ldr_instr))) {
      if ((ldr_instr & kLdrStrInstrArgumentMask) ==
            (str_instr & kLdrStrInstrArgumentMask)) {
        // Pattern: Ldr/str same fp+offset, same register.
        //
        // The following:
        // str rx, [fp, #-12]
        // ldr rx, [fp, #-12]
        //
        // Becomes:
        // str rx, [fp, #-12]

        pc_ -= 1 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
        }
      } else if ((ldr_instr & kLdrStrOffsetMask) ==
                 (str_instr & kLdrStrOffsetMask)) {
        // Pattern: Ldr/str same fp+offset, different register.
        //
        // The following:
        // str rx, [fp, #-12]
        // ldr ry, [fp, #-12]
        //
        // Becomes:
        // str rx, [fp, #-12]
        // mov ry, rx

        Register reg_stored, reg_loaded;
        reg_stored = GetRd(str_instr);
        reg_loaded = GetRd(ldr_instr);
        pc_ -= 1 * kInstrSize;
        // Insert a mov instruction, which is better than ldr.
        mov(reg_loaded, reg_stored);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x str/ldr (fp + same offset), diff reg\n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(3)) {
    Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
    Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
    if (IsPush(mem_write_instr) &&
        IsPop(mem_read_instr)) {
      if ((IsLdrRegFpOffset(ldr_instr) ||
        IsLdrRegFpNegOffset(ldr_instr))) {
        if ((mem_write_instr & kRdMask) ==
              (mem_read_instr & kRdMask)) {
          // Pattern: push & pop from/to same register,
          // with a fp+offset ldr in between
          //
          // The following:
          // str rx, [sp, #-4]!
          // ldr rz, [fp, #-24]
          // ldr rx, [sp], #+4
          //
          // Becomes:
          // if(rx == rz)
          //   delete all
          // else
          //   ldr rz, [fp, #-24]

          if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
            pc_ -= 3 * kInstrSize;
          } else {
            pc_ -= 3 * kInstrSize;
            // Reinsert the ldr rz.
            emit(ldr_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
          }
        } else {
          // Pattern: push & pop from/to different registers
          // with a fp+offset ldr in between
          //
          // The following:
          // str rx, [sp, #-4]!
          // ldr rz, [fp, #-24]
          // ldr ry, [sp], #+4
          //
          // Becomes:
          // if(ry == rz)
          //   mov ry, rx;
          // else if(rx != rz)
          //   ldr rz, [fp, #-24]
          //   mov ry, rx
          // else if((ry != rz) || (rx == rz)) becomes:
          //   mov ry, rx
          //   ldr rz, [fp, #-24]

          Register reg_pushed, reg_popped;
          if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            mov(reg_popped, reg_pushed);
          } else if ((mem_write_instr & kRdMask)
                                != (ldr_instr & kRdMask)) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            emit(ldr_instr);
            mov(reg_popped, reg_pushed);
          } else if (((mem_read_instr & kRdMask)
                                     != (ldr_instr & kRdMask)) ||
                    ((mem_write_instr & kRdMask)
                                     == (ldr_instr & kRdMask))) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            mov(reg_popped, reg_pushed);
            emit(ldr_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
          }
        }
      }
    }
  }
}


void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  // ->  str r, [sp, 0], al
  if (can_peephole_optimize(2) &&
     // Pattern.
     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_peephole_optimization) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}


void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}


void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}


void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}


void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}


void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}


void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}


void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}


void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}


// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}


void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1634
1635
1636// Exception-generating instructions and debugging support.
1637void Assembler::stop(const char* msg) {
1638#ifndef __arm__
1639  // The simulator handles these special instructions and stops execution.
1640  emit(15 << 28 | ((intptr_t) msg));
1641#else  // def __arm__
1642#ifdef CAN_USE_ARMV5_INSTRUCTIONS
1643  bkpt(0);
1644#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
1645  swi(0x9f0001);
1646#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
1647#endif  // def __arm__
1648}
1649
1650
1651void Assembler::bkpt(uint32_t imm16) {  // v5 and above
1652  ASSERT(is_uint16(imm16));
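      // For example, bkpt(0) emits 0xe1200070:
      // cond=AL(1110) | 0001 0010 | imm12=0 | 0111 | imm4=0.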
1653  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
1654}
1655
1656
1657void Assembler::swi(uint32_t imm24, Condition cond) {
1658  ASSERT(is_uint24(imm24));
1659  emit(cond | 15*B24 | imm24);
1660}
1661
1662
1663// Coprocessor instructions.
1664void Assembler::cdp(Coprocessor coproc,
1665                    int opcode_1,
1666                    CRegister crd,
1667                    CRegister crn,
1668                    CRegister crm,
1669                    int opcode_2,
1670                    Condition cond) {
1671  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
1672  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
1673       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
1674}
1675
1676
1677void Assembler::cdp2(Coprocessor coproc,
1678                     int opcode_1,
1679                     CRegister crd,
1680                     CRegister crn,
1681                     CRegister crm,
1682                     int opcode_2) {  // v5 and above
1683  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
1684}
1685
1686
1687void Assembler::mcr(Coprocessor coproc,
1688                    int opcode_1,
1689                    Register rd,
1690                    CRegister crn,
1691                    CRegister crm,
1692                    int opcode_2,
1693                    Condition cond) {
1694  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1695  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
1696       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1697}
1698
1699
1700void Assembler::mcr2(Coprocessor coproc,
1701                     int opcode_1,
1702                     Register rd,
1703                     CRegister crn,
1704                     CRegister crm,
1705                     int opcode_2) {  // v5 and above
1706  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
1707}
1708
1709
1710void Assembler::mrc(Coprocessor coproc,
1711                    int opcode_1,
1712                    Register rd,
1713                    CRegister crn,
1714                    CRegister crm,
1715                    int opcode_2,
1716                    Condition cond) {
1717  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
1718  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
1719       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
1720}
1721
1722
1723void Assembler::mrc2(Coprocessor coproc,
1724                     int opcode_1,
1725                     Register rd,
1726                     CRegister crn,
1727                     CRegister crm,
1728                     int opcode_2) {  // v5 and above
1729  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
1730}
1731
1732
1733void Assembler::ldc(Coprocessor coproc,
1734                    CRegister crd,
1735                    const MemOperand& src,
1736                    LFlag l,
1737                    Condition cond) {
1738  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
1739}
1740
1741
1742void Assembler::ldc(Coprocessor coproc,
1743                    CRegister crd,
1744                    Register rn,
1745                    int option,
1746                    LFlag l,
1747                    Condition cond) {
1748  // Unindexed addressing.
1749  ASSERT(is_uint8(option));
1750  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
1751       coproc*B8 | (option & 255));
1752}
1753
1754
1755void Assembler::ldc2(Coprocessor coproc,
1756                     CRegister crd,
1757                     const MemOperand& src,
1758                     LFlag l) {  // v5 and above
1759  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
1760}
1761
1762
1763void Assembler::ldc2(Coprocessor coproc,
1764                     CRegister crd,
1765                     Register rn,
1766                     int option,
1767                     LFlag l) {  // v5 and above
1768  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
1769}
1770
1771
1772void Assembler::stc(Coprocessor coproc,
1773                    CRegister crd,
1774                    const MemOperand& dst,
1775                    LFlag l,
1776                    Condition cond) {
1777  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
1778}
1779
1780
1781void Assembler::stc(Coprocessor coproc,
1782                    CRegister crd,
1783                    Register rn,
1784                    int option,
1785                    LFlag l,
1786                    Condition cond) {
1787  // Unindexed addressing.
1788  ASSERT(is_uint8(option));
1789  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
1790       coproc*B8 | (option & 255));
1791}
1792
1793
1794void Assembler::stc2(Coprocessor coproc,
1795                     CRegister crd,
1796                     const MemOperand& dst,
1797                     LFlag l) {  // v5 and above
1798  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
1799}
1800
1801
1802void Assembler::stc2(Coprocessor coproc,
1803                     CRegister crd,
1804                     Register rn,
1805                     int option,
1806                     LFlag l) {  // v5 and above
1807  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
1808}
1809
1810
1811// Support for VFP.
1812void Assembler::vldr(const DwVfpRegister dst,
1813                     const Register base,
1814                     int offset,
1815                     const Condition cond) {
1816  // Ddst = MEM(Rbase + offset).
1817  // Instruction details available in ARM DDI 0406A, A8-628.
1818  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
1819  // Vdst(15-12) | 1011(11-8) | offset
1820  ASSERT(CpuFeatures::IsEnabled(VFP3));
1821  ASSERT(offset % 4 == 0);
1822  ASSERT((offset / 4) < 256);
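      // The encoding stores offset / 4 in an 8-bit field, so offset must be a
      // word-aligned value below 1024; negative offsets are not handled here.
      // An illustrative call (not from this file): vldr(d0, r1, 8) performs
      // d0 = Memory[r1 + 8].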
1823  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
1824       0xB*B8 | ((offset / 4) & 255));
1825}
1826
1827
1828void Assembler::vldr(const SwVfpRegister dst,
1829                     const Register base,
1830                     int offset,
1831                     const Condition cond) {
1832  // Sdst = MEM(Rbase + offset).
1833  // Instruction details available in ARM DDI 0406A, A8-628.
1834  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
1835  // Vdst(15-12) | 1010(11-8) | offset
1836  ASSERT(CpuFeatures::IsEnabled(VFP3));
1837  ASSERT(offset % 4 == 0);
1838  ASSERT((offset / 4) < 256);
1839  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
1840       0xA*B8 | ((offset / 4) & 255));
1841}
1842
1843
1844void Assembler::vstr(const DwVfpRegister src,
1845                     const Register base,
1846                     int offset,
1847                     const Condition cond) {
1848  // MEM(Rbase + offset) = Dsrc.
1849  // Instruction details available in ARM DDI 0406A, A8-786.
1850  // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
1851  // Vsrc(15-12) | 1011(11-8) | (offset/4)
1852  ASSERT(CpuFeatures::IsEnabled(VFP3));
1853  ASSERT(offset % 4 == 0);
1854  ASSERT((offset / 4) < 256);
1855  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
1856       0xB*B8 | ((offset / 4) & 255));
1857}
1858
1859
1860static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
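      // memcpy is the portable way to reinterpret the bits of a double;
      // casting pointers instead would break strict aliasing rules.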
1861  uint64_t i;
1862  memcpy(&i, &d, 8);
1863
1864  *lo = i & 0xffffffff;
1865  *hi = i >> 32;
1866}
1867
1868// Only works for little-endian floating point formats.
1869// We don't support VFP on mixed-endian floating point platforms.
1870static bool FitsVMOVDoubleImmediate(double d, uint32_t* encoding) {
1871  ASSERT(CpuFeatures::IsEnabled(VFP3));
1872
1873  // VMOV can accept an immediate of the form:
1874  //
1875  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
1876  //
1877  // The immediate is encoded using an 8-bit quantity, comprised of two
1878  // 4-bit fields. For an 8-bit immediate of the form:
1879  //
1880  //  [abcdefgh]
1881  //
1882  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
1883  // created of the form:
1884  //
1885  //  [aBbbbbbb,bbcdefgh,00000000,00000000,
1886  //      00000000,00000000,00000000,00000000]
1887  //
1888  // where B = ~b.
1889  //
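      // A hand-worked example: 1.0 is 0x3ff0000000000000, so hi = 0x3ff00000,
      // abcdefgh = 01110000, and the encoding built below is 0x70000.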
1890
1891  uint32_t lo, hi;
1892  DoubleAsTwoUInt32(d, &lo, &hi);
1893
1894  // The most obvious constraint is the long block of zeroes.
1895  if ((lo != 0) || ((hi & 0xffff) != 0)) {
1896    return false;
1897  }
1898
1899  // Bits 61:54 (the b bits) must be all clear or all set.
1900  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
1901    return false;
1902  }
1903
1904  // Bit 62 (B) must be NOT bit 61 (b).
1905  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
1906    return false;
1907  }
1908
1909  // Create the encoded immediate in the form:
1910  //  [00000000,0000abcd,00000000,0000efgh]
1911  *encoding  = (hi >> 16) & 0xf;      // Low nybble.
1912  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
1913  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.
1914
1915  return true;
1916}
1917
1918
1919void Assembler::vmov(const DwVfpRegister dst,
1920                     double imm,
1921                     const Condition cond) {
1922  // Dd = immediate
1923  // Instruction details available in ARM DDI 0406B, A8-640.
1924  ASSERT(CpuFeatures::IsEnabled(VFP3));
1925
1926  uint32_t enc;
1927  if (FitsVMOVDoubleImmediate(imm, &enc)) {
1928    // The double can be encoded in the instruction.
1929    emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
1930  } else {
1931    // Synthesise the double from ARM immediates. This could be implemented
1932    // using vldr from a constant pool.
1933    uint32_t lo, hi;
1934    DoubleAsTwoUInt32(imm, &lo, &hi);
1935
1936    if (lo == hi) {
1937      // If the lo and hi parts of the double are equal, the literal is easier
1938      // to create. This is the case with 0.0.
1939      mov(ip, Operand(lo));
1940      vmov(dst, ip, ip);
1941    } else {
1942      // Move the low part of the double into the lower of the
1943      // corresponding S registers of D register dst.
1944      mov(ip, Operand(lo));
1945      vmov(dst.low(), ip, cond);
1946
1947      // Move the high part of the double into the higher of the
1948      // corresponding S registers of D register dst.
1949      mov(ip, Operand(hi));
1950      vmov(dst.high(), ip, cond);
1951    }
1952  }
1953}
1954
1955
1956void Assembler::vmov(const SwVfpRegister dst,
1957                     const SwVfpRegister src,
1958                     const Condition cond) {
1959  // Sd = Sm
1960  // Instruction details available in ARM DDI 0406B, A8-642.
1961  ASSERT(CpuFeatures::IsEnabled(VFP3));
1962  emit(cond | 0xE*B24 | 0xB*B20 |
1963       dst.code()*B12 | 0x5*B9 | B6 | src.code());
1964}
1965
1966
1967void Assembler::vmov(const DwVfpRegister dst,
1968                     const DwVfpRegister src,
1969                     const Condition cond) {
1970  // Dd = Dm
1971  // Instruction details available in ARM DDI 0406B, A8-642.
1972  ASSERT(CpuFeatures::IsEnabled(VFP3));
1973  emit(cond | 0xE*B24 | 0xB*B20 |
1974       dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
1975}
1976
1977
1978void Assembler::vmov(const DwVfpRegister dst,
1979                     const Register src1,
1980                     const Register src2,
1981                     const Condition cond) {
1982  // Dm = <Rt,Rt2>.
1983  // Instruction details available in ARM DDI 0406A, A8-646.
1984  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
1985  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
1986  ASSERT(CpuFeatures::IsEnabled(VFP3));
1987  ASSERT(!src1.is(pc) && !src2.is(pc));
1988  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
1989       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
1990}
1991
1992
1993void Assembler::vmov(const Register dst1,
1994                     const Register dst2,
1995                     const DwVfpRegister src,
1996                     const Condition cond) {
1997  // <Rt,Rt2> = Dm.
1998  // Instruction details available in ARM DDI 0406A, A8-646.
1999  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2000  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
2001  ASSERT(CpuFeatures::IsEnabled(VFP3));
2002  ASSERT(!dst1.is(pc) && !dst2.is(pc));
2003  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2004       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
2005}
2006
2007
2008void Assembler::vmov(const SwVfpRegister dst,
2009                     const Register src,
2010                     const Condition cond) {
2011  // Sn = Rt.
2012  // Instruction details available in ARM DDI 0406A, A8-642.
2013  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2014  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2015  ASSERT(CpuFeatures::IsEnabled(VFP3));
2016  ASSERT(!src.is(pc));
2017  emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
2018       src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
2019}
2020
2021
2022void Assembler::vmov(const Register dst,
2023                     const SwVfpRegister src,
2024                     const Condition cond) {
2025  // Rt = Sn.
2026  // Instruction details available in ARM DDI 0406A, A8-642.
2027  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2028  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
2029  ASSERT(CpuFeatures::IsEnabled(VFP3));
2030  ASSERT(!dst.is(pc));
2031  emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
2032       dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
2033}
2034
2035
2036// Type of data to read from or write to VFP register.
2037// Used as specifier in generic vcvt instruction.
2038enum VFPType { S32, U32, F32, F64 };
2039
2040
2041static bool IsSignedVFPType(VFPType type) {
2042  switch (type) {
2043    case S32:
2044      return true;
2045    case U32:
2046      return false;
2047    default:
2048      UNREACHABLE();
2049      return false;
2050  }
2051}
2052
2053
2054static bool IsIntegerVFPType(VFPType type) {
2055  switch (type) {
2056    case S32:
2057    case U32:
2058      return true;
2059    case F32:
2060    case F64:
2061      return false;
2062    default:
2063      UNREACHABLE();
2064      return false;
2065  }
2066}
2067
2068
2069static bool IsDoubleVFPType(VFPType type) {
2070  switch (type) {
2071    case F32:
2072      return false;
2073    case F64:
2074      return true;
2075    default:
2076      UNREACHABLE();
2077      return false;
2078  }
2079}
2080
2081
2082// Depending on split_last_bit, split the binary representation of reg_code
2083// into Vm:M or M:Vm form (where M is a single bit).
2084static void SplitRegCode(bool split_last_bit,
2085                         int reg_code,
2086                         int* vm,
2087                         int* m) {
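      // For example, S register code 5 (split_last_bit true) splits into
      // Vm = 2 and M = 1, while D register code 5 (split_last_bit false)
      // splits into Vm = 5 and M = 0.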
2088  if (split_last_bit) {
2089    *m  = reg_code & 0x1;
2090    *vm = reg_code >> 1;
2091  } else {
2092    *m  = (reg_code & 0x10) >> 4;
2093    *vm = reg_code & 0x0F;
2094  }
2095}
2096
2097
2098// Encode a vcvt.dst_type.src_type instruction.
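    // For example, EncodeVCVT(F64, 0, S32, 1, al) yields the encoding of the
    // UAL instruction vcvt.f64.s32 d0, s1.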
2099static Instr EncodeVCVT(const VFPType dst_type,
2100                        const int dst_code,
2101                        const VFPType src_type,
2102                        const int src_code,
2103                        const Condition cond) {
2104  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2105    // Conversion between IEEE floating point and 32-bit integer.
2106    // Instruction details available in ARM DDI 0406B, A8.6.295.
2107    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2108    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2109    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
2110
2111    int sz, opc2, D, Vd, M, Vm, op;
2112
2113    if (IsIntegerVFPType(dst_type)) {
2114      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2115      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2116      op = 1;  // round towards zero
2117      SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
2118      SplitRegCode(true, dst_code, &Vd, &D);
2119    } else {
2120      ASSERT(IsIntegerVFPType(src_type));
2121
2122      opc2 = 0x0;
2123      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2124      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
2125      SplitRegCode(true, src_code, &Vm, &M);
2126      SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
2127    }
2128
2129    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2130            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2131  } else {
2132    // Conversion between IEEE double and single precision.
2133    // Instruction details available in ARM DDI 0406B, A8.6.298.
2134    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2135    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2136    int sz, D, Vd, M, Vm;
2137
2138    ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
2139    sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
2140    SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
2141    SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
2142
2143    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2144            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2145  }
2146}
2147
2148
2149void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2150                             const SwVfpRegister src,
2151                             const Condition cond) {
2152  ASSERT(CpuFeatures::IsEnabled(VFP3));
2153  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond));
2154}
2155
2156
2157void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2158                             const SwVfpRegister src,
2159                             const Condition cond) {
2160  ASSERT(CpuFeatures::IsEnabled(VFP3));
2161  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond));
2162}
2163
2164
2165void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2166                             const SwVfpRegister src,
2167                             const Condition cond) {
2168  ASSERT(CpuFeatures::IsEnabled(VFP3));
2169  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond));
2170}
2171
2172
2173void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2174                             const DwVfpRegister src,
2175                             const Condition cond) {
2176  ASSERT(CpuFeatures::IsEnabled(VFP3));
2177  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond));
2178}
2179
2180
2181void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2182                             const DwVfpRegister src,
2183                             const Condition cond) {
2184  ASSERT(CpuFeatures::IsEnabled(VFP3));
2185  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond));
2186}
2187
2188
2189void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2190                             const SwVfpRegister src,
2191                             const Condition cond) {
2192  ASSERT(CpuFeatures::IsEnabled(VFP3));
2193  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond));
2194}
2195
2196
2197void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2198                             const DwVfpRegister src,
2199                             const Condition cond) {
2200  ASSERT(CpuFeatures::IsEnabled(VFP3));
2201  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond));
2202}
2203
2204
2205void Assembler::vadd(const DwVfpRegister dst,
2206                     const DwVfpRegister src1,
2207                     const DwVfpRegister src2,
2208                     const Condition cond) {
2209  // Dd = vadd(Dn, Dm) double precision floating point addition.
2210  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
2211  // Instruction details available in ARM DDI 0406A, A8-536.
2212  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2213  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2214  ASSERT(CpuFeatures::IsEnabled(VFP3));
2215  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2216       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2217}
2218
2219
2220void Assembler::vsub(const DwVfpRegister dst,
2221                     const DwVfpRegister src1,
2222                     const DwVfpRegister src2,
2223                     const Condition cond) {
2224  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
2225  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
2226  // Instruction details available in ARM DDI 0406A, A8-784.
2227  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2228  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
2229  ASSERT(CpuFeatures::IsEnabled(VFP3));
2230  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2231       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2232}
2233
2234
2235void Assembler::vmul(const DwVfpRegister dst,
2236                     const DwVfpRegister src1,
2237                     const DwVfpRegister src2,
2238                     const Condition cond) {
2239  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
2240  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
2241  // Instruction details available in ARM DDI 0406A, A8-784.
2242  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
2243  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2244  ASSERT(CpuFeatures::IsEnabled(VFP3));
2245  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
2246       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2247}
2248
2249
2250void Assembler::vdiv(const DwVfpRegister dst,
2251                     const DwVfpRegister src1,
2252                     const DwVfpRegister src2,
2253                     const Condition cond) {
2254  // Dd = vdiv(Dn, Dm) double precision floating point division.
2255  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
2256  // Instruction details available in ARM DDI 0406A, A8-584.
2257  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
2258  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
2259  ASSERT(CpuFeatures::IsEnabled(VFP3));
2260  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
2261       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2262}
2263
2264
2265void Assembler::vcmp(const DwVfpRegister src1,
2266                     const DwVfpRegister src2,
2267                     const SBit s,
2268                     const Condition cond) {
2269  // vcmp(Dd, Dm) double precision floating point comparison.
2270  // Instruction details available in ARM DDI 0406A, A8-570.
2271  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
2272  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
2273  ASSERT(CpuFeatures::IsEnabled(VFP3));
2274  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
2275       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2276}
2277
2278
2279void Assembler::vmrs(Register dst, Condition cond) {
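      // dst = FPSCR (UAL: vmrs dst, FPSCR).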
2280  // Instruction details available in ARM DDI 0406A, A8-652.
2281  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
2282  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
2283  ASSERT(CpuFeatures::IsEnabled(VFP3));
2284  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
2285       dst.code()*B12 | 0xA*B8 | B4);
2286}
2287
2288
2289
2290void Assembler::vsqrt(const DwVfpRegister dst,
2291                      const DwVfpRegister src,
2292                      const Condition cond) {
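      // Dd = vsqrt(Dm) double precision floating point square root.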
2293  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
2294  // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
2295  ASSERT(CpuFeatures::IsEnabled(VFP3));
2296  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
2297       dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
2298}
2299
2300
2301// Pseudo instructions.
2302void Assembler::nop(int type) {
2303  // This is mov rx, rx.
2304  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
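      // For example, nop(0) emits 0xe1a00000, i.e. mov r0, r0.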
2305  emit(al | 13*B21 | type*B12 | type);
2306}
2307
2308
2309bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
2310  uint32_t dummy1;
2311  uint32_t dummy2;
2312  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
2313}
2314
2315
2316void Assembler::BlockConstPoolFor(int instructions) {
2317  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
2318}
2319
2320
2321// Debugging.
2322void Assembler::RecordJSReturn() {
2323  WriteRecordedPositions();
2324  CheckBuffer();
2325  RecordRelocInfo(RelocInfo::JS_RETURN);
2326}
2327
2328
2329void Assembler::RecordDebugBreakSlot() {
2330  WriteRecordedPositions();
2331  CheckBuffer();
2332  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2333}
2334
2335
2336void Assembler::RecordComment(const char* msg) {
2337  if (FLAG_debug_code) {
2338    CheckBuffer();
2339    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2340  }
2341}
2342
2343
2344void Assembler::RecordPosition(int pos) {
2345  if (pos == RelocInfo::kNoPosition) return;
2346  ASSERT(pos >= 0);
2347  current_position_ = pos;
2348}
2349
2350
2351void Assembler::RecordStatementPosition(int pos) {
2352  if (pos == RelocInfo::kNoPosition) return;
2353  ASSERT(pos >= 0);
2354  current_statement_position_ = pos;
2355}
2356
2357
2358bool Assembler::WriteRecordedPositions() {
2359  bool written = false;
2360
2361  // Write the statement position if it is different from what was written last
2362  // time.
2363  if (current_statement_position_ != written_statement_position_) {
2364    CheckBuffer();
2365    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
2366    written_statement_position_ = current_statement_position_;
2367    written = true;
2368  }
2369
2370  // Write the position if it is different from what was written last time and
2371  // also different from the written statement position.
2372  if (current_position_ != written_position_ &&
2373      current_position_ != written_statement_position_) {
2374    CheckBuffer();
2375    RecordRelocInfo(RelocInfo::POSITION, current_position_);
2376    written_position_ = current_position_;
2377    written = true;
2378  }
2379
2380  // Return whether something was written.
2381  return written;
2382}
2383
2384
2385void Assembler::GrowBuffer() {
2386  if (!own_buffer_) FATAL("external code buffer is too small");
2387
2388  // Compute new buffer size.
2389  CodeDesc desc;  // the new buffer
2390  if (buffer_size_ < 4*KB) {
2391    desc.buffer_size = 4*KB;
2392  } else if (buffer_size_ < 1*MB) {
2393    desc.buffer_size = 2*buffer_size_;
2394  } else {
2395    desc.buffer_size = buffer_size_ + 1*MB;
2396  }
2397  CHECK_GT(desc.buffer_size, 0);  // no overflow
2398
2399  // Setup new buffer.
2400  desc.buffer = NewArray<byte>(desc.buffer_size);
2401
2402  desc.instr_size = pc_offset();
2403  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2404
2405  // Copy the data.
2406  int pc_delta = desc.buffer - buffer_;
2407  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2408  memmove(desc.buffer, buffer_, desc.instr_size);
2409  memmove(reloc_info_writer.pos() + rc_delta,
2410          reloc_info_writer.pos(), desc.reloc_size);
2411
2412  // Switch buffers.
2413  DeleteArray(buffer_);
2414  buffer_ = desc.buffer;
2415  buffer_size_ = desc.buffer_size;
2416  pc_ += pc_delta;
2417  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2418                               reloc_info_writer.last_pc() + pc_delta);
2419
2420  // None of our relocation types is pc-relative with a target outside the
2421  // code buffer, or pc-absolute with a target inside it, so there is no
2422  // need to relocate any emitted relocation entries.
2423
2424  // Relocate pending relocation entries.
2425  for (int i = 0; i < num_prinfo_; i++) {
2426    RelocInfo& rinfo = prinfo_[i];
2427    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2428           rinfo.rmode() != RelocInfo::POSITION);
2429    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
2430      rinfo.set_pc(rinfo.pc() + pc_delta);
2431    }
2432  }
2433}
2434
2435
2436void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2437  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
2438  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
2439    // Adjust code for new modes.
2440    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2441           || RelocInfo::IsJSReturn(rmode)
2442           || RelocInfo::IsComment(rmode)
2443           || RelocInfo::IsPosition(rmode));
2444    // These modes do not need an entry in the constant pool.
2445  } else {
2446    ASSERT(num_prinfo_ < kMaxNumPRInfo);
2447    prinfo_[num_prinfo_++] = rinfo;
2448    // Make sure the constant pool is not emitted in place of the next
2449    // instruction for which we just recorded relocation info.
2450    BlockConstPoolBefore(pc_offset() + kInstrSize);
2451  }
2452  if (rinfo.rmode() != RelocInfo::NONE) {
2453    // Don't record external references unless the heap will be serialized.
2454    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2455#ifdef DEBUG
2456      if (!Serializer::enabled()) {
2457        Serializer::TooLateToEnableNow();
2458      }
2459#endif
2460      if (!Serializer::enabled() && !FLAG_debug_code) {
2461        return;
2462      }
2463    }
2464    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
2465    reloc_info_writer.Write(&rinfo);
2466  }
2467}
2468
2469
2470void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
2471  // Calculate the offset of the next check. It will be overwritten
2472  // when a const pool is generated or when const pools are being
2473  // blocked for a specific range.
2474  next_buffer_check_ = pc_offset() + kCheckConstInterval;
2475
2476  // There is nothing to do if there are no pending relocation info entries.
2477  if (num_prinfo_ == 0) return;
2478
2479  // We emit a constant pool at regular intervals of about kDistBetweenPools
2480  // or when requested by parameter force_emit (e.g. after each function).
2481  // We prefer not to emit a jump unless the max distance is reached or if we
2482  // are running low on slots, which can happen if a lot of constants are being
2483  // emitted (e.g. --debug-code and many static references).
2484  int dist = pc_offset() - last_const_pool_end_;
2485  if (!force_emit && dist < kMaxDistBetweenPools &&
2486      (require_jump || dist < kDistBetweenPools) &&
2487      // TODO(1236125): Cleanup the "magic" number below. We know that
2488      // the code generation will test every kCheckConstIntervalInst.
2489      // Thus we are safe as long as we generate less than 7 constant
2490      // entries per instruction.
2491      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
2492    return;
2493  }
2494
2495  // If we did not return by now, we need to emit the constant pool soon.
2496
2497  // However, some small sequences of instructions must not be broken up by the
2498  // insertion of a constant pool; such sequences are protected by setting
2499  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
2500  // both checked here. Also, recursive calls to CheckConstPool are blocked by
2501  // no_const_pool_before_.
2502  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
2503    // Emission is currently blocked; make sure we try again as soon as
2504    // possible.
2505    if (const_pool_blocked_nesting_ > 0) {
2506      next_buffer_check_ = pc_offset() + kInstrSize;
2507    } else {
2508      next_buffer_check_ = no_const_pool_before_;
2509    }
2510
2511    // Something is wrong if emission is forced and blocked at the same time.
2512    ASSERT(!force_emit);
2513    return;
2514  }
2515
2516  int jump_instr = require_jump ? kInstrSize : 0;
2517
2518  // Check that the code buffer is large enough before emitting the constant
2519  // pool and relocation information (including the jump over the pool and the
2520  // constant pool marker).
2521  int max_needed_space =
2522      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
2523  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
2524
2525  // Block recursive calls to CheckConstPool.
2526  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
2527                       num_prinfo_*kInstrSize);
2528  // Don't bother to check for the emit calls below.
2529  next_buffer_check_ = no_const_pool_before_;
2530
2531  // Emit jump over constant pool if necessary.
2532  Label after_pool;
2533  if (require_jump) b(&after_pool);
2534
2535  RecordComment("[ Constant Pool");
2536
2537  // Put down the constant pool marker, an "Undefined instruction" as
2538  // specified by A3.1 Instruction set encoding.
2539  emit(0x03000000 | num_prinfo_);
2540
2541  // Emit constant pool entries.
2542  for (int i = 0; i < num_prinfo_; i++) {
2543    RelocInfo& rinfo = prinfo_[i];
2544    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2545           rinfo.rmode() != RelocInfo::POSITION &&
2546           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
2547    Instr instr = instr_at(rinfo.pc());
2548
2549    // Instruction to patch must be a ldr/str [pc, #offset].
2550    // P and U set, B and W clear, Rn == pc, offset12 still 0.
2551    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
2552           (2*B25 | P | U | pc.code()*B16));
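        // On ARM the PC reads as the address of the current instruction plus
        // 8 (two instructions ahead), hence the -8 bias in the delta below.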
2553    int delta = pc_ - rinfo.pc() - 8;
2554    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
2555    if (delta < 0) {
2556      instr &= ~U;
2557      delta = -delta;
2558    }
2559    ASSERT(is_uint12(delta));
2560    instr_at_put(rinfo.pc(), instr + delta);
2561    emit(rinfo.data());
2562  }
2563  num_prinfo_ = 0;
2564  last_const_pool_end_ = pc_offset();
2565
2566  RecordComment("]");
2567
2568  if (after_pool.is_linked()) {
2569    bind(&after_pool);
2570  }
2571
2572  // Since a constant pool was just emitted, move the check offset forward by
2573  // the standard interval.
2574  next_buffer_check_ = pc_offset() + kCheckConstInterval;
2575}
2576
2577
2578} }  // namespace v8::internal
2579
2580#endif  // V8_TARGET_ARCH_ARM
2581