assembler-arm.cc revision 3100271588b61cbc1dc472a3f2f105d2eed8497f
1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
33// The original source code covered by the above license above has been
34// modified significantly by Google Inc.
35// Copyright 2010 the V8 project authors. All rights reserved.
36
37#include "v8.h"
38
39#include "arm/assembler-arm-inl.h"
40#include "serialize.h"
41
42namespace v8 {
43namespace internal {
44
// Safe default is no features.
// Bitsets indexed by the CpuFeature enum (e.g. VFP3, ARMv7).
unsigned CpuFeatures::supported_ = 0;           // Features usable by generated code.
unsigned CpuFeatures::enabled_ = 0;             // Features currently enabled via Scope.
unsigned CpuFeatures::found_by_runtime_probing_ = 0;  // Subset of supported_ detected at runtime.
49
// Populate supported_ / found_by_runtime_probing_ exactly once at startup.
void CpuFeatures::Probe() {
  // If the compiler is allowed to use vfp then we can use vfp too in our
  // code generation.
#if !defined(__arm__)
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
  if (FLAG_enable_vfp3) {
      supported_ |= 1u << VFP3;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
      supported_ |= 1u << ARMv7;
  }
#else
  if (Serializer::enabled()) {
      supported_ |= 1u << VFP3;
    // NOTE(review): VFP3 is force-enabled here even though the comment below
    // says "No features if we might serialize", and the disabled line suggests
    // this replaced a platform-derived feature set. Confirm this is an
    // intentional local change and not a leftover debugging hack.
    //supported_ |= OS::CpuFeaturesImpliedByPlatform();
    return;  // No features if we might serialize.
  }

  if (OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if
    // runtime detection of VFP returns true.
    supported_ |= 1u << VFP3;
    found_by_runtime_probing_ |= 1u << VFP3;
  }

  if (OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }
#endif
}
82
83
84// -----------------------------------------------------------------------------
85// Implementation of Register and CRegister
86
87Register no_reg = { -1 };
88
89Register r0  = {  0 };
90Register r1  = {  1 };
91Register r2  = {  2 };
92Register r3  = {  3 };
93Register r4  = {  4 };
94Register r5  = {  5 };
95Register r6  = {  6 };
96Register r7  = {  7 };
97Register r8  = {  8 };  // Used as context register.
98Register r9  = {  9 };
99Register r10 = { 10 };  // Used as roots register.
100Register fp  = { 11 };
101Register ip  = { 12 };
102Register sp  = { 13 };
103Register lr  = { 14 };
104Register pc  = { 15 };
105
106
107CRegister no_creg = { -1 };
108
109CRegister cr0  = {  0 };
110CRegister cr1  = {  1 };
111CRegister cr2  = {  2 };
112CRegister cr3  = {  3 };
113CRegister cr4  = {  4 };
114CRegister cr5  = {  5 };
115CRegister cr6  = {  6 };
116CRegister cr7  = {  7 };
117CRegister cr8  = {  8 };
118CRegister cr9  = {  9 };
119CRegister cr10 = { 10 };
120CRegister cr11 = { 11 };
121CRegister cr12 = { 12 };
122CRegister cr13 = { 13 };
123CRegister cr14 = { 14 };
124CRegister cr15 = { 15 };
125
126// Support for the VFP registers s0 to s31 (d0 to d15).
127// Note that "sN:sM" is the same as "dN/2".
128SwVfpRegister s0  = {  0 };
129SwVfpRegister s1  = {  1 };
130SwVfpRegister s2  = {  2 };
131SwVfpRegister s3  = {  3 };
132SwVfpRegister s4  = {  4 };
133SwVfpRegister s5  = {  5 };
134SwVfpRegister s6  = {  6 };
135SwVfpRegister s7  = {  7 };
136SwVfpRegister s8  = {  8 };
137SwVfpRegister s9  = {  9 };
138SwVfpRegister s10 = { 10 };
139SwVfpRegister s11 = { 11 };
140SwVfpRegister s12 = { 12 };
141SwVfpRegister s13 = { 13 };
142SwVfpRegister s14 = { 14 };
143SwVfpRegister s15 = { 15 };
144SwVfpRegister s16 = { 16 };
145SwVfpRegister s17 = { 17 };
146SwVfpRegister s18 = { 18 };
147SwVfpRegister s19 = { 19 };
148SwVfpRegister s20 = { 20 };
149SwVfpRegister s21 = { 21 };
150SwVfpRegister s22 = { 22 };
151SwVfpRegister s23 = { 23 };
152SwVfpRegister s24 = { 24 };
153SwVfpRegister s25 = { 25 };
154SwVfpRegister s26 = { 26 };
155SwVfpRegister s27 = { 27 };
156SwVfpRegister s28 = { 28 };
157SwVfpRegister s29 = { 29 };
158SwVfpRegister s30 = { 30 };
159SwVfpRegister s31 = { 31 };
160
161DwVfpRegister d0  = {  0 };
162DwVfpRegister d1  = {  1 };
163DwVfpRegister d2  = {  2 };
164DwVfpRegister d3  = {  3 };
165DwVfpRegister d4  = {  4 };
166DwVfpRegister d5  = {  5 };
167DwVfpRegister d6  = {  6 };
168DwVfpRegister d7  = {  7 };
169DwVfpRegister d8  = {  8 };
170DwVfpRegister d9  = {  9 };
171DwVfpRegister d10 = { 10 };
172DwVfpRegister d11 = { 11 };
173DwVfpRegister d12 = { 12 };
174DwVfpRegister d13 = { 13 };
175DwVfpRegister d14 = { 14 };
176DwVfpRegister d15 = { 15 };
177
178// -----------------------------------------------------------------------------
179// Implementation of RelocInfo
180
181const int RelocInfo::kApplyMask = 0;
182
183
184void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
185  // Patch the code at the current address with the supplied instructions.
186  Instr* pc = reinterpret_cast<Instr*>(pc_);
187  Instr* instr = reinterpret_cast<Instr*>(instructions);
188  for (int i = 0; i < instruction_count; i++) {
189    *(pc + i) = *(instr + i);
190  }
191
192  // Indicate that code has changed.
193  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
194}
195
196
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
// Not implemented on ARM; callers must not rely on this entry point.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}
203
204
205// -----------------------------------------------------------------------------
206// Implementation of Operand and MemOperand
207// See assembler-arm-inl.h for inlined constructors
208
209Operand::Operand(Handle<Object> handle) {
210  rm_ = no_reg;
211  // Verify all Objects referred by code are NOT in new space.
212  Object* obj = *handle;
213  ASSERT(!Heap::InNewSpace(obj));
214  if (obj->IsHeapObject()) {
215    imm32_ = reinterpret_cast<intptr_t>(handle.location());
216    rmode_ = RelocInfo::EMBEDDED_OBJECT;
217  } else {
218    // no relocation needed
219    imm32_ =  reinterpret_cast<intptr_t>(obj);
220    rmode_ = RelocInfo::NONE;
221  }
222}
223
224
225Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
226  ASSERT(is_uint5(shift_imm));
227  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
228  rm_ = rm;
229  rs_ = no_reg;
230  shift_op_ = shift_op;
231  shift_imm_ = shift_imm & 31;
232  if (shift_op == RRX) {
233    // encoded as ROR with shift_imm == 0
234    ASSERT(shift_imm == 0);
235    shift_op_ = ROR;
236    shift_imm_ = 0;
237  }
238}
239
240
241Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
242  ASSERT(shift_op != RRX);
243  rm_ = rm;
244  rs_ = no_reg;
245  shift_op_ = shift_op;
246  rs_ = rs;
247}
248
249
250MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
251  rn_ = rn;
252  rm_ = no_reg;
253  offset_ = offset;
254  am_ = am;
255}
256
257MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
258  rn_ = rn;
259  rm_ = rm;
260  shift_op_ = LSL;
261  shift_imm_ = 0;
262  am_ = am;
263}
264
265
266MemOperand::MemOperand(Register rn, Register rm,
267                       ShiftOp shift_op, int shift_imm, AddrMode am) {
268  ASSERT(is_uint5(shift_imm));
269  rn_ = rn;
270  rm_ = rm;
271  shift_op_ = shift_op;
272  shift_imm_ = shift_imm & 31;
273  am_ = am;
274}
275
276
277// -----------------------------------------------------------------------------
278// Implementation of Assembler.
279
280// Instruction encoding bits.
281enum {
282  H   = 1 << 5,   // halfword (or byte)
283  S6  = 1 << 6,   // signed (or unsigned)
284  L   = 1 << 20,  // load (or store)
285  S   = 1 << 20,  // set condition code (or leave unchanged)
286  W   = 1 << 21,  // writeback base register (or leave unchanged)
287  A   = 1 << 21,  // accumulate in multiply instruction (or not)
288  B   = 1 << 22,  // unsigned byte (or word)
289  N   = 1 << 22,  // long (or short)
290  U   = 1 << 23,  // positive (or negative) offset/index
291  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
292  I   = 1 << 25,  // immediate shifter operand (or not)
293
294  B4  = 1 << 4,
295  B5  = 1 << 5,
296  B6  = 1 << 6,
297  B7  = 1 << 7,
298  B8  = 1 << 8,
299  B9  = 1 << 9,
300  B12 = 1 << 12,
301  B16 = 1 << 16,
302  B18 = 1 << 18,
303  B19 = 1 << 19,
304  B20 = 1 << 20,
305  B21 = 1 << 21,
306  B22 = 1 << 22,
307  B23 = 1 << 23,
308  B24 = 1 << 24,
309  B25 = 1 << 25,
310  B26 = 1 << 26,
311  B27 = 1 << 27,
312
313  // Instruction bit masks.
314  RdMask     = 15 << 12,  // in str instruction
315  CondMask   = 15 << 28,
316  CoprocessorMask = 15 << 8,
317  OpCodeMask = 15 << 21,  // in data-processing instructions
318  Imm24Mask  = (1 << 24) - 1,
319  Off12Mask  = (1 << 12) - 1,
320  // Reserved condition.
321  nv = 15 << 28
322};
323
324
// Precomputed instruction patterns used by the peephole optimizer and by
// code that recognizes specific emitted sequences.

// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;

// Spare buffer.
// One retired minimal-size buffer is cached here for reuse by the next
// Assembler; accessed without synchronization.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
344
// Construct an assembler writing into `buffer` of `buffer_size` bytes.
// If buffer is NULL the assembler owns its buffer: it reuses the cached
// spare_buffer_ when the requested size is at most minimal, otherwise it
// allocates. Externally provided buffers are never freed by the assembler.
Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the cached spare buffer if one is available.
      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers.
  // Instructions grow forward from the start; relocation info grows
  // backward from the end of the same buffer.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}
386
387
388Assembler::~Assembler() {
389  if (own_buffer_) {
390    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
391      spare_buffer_ = buffer_;
392    } else {
393      DeleteArray(buffer_);
394    }
395  }
396}
397
398
// Finalize assembly and fill in the code descriptor: flushes any pending
// constant pool, then reports instruction and reloc-info extents.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written backward from the buffer end (see constructor).
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
410
411
412void Assembler::Align(int m) {
413  ASSERT(m >= 4 && IsPowerOf2(m));
414  while ((pc_offset() & (m - 1)) != 0) {
415    nop();
416  }
417}
418
419
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
// -4 rather than -1 so that it remains instruction-size aligned.
const int kEndOfChain = -4;
433
434
// Decode the link stored at code position `pos`: either an emitted label
// constant or the target of a branch instruction. Returns the linked code
// position (or an absolute label constant adjusted for the Code header).
int Assembler::target_at(int pos)  {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Shift left 8 then arithmetic-shift right 6: sign-extends the 24-bit
  // immediate and multiplies it by 4 (word offset -> byte offset) in one go.
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0)
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;

  // Branch offsets are relative to pc + 8 (kPcLoadDelta) on ARM.
  return pos + kPcLoadDelta + imm26;
}
449
450
// Patch the instruction at `pos` to point at `target_pos`: either rewrite an
// emitted label constant or re-encode a branch's 24-bit immediate.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    // Ordinary b/bl: offset must be word aligned; clear the old immediate.
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}
474
475
// Debug helper: print a label's state. For a linked label, walks the link
// chain (on a copy, so the label itself is not consumed) and prints each
// link's instruction mnemonic or "value" for an emitted label constant.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~Imm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        int cond = instr & CondMask;
        const char* b;
        const char* c;
        if (cond == nv) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          // Translate the condition field into its assembler suffix.
          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
532
533
// Bind label L to code position `pos`, patching every instruction on its
// link chain to target that position.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
548
549
// Splice appendix's link chain onto the end of L's chain (or adopt it
// wholesale if L is empty). appendix is invalidated afterwards.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      // Walk L's chain to its terminator (a non-positive link).
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
569
570
571void Assembler::bind(Label* L) {
572  ASSERT(!L->is_bound());  // label can only be bound once
573  bind_to(L, pc_offset());
574}
575
576
577void Assembler::next(Label* L) {
578  ASSERT(L->is_linked());
579  int link = target_at(L->pos());
580  if (link > 0) {
581    L->link_to(link);
582  } else {
583    ASSERT(link == kEndOfChain);
584    L->Unuse();
585  }
586}
587
588
589// Low-level code emission routines depending on the addressing mode.
590static bool fits_shifter(uint32_t imm32,
591                         uint32_t* rotate_imm,
592                         uint32_t* immed_8,
593                         Instr* instr) {
594  // imm32 must be unsigned.
595  for (int rot = 0; rot < 16; rot++) {
596    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
597    if ((imm8 <= 0xff)) {
598      *rotate_imm = rot;
599      *immed_8 = imm8;
600      return true;
601    }
602  }
603  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
604  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
605    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
606      *instr ^= 0x2*B21;
607      return true;
608    }
609  }
610  return false;
611}
612
613
614// We have to use the temporary register for things that can be relocated even
615// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
616// space.  There is no guarantee that the relocated location can be similarly
617// encoded.
618static bool MustUseIp(RelocInfo::Mode rmode) {
619  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
620#ifdef DEBUG
621    if (!Serializer::enabled()) {
622      Serializer::TooLateToEnableNow();
623    }
624#endif
625    return Serializer::enabled();
626  } else if (rmode == RelocInfo::NONE) {
627    return false;
628  }
629  return true;
630}
631
632
// Encode and emit a data-processing instruction (addressing mode 1):
// fills in the shifter operand from x (immediate, immediate-shifted
// register, or register-shifted register) plus rn/rd, falling back to a
// constant-pool load through ip when the immediate cannot be encoded.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(x.rmode_) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      RecordRelocInfo(x.rmode_, x.imm32_);
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        ldr(rd, MemOperand(pc, 0), cond);
      } else {
        ldr(ip, MemOperand(pc, 0), cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc))
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
}
674
675
// Encode and emit a word/byte load-store (addressing mode 2). Negative
// immediate offsets are folded into the U (direction) bit; offsets that do
// not fit in 12 bits are materialized in ip first.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      // Encode as positive offset with the U bit flipped.
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
707
708
// Encode and emit a halfword/signed-byte load-store (addressing mode 3).
// The 8-bit immediate is split into two nibbles; scaled register offsets
// are not representable in this mode and go through ip instead.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      // Encode as positive offset with the U bit flipped.
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // High nibble goes in bits 8-11, low nibble in bits 0-3.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
747
748
// Encode and emit a load/store-multiple instruction (addressing mode 4)
// with base register rn and register-list bitmask rl.
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);        // empty register lists are not encodable
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}
755
756
// Encode and emit a coprocessor load/store (addressing mode 5). The byte
// offset must be a word-aligned value whose word count fits in 8 bits.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;               // bytes -> words
  if (offset_8 < 0) {
    // Encode as positive offset with the U bit flipped.
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
780
781
// Compute the pc-relative byte offset to label L for a branch emitted at the
// current position. If L is unbound, the current position is linked into L's
// chain and the returned offset points at the previous link (or kEndOfChain).
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  // ARM branches are relative to pc + 8 (kPcLoadDelta).
  return target_pos - (pc_offset() + kPcLoadDelta);
}
800
801
// Store a label reference as a constant at `at_offset`. If L is unbound, the
// slot becomes the new head of L's link chain; once bound, the slot holds
// the position biased to be relative to the generated Code object.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    // NOTE(review): for a bound label nothing is written here — the bound
    // case appears to be handled by the caller; confirm against call sites.
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}
816
817
// Branch instructions.

// b<cond> — branch by a pc-relative, word-aligned byte offset.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;  // byte offset -> word offset
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al)
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
}


// bl<cond> — branch with link (saves return address in lr).
void Assembler::bl(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
}


// blx — branch with link and exchange to Thumb, immediate form. The offset
// may be halfword aligned; bit 1 of the offset is encoded in the H bit (B24).
void Assembler::blx(int branch_offset) {  // v5 and above
  WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));  // 15 << 28: NV condition field
}


// blx — branch with link and exchange, register form.
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}


// bx — branch and exchange, register form (no link).
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}
861
862
// Data-processing instructions.

// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
// Instruction details available in ARM DDI 0406A, A8-464.
// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
//  Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
// src2 is the lsb, src3 is width-1; both must be immediates in [0, 31].
void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
                     const Operand& src3, Condition cond) {
  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
}
877
878
// Data-processing wrappers: each selects its 4-bit opcode (bits 21-24)
// and delegates operand encoding to addrmod1.

void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | 0*B21 | s, src1, dst, src2);
}


void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 1*B21 | s, src1, dst, src2);
}


void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 2*B21 | s, src1, dst, src2);
}


// rsb: reverse subtract, dst = src2 - src1.
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 3*B21 | s, src1, dst, src2);
}
901
902
// add, with a peephole optimization that cancels an immediately preceding
// push(r) when this add is the matching stack pop.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      // Never delete code that a bound label points into.
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      // Never delete code that reloc info refers to.
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;  // rewind over both instructions
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}
924
925
// More data-processing wrappers. The compare/test group (tst/teq/cmp/cmn)
// always sets condition codes (S bit forced) and discards the result, so
// r0 is passed as a dummy destination.

void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 5*B21 | s, src1, dst, src2);
}


void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 6*B21 | s, src1, dst, src2);
}


// rsc: reverse subtract with carry, dst = src2 - src1 - !C.
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 7*B21 | s, src1, dst, src2);
}


void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 8*B21 | S, src1, r0, src2);
}


void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 9*B21 | S, src1, r0, src2);
}


void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 10*B21 | S, src1, r0, src2);
}


void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 11*B21 | S, src1, r0, src2);
}
962
963
964void Assembler::orr(Register dst, Register src1, const Operand& src2,
965                    SBit s, Condition cond) {
966  addrmod1(cond | 12*B21 | s, src1, dst, src2);
967}
968
969
// MOV: dst = src (data-processing opcode 13). Rn is unused, so r0 is passed
// as a placeholder.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    // Writing to pc transfers control; flush any pending source-position
    // reloc info so it is attached before the transfer.
    WriteRecordedPositions();
  }
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}
976
977
// BIC: bit clear, dst = src1 AND NOT src2 (data-processing opcode 14).
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 14*B21 | s, src1, dst, src2);
}
982
983
// MVN: dst = NOT src (data-processing opcode 15). Rn unused (r0 placeholder).
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 15*B21 | s, r0, dst, src);
}
987
988
989// Multiply instructions.
// MLA: multiply-accumulate, dst = src1 * src2 + srcA. The A bit marks the
// accumulate form; note dst goes in bits 16-19 and srcA in bits 12-15.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
996
997
// MUL: dst = src1 * src2 (low 32 bits). pc is not allowed for any operand.
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
1004
1005
// SMLAL: signed multiply-accumulate long; dstH:dstL += src1 * src2.
// Encoding selects the long-multiply group (B23|B22) with accumulate (A).
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1017
1018
// SMULL: signed multiply long; dstH:dstL = src1 * src2.
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1030
1031
// UMLAL: unsigned multiply-accumulate long; dstH:dstL += src1 * src2.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1043
1044
// UMULL: unsigned multiply long; dstH:dstL = src1 * src2.
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1056
1057
1058// Miscellaneous arithmetic instructions.
1059void Assembler::clz(Register dst, Register src, Condition cond) {
1060  // v5 and above.
1061  ASSERT(!dst.is(pc) && !src.is(pc));
1062  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1063       15*B8 | B4 | src.code());
1064}
1065
1066
1067// Status register access instructions.
// MRS: move the status register selected by s (CPSR or SPSR) into dst.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
1072
1073
// MSR: write the status-register fields in 'fields' from src, which is
// either an encodable immediate or a plain register. An immediate that
// cannot be encoded as a rotated 8-bit value is first materialized into ip
// via a pc-relative load and the instruction is re-issued on ip.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(src.rmode_) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1097
1098
1099// Load/Store instructions.
1100void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1101  if (dst.is(pc)) {
1102    WriteRecordedPositions();
1103  }
1104  addrmod2(cond | B26 | L, dst, src);
1105
1106  // Eliminate pattern: push(r), pop(r)
1107  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
1108  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
1109  // Both instructions can be eliminated.
1110  int pattern_size = 2 * kInstrSize;
1111  if (FLAG_push_pop_elimination &&
1112      last_bound_pos_ <= (pc_offset() - pattern_size) &&
1113      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
1114      // Pattern.
1115      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
1116      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
1117    pc_ -= 2 * kInstrSize;
1118    if (FLAG_print_push_pop_elimination) {
1119      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
1120    }
1121  }
1122}
1123
1124
// STR: store src to dst, then run the pop/push peephole: a preceding
// "add sp, sp, #4" (pop) followed by this push collapses into a single
// store at the current stack slot.
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  // ->  str r, [sp, 0], al
  int pattern_size = 2 * kInstrSize;
  // Only rewrite when no label or reloc info falls inside the window.
  if (FLAG_push_pop_elimination &&
     last_bound_pos_ <= (pc_offset() - pattern_size) &&
     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
     // Pattern.
     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    // Replacement: str src, [sp, #0] with zero offset, always executed.
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}
1145
1146
// LDRB: load an unsigned byte from src into dst (B selects byte access).
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}
1150
1151
// STRB: store the low byte of src to dst.
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}
1155
1156
// LDRH: load an unsigned halfword from src into dst (addressing mode 3).
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}
1160
1161
// STRH: store the low halfword of src to dst (addressing mode 3).
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}
1165
1166
// LDRSB: load a sign-extended byte from src into dst (S6 marks signed).
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}
1170
1171
// LDRSH: load a sign-extended halfword from src into dst.
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
1175
1176
1177// Load/Store multiple instructions.
1178void Assembler::ldm(BlockAddrMode am,
1179                    Register base,
1180                    RegList dst,
1181                    Condition cond) {
1182  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
1183  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
1184
1185  addrmod4(cond | B27 | am | L, base, dst);
1186
1187  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
1188  if (cond == al && (dst & pc.bit()) != 0) {
1189    // There is a slight chance that the ldm instruction was actually a call,
1190    // in which case it would be wrong to return into the constant pool; we
1191    // recognize this case by checking if the emission of the pool was blocked
1192    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
1193    // the case, we emit a jump over the pool.
1194    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
1195  }
1196}
1197
1198
// STM: store the register set 'src' to memory at 'base' using block
// addressing mode 'am'.
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1205
1206
1207// Semaphore instructions.
1208void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
1209  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
1210  ASSERT(!dst.is(base) && !src.is(base));
1211  emit(cond | P | base.code()*B16 | dst.code()*B12 |
1212       B7 | B4 | src.code());
1213}
1214
1215
// SWPB: atomically swap a byte: dst = [base], [base] = src (B = byte form).
void Assembler::swpb(Register dst,
                     Register src,
                     Register base,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}
1225
1226
1227// Exception-generating instructions and debugging support.
1228void Assembler::stop(const char* msg) {
1229#if !defined(__arm__)
1230  // The simulator handles these special instructions and stops execution.
1231  emit(15 << 28 | ((intptr_t) msg));
1232#else
1233  // Just issue a simple break instruction for now. Alternatively we could use
1234  // the swi(0x9f0001) instruction on Linux.
1235  bkpt(0);
1236#endif
1237}
1238
1239
// BKPT: breakpoint with a 16-bit immediate, split into the high 12 bits
// (bits 8-19) and low 4 bits (bits 0-3) of the encoding.
void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
}
1244
1245
// SWI: software interrupt with a 24-bit comment/immediate field.
void Assembler::swi(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
1250
1251
1252// Coprocessor instructions.
1253void Assembler::cdp(Coprocessor coproc,
1254                    int opcode_1,
1255                    CRegister crd,
1256                    CRegister crn,
1257                    CRegister crm,
1258                    int opcode_2,
1259                    Condition cond) {
1260  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
1261  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
1262       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
1263}
1264
1265
// CDP2: unconditional cdp, encoded with the nv condition field (v5+).
void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1274
1275
// MCR: move from ARM register rd to coprocessor register crn/crm.
// opcode_1 and opcode_2 are both 3 bits here.
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1287
1288
// MCR2: unconditional mcr, encoded with the nv condition field (v5+).
void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1297
1298
// MRC: move from coprocessor register crn/crm to ARM register rd
// (the L bit distinguishes this from mcr).
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1310
1311
// MRC2: unconditional mrc, encoded with the nv condition field (v5+).
void Assembler::mrc2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1320
1321
// LDC: load coprocessor register crd from memory (addressing mode 5).
// l selects the long-transfer form.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}
1329
1330
// LDC (unindexed form): load coprocessor register crd from [rn], with an
// 8-bit coprocessor-defined option field instead of an offset.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1342
1343
// LDC2: unconditional ldc, encoded with the nv condition field (v5+).
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& src,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
}
1350
1351
// LDC2 (unindexed form): unconditional variant of the unindexed ldc (v5+).
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}
1359
1360
// STC: store coprocessor register crd to memory (addressing mode 5).
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& dst,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}
1368
1369
// STC (unindexed form): store coprocessor register crd to [rn], with an
// 8-bit coprocessor-defined option field instead of an offset.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1381
1382
1383void Assembler::stc2(Coprocessor
1384                     coproc, CRegister crd,
1385                     const MemOperand& dst,
1386                     LFlag l) {  // v5 and above
1387  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
1388}
1389
1390
// STC2 (unindexed form): unconditional variant of the unindexed stc (v5+).
void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}
1398
1399
1400// Support for VFP.
1401void Assembler::vldr(const DwVfpRegister dst,
1402                     const Register base,
1403                     int offset,
1404                     const Condition cond) {
1405  // Ddst = MEM(Rbase + offset).
1406  // Instruction details available in ARM DDI 0406A, A8-628.
1407  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
1408  // Vdst(15-12) | 1011(11-8) | offset
1409  ASSERT(CpuFeatures::IsEnabled(VFP3));
1410  ASSERT(offset % 4 == 0);
1411  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
1412       0xB*B8 | ((offset / 4) & 255));
1413}
1414
1415
// Stores the double register src to memory at base + offset.
// Requires VFP3 and a word-aligned offset (encoded as offset/4 in 8 bits).
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
  // Vsrc(15-12) | 1011(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(offset % 4 == 0);
  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
       0xB*B8 | ((offset / 4) & 255));
}
1429
1430
// Moves the core-register pair src1:src2 into double register dst.
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
1444
1445
// Moves double register src into the core-register pair dst1:dst2
// (B20 is the op=1 direction bit relative to the overload above).
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
1459
1460
// Moves core register src into single-precision register dst. The single
// register number is split: upper bits into Vn(19-16), low bit into N(7).
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
       src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
}
1473
1474
// Moves single-precision register src into core register dst (op=1 / B20
// is the direction bit relative to the overload above).
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
       dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
}
1487
1488
// Converts the integer held in single register src to a double in dst.
void Assembler::vcvt(const DwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
  // Instruction details available in ARM DDI 0406A, A8-576.
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
       (0x1 & src.code())*B5 | (src.code() >> 1));
}
1501
1502
// Converts the double in src to a 32-bit integer in single register dst.
void Assembler::vcvt(const SwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
  // Instruction details available in ARM DDI 0406A, A8-576.
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
       0x5*B9 | B8 | B7 | B6 | src.code());
}
1515
1516
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406A, A8-536.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1530
1531
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  // B6 distinguishes vsub from vadd in this encoding group.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
1545
1546
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1560
1561
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406A, A8-584.
  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1575
1576
// NOTE(review): the 's' parameter is accepted but never folded into the
// encoding below — confirm whether flag selection was intended here.
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const SBit s,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
1589
1590
// VMRS: move the VFP status register (FPSCR) into core register dst.
void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
1599
1600
1601// Pseudo instructions.
1602void Assembler::lea(Register dst,
1603                    const MemOperand& x,
1604                    SBit s,
1605                    Condition cond) {
1606  int am = x.am_;
1607  if (!x.rm_.is_valid()) {
1608    // Immediate offset.
1609    if ((am & P) == 0)  // post indexing
1610      mov(dst, Operand(x.rn_), s, cond);
1611    else if ((am & U) == 0)  // negative indexing
1612      sub(dst, x.rn_, Operand(x.offset_), s, cond);
1613    else
1614      add(dst, x.rn_, Operand(x.offset_), s, cond);
1615  } else {
1616    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
1617    // register offset the constructors make sure than both shift_imm_
1618    // and shift_op_ are initialized.
1619    ASSERT(!x.rm_.is(pc));
1620    if ((am & P) == 0)  // post indexing
1621      mov(dst, Operand(x.rn_), s, cond);
1622    else if ((am & U) == 0)  // negative indexing
1623      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
1624    else
1625      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
1626  }
1627}
1628
1629
1630bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
1631  uint32_t dummy1;
1632  uint32_t dummy2;
1633  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
1634}
1635
1636
// Prevents constant-pool emission for the next 'instructions' instructions.
void Assembler::BlockConstPoolFor(int instructions) {
  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}
1640
1641
1642// Debugging.
1643void Assembler::RecordJSReturn() {
1644  WriteRecordedPositions();
1645  CheckBuffer();
1646  RecordRelocInfo(RelocInfo::JS_RETURN);
1647}
1648
1649
// Attaches a comment string to the current pc via reloc info; emitted only
// under --debug-code. The pointer (not a copy) is recorded, so msg must
// outlive the generated code's reloc info.
void Assembler::RecordComment(const char* msg) {
  if (FLAG_debug_code) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
1656
1657
// Remembers the current source position; actually written to reloc info
// later by WriteRecordedPositions. kNoPosition is ignored.
void Assembler::RecordPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_position_ = pos;
}
1663
1664
// Remembers the current statement-level source position; written to reloc
// info later by WriteRecordedPositions. kNoPosition is ignored.
void Assembler::RecordStatementPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_statement_position_ = pos;
}
1670
1671
// Flushes the pending statement/expression positions to reloc info,
// suppressing duplicates of what was already written.
void Assembler::WriteRecordedPositions() {
  // Write the statement position if it is different from what was written last
  // time.
  if (current_statement_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
    written_statement_position_ = current_statement_position_;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (current_position_ != written_position_ &&
      current_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::POSITION, current_position_);
    written_position_ = current_position_;
  }
}
1690
1691
// Grows the code buffer (4KB minimum, doubling up to 1MB, then +1MB steps),
// copies instructions and reloc info into the new buffer, and rebases all
// internal pointers and pending relocation entries.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions stay at the start of the buffer; reloc info
  // grows downward from the end, so it shifts by the size delta (rc_delta).
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
1741
1742
// Records relocation info at the current pc. Constant-pool-backed modes are
// queued in prinfo_ (and pool emission over the next instruction is
// blocked); position/comment/JS-return modes bypass the pool. External
// references are skipped unless serializing or under --debug-code.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !FLAG_debug_code) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    reloc_info_writer.Write(&rinfo);
  }
}
1774
1775
// Decide whether the pending constant pool must be emitted now and, if so,
// emit it: a marker word followed by one 32-bit entry per pending RelocInfo,
// patching each recorded pc-relative ldr/str to address its pool slot.
//
// force_emit:   emit unconditionally (e.g. at the end of a function),
//               regardless of distance/slot heuristics. Must not be set while
//               emission is blocked (see ASSERT below).
// require_jump: the pool would land in the middle of executable code, so a
//               branch over it must be emitted first. When false, execution
//               is known not to fall into the pool, so no jump is needed.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // no_const_pool_before_, which is checked here. Also, recursive calls to
  // CheckConstPool are blocked by no_const_pool_before_.
  if (pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    next_buffer_check_ = no_const_pool_before_;

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // One extra instruction for the branch over the pool, if required.
  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool. The emits below must not
  // themselves trigger a (nested) pool emission while this one is in progress.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker "Undefined instruction" as specified by
  // A3.1 Instruction set encoding. The low bits carry the entry count so the
  // pool can be recognized and skipped when scanning the instruction stream.
  emit(0x03000000 | num_prinfo_);

  // Emit constant pool entries, patching each recorded load/store to point
  // at the pool slot just emitted for it.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    // Debug-only reloc modes never reserve pool slots.
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    // -8: on ARM, a pc-relative operand reads pc as the instruction's own
    // address plus 8 (two instructions ahead).
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      // Negative offset: clear the U (add) bit and store the magnitude.
      instr &= ~U;
      delta = -delta;
    }
    // The 12-bit immediate field must be able to hold the offset.
    ASSERT(is_uint12(delta));
    instr_at_put(rinfo.pc(), instr + delta);
    // The pool slot itself: the 32-bit constant recorded with the reloc info.
    emit(rinfo.data());
  }
  // Pool drained; reset bookkeeping so the distance heuristic restarts here.
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  // Bind the label only if a jump was actually emitted above.
  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
1877
1878
1879} }  // namespace v8::internal
1880