1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
// The original source code covered by the above license has been
// modified significantly by Google Inc.
35// Copyright 2010 the V8 project authors. All rights reserved.
36
37#include "v8.h"
38
39#include "arm/assembler-thumb2-inl.h"
40#include "serialize.h"
41
42namespace v8 {
43namespace internal {
44
// Safe default is no features.
// Bit set of CPU features the generated code may rely on (see Probe()).
unsigned CpuFeatures::supported_ = 0;
// Bit set of features currently enabled for code generation
// (presumably toggled by an enable scope declared elsewhere — confirm).
unsigned CpuFeatures::enabled_ = 0;
// Features discovered only by runtime probing, i.e. not implied by
// compile-time flags or the platform.
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
49
// Determine which CPU features are available. On a simulator build
// (!__arm__) features are taken from flags; on real hardware they are
// probed at runtime — unless we might serialize, in which case only
// platform-implied features are used.
void CpuFeatures::Probe() {
  // If the compiler is allowed to use vfp then we can use vfp too in our
  // code generation.
#if !defined(__arm__)
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
  if (FLAG_enable_vfp3) {
      supported_ |= 1u << VFP3;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
  if (FLAG_enable_armv7) {
      supported_ |= 1u << ARMv7;
  }
#else
  if (Serializer::enabled()) {
    // Snapshot code must run on any hardware, so skip runtime probing.
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    return;  // No features if we might serialize.
  }

  if (OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if
    // runtime detection of VFP returns true.
    supported_ |= 1u << VFP3;
    found_by_runtime_probing_ |= 1u << VFP3;
  }

  if (OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }
#endif
}
81
82
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister

// Sentinel meaning "no register" (is_valid() is false).
Register no_reg = { -1 };

// The 16 ARM core registers; the initializer is the hardware encoding.
Register r0  = {  0 };
Register r1  = {  1 };
Register r2  = {  2 };
Register r3  = {  3 };
Register r4  = {  4 };
Register r5  = {  5 };
Register r6  = {  6 };
Register r7  = {  7 };
Register r8  = {  8 };  // Used as context register.
Register r9  = {  9 };
Register r10 = { 10 };  // Used as roots register.
Register fp  = { 11 };  // Frame pointer.
Register ip  = { 12 };  // Intra-procedure scratch register (trashed freely).
Register sp  = { 13 };  // Stack pointer.
Register lr  = { 14 };  // Link register.
Register pc  = { 15 };  // Program counter.
104
105
// Coprocessor registers cr0..cr15; no_creg is the invalid sentinel.
CRegister no_creg = { -1 };

CRegister cr0  = {  0 };
CRegister cr1  = {  1 };
CRegister cr2  = {  2 };
CRegister cr3  = {  3 };
CRegister cr4  = {  4 };
CRegister cr5  = {  5 };
CRegister cr6  = {  6 };
CRegister cr7  = {  7 };
CRegister cr8  = {  8 };
CRegister cr9  = {  9 };
CRegister cr10 = { 10 };
CRegister cr11 = { 11 };
CRegister cr12 = { 12 };
CRegister cr13 = { 13 };
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };
124
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that the single-register pair s(2N):s(2N+1) aliases the
// double register d(N).
SwVfpRegister s0  = {  0 };
SwVfpRegister s1  = {  1 };
SwVfpRegister s2  = {  2 };
SwVfpRegister s3  = {  3 };
SwVfpRegister s4  = {  4 };
SwVfpRegister s5  = {  5 };
SwVfpRegister s6  = {  6 };
SwVfpRegister s7  = {  7 };
SwVfpRegister s8  = {  8 };
SwVfpRegister s9  = {  9 };
SwVfpRegister s10 = { 10 };
SwVfpRegister s11 = { 11 };
SwVfpRegister s12 = { 12 };
SwVfpRegister s13 = { 13 };
SwVfpRegister s14 = { 14 };
SwVfpRegister s15 = { 15 };
SwVfpRegister s16 = { 16 };
SwVfpRegister s17 = { 17 };
SwVfpRegister s18 = { 18 };
SwVfpRegister s19 = { 19 };
SwVfpRegister s20 = { 20 };
SwVfpRegister s21 = { 21 };
SwVfpRegister s22 = { 22 };
SwVfpRegister s23 = { 23 };
SwVfpRegister s24 = { 24 };
SwVfpRegister s25 = { 25 };
SwVfpRegister s26 = { 26 };
SwVfpRegister s27 = { 27 };
SwVfpRegister s28 = { 28 };
SwVfpRegister s29 = { 29 };
SwVfpRegister s30 = { 30 };
SwVfpRegister s31 = { 31 };
159
// The 16 double-precision VFP registers d0..d15.
DwVfpRegister d0  = {  0 };
DwVfpRegister d1  = {  1 };
DwVfpRegister d2  = {  2 };
DwVfpRegister d3  = {  3 };
DwVfpRegister d4  = {  4 };
DwVfpRegister d5  = {  5 };
DwVfpRegister d6  = {  6 };
DwVfpRegister d7  = {  7 };
DwVfpRegister d8  = {  8 };
DwVfpRegister d9  = {  9 };
DwVfpRegister d10 = { 10 };
DwVfpRegister d11 = { 11 };
DwVfpRegister d12 = { 12 };
DwVfpRegister d13 = { 13 };
DwVfpRegister d14 = { 14 };
DwVfpRegister d15 = { 15 };
176
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// No reloc modes on this target need to be patched up when code moves.
const int RelocInfo::kApplyMask = 0;
181
182
183void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
184  // Patch the code at the current address with the supplied instructions.
185  Instr* pc = reinterpret_cast<Instr*>(pc_);
186  Instr* instr = reinterpret_cast<Instr*>(instructions);
187  for (int i = 0; i < instruction_count; i++) {
188    *(pc + i) = *(instr + i);
189  }
190
191  // Indicate that code has changed.
192  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
193}
194
195
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not implemented on this port; callers must not reach this.
  UNIMPLEMENTED();
}
202
203
204// -----------------------------------------------------------------------------
205// Implementation of Operand and MemOperand
206// See assembler-thumb2-inl.h for inlined constructors
207
// Immediate operand referring to a V8 object. Heap objects are embedded via
// the handle location (so the GC can update the pointer) and get an
// EMBEDDED_OBJECT reloc entry; non-heap values are embedded directly.
Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    // Embed the handle location so relocation/GC can rewrite the pointer.
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // Not a heap object; no relocation needed.
    imm32_ =  reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}
222
223
224Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
225  ASSERT(is_uint5(shift_imm));
226  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
227  rm_ = rm;
228  rs_ = no_reg;
229  shift_op_ = shift_op;
230  shift_imm_ = shift_imm & 31;
231  if (shift_op == RRX) {
232    // encoded as ROR with shift_imm == 0
233    ASSERT(shift_imm == 0);
234    shift_op_ = ROR;
235    shift_imm_ = 0;
236  }
237}
238
239
240Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
241  ASSERT(shift_op != RRX);
242  rm_ = rm;
243  rs_ = no_reg;
244  shift_op_ = shift_op;
245  rs_ = rs;
246}
247
248
249MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
250  rn_ = rn;
251  rm_ = no_reg;
252  offset_ = offset;
253  am_ = am;
254}
255
256MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
257  rn_ = rn;
258  rm_ = rm;
259  shift_op_ = LSL;
260  shift_imm_ = 0;
261  am_ = am;
262}
263
264
265MemOperand::MemOperand(Register rn, Register rm,
266                       ShiftOp shift_op, int shift_imm, AddrMode am) {
267  ASSERT(is_uint5(shift_imm));
268  rn_ = rn;
269  rm_ = rm;
270  shift_op_ = shift_op;
271  shift_imm_ = shift_imm & 31;
272  am_ = am;
273}
274
275
// -----------------------------------------------------------------------------
// Implementation of Assembler.

// Instruction encoding bits.
// Several names alias the same bit because the meaning of a bit depends on
// the instruction class (e.g. bit 20 is L for loads but S for ALU ops).
enum {
  H   = 1 << 5,   // halfword (or byte)
  S6  = 1 << 6,   // signed (or unsigned)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  // B<n> is simply bit n of the instruction word.
  B4  = 1 << 4,
  B5  = 1 << 5,
  B6  = 1 << 6,
  B7  = 1 << 7,
  B8  = 1 << 8,
  B9  = 1 << 9,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask     = 15 << 12,  // in str instruction
  CondMask   = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask  = (1 << 24) - 1,
  Off12Mask  = (1 << 12) - 1,
  // Reserved condition.
  nv = 15 << 28
};
322
323
// Precomputed instruction patterns used by the peephole push/pop
// elimination in add() and by constant-pool handling.

// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;

// Spare buffer.
// A single minimal-sized buffer is cached on Assembler destruction so the
// next Assembler can reuse it instead of allocating (see ~Assembler).
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
343
// Construct an assembler. If buffer is NULL the assembler owns (and later
// frees or caches) an internally allocated buffer of at least
// kMinimalBufferSize bytes; otherwise it emits into the caller's buffer.
Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the cached spare buffer when one is available.
      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers. Instructions grow forward from the start of the
  // buffer; relocation info is written backward from the end.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}
385
386
387Assembler::~Assembler() {
388  if (own_buffer_) {
389    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
390      spare_buffer_ = buffer_;
391    } else {
392      DeleteArray(buffer_);
393    }
394  }
395}
396
397
// Finalize code generation and fill in the code descriptor. Flushes any
// pending constant pool entries first.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written backward from the end of the buffer.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
409
410
411void Assembler::Align(int m) {
412  ASSERT(m >= 4 && IsPowerOf2(m));
413  while ((pc_offset() & (m - 1)) != 0) {
414    nop();
415  }
416}
417
418
419// Labels refer to positions in the (to be) generated code.
420// There are bound, linked, and unused labels.
421//
422// Bound labels refer to known positions in the already
423// generated code. pos() is the position the label refers to.
424//
425// Linked labels refer to unknown positions in the code
426// to be generated; pos() is the position of the last
427// instruction using the label.
428
429
// The link chain is terminated by a negative code position (must be aligned).
// target_at() returns this value when an emitted label constant decodes to it.
const int kEndOfChain = -4;
432
433
// Decode the link/target stored at code position pos: either the value of an
// emitted label constant, or the destination of the branch instruction there.
int Assembler::target_at(int pos)  {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    // Undo the Code-header bias applied by target_at_put / label_at_put.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign-extend the 24-bit immediate and convert words to bytes: shifting
  // left by 8 then arithmetically right by 6 does both in one go.
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0)
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;

  // Branch offsets are relative to pc + kPcLoadDelta.
  return pos + kPcLoadDelta + imm26;
}
448
449
// Store target_pos at code position pos: either as a label constant, or by
// rewriting the immediate field of the branch instruction at pos.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    // b/bl targets must be word aligned.
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}
473
474
// Debug helper: print the state of label L; for a linked label, walk the
// link chain and print each linked instruction (label constant or branch).
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    // Iterate over a copy so the caller's label is left untouched.
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~Imm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        int cond = instr & CondMask;
        const char* b;
        const char* c;
        if (cond == nv) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
531
532
// Bind label L to code position pos: patch every instruction on L's link
// chain to refer to pos, then mark the label as bound.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
547
548
// Merge the link chain of appendix into L's chain (appending it at the end)
// and clear appendix. If L was empty it simply takes over appendix's chain.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      // Walk to the end of L's chain (terminated by kEndOfChain) ...
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      // ... and splice appendix's first link there.
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
568
569
570void Assembler::bind(Label* L) {
571  ASSERT(!L->is_bound());  // label can only be bound once
572  bind_to(L, pc_offset());
573}
574
575
576void Assembler::next(Label* L) {
577  ASSERT(L->is_linked());
578  int link = target_at(L->pos());
579  if (link > 0) {
580    L->link_to(link);
581  } else {
582    ASSERT(link == kEndOfChain);
583    L->Unuse();
584  }
585}
586
587
588// Low-level code emission routines depending on the addressing mode.
589static bool fits_shifter(uint32_t imm32,
590                         uint32_t* rotate_imm,
591                         uint32_t* immed_8,
592                         Instr* instr) {
593  // imm32 must be unsigned.
594  for (int rot = 0; rot < 16; rot++) {
595    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
596    if ((imm8 <= 0xff)) {
597      *rotate_imm = rot;
598      *immed_8 = imm8;
599      return true;
600    }
601  }
602  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
603  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
604    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
605      *instr ^= 0x2*B21;
606      return true;
607    }
608  }
609  return false;
610}
611
612
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
static bool MustUseIp(RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      // NOTE(review): presumably records that it is now too late to turn on
      // serialization for this run — confirm against Serializer.
      Serializer::TooLateToEnableNow();
    }
#endif
    // External references must go through ip only when snapshotting.
    return Serializer::enabled();
  } else if (rmode == RelocInfo::NONE) {
    // Plain immediates are never relocated; encode them in place if possible.
    return false;
  }
  return true;
}
630
631
// Emit a data-processing instruction (ARM addressing mode 1). When an
// immediate operand cannot be encoded as a shifter operand (or must be
// relocatable), it is loaded from the constant pool into ip (or directly
// into rd for a plain mov) and the instruction is re-emitted using ip.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(x.rmode_) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      RecordRelocInfo(x.rmode_, x.imm32_);
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        ldr(rd, MemOperand(pc, 0), cond);
      } else {
        ldr(ip, MemOperand(pc, 0), cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc))
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
}
673
674
// Emit a word/byte load-store instruction (ARM addressing mode 2). An
// immediate offset that does not fit in 12 bits is first moved into ip and
// the instruction is re-emitted with a register offset.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      // Negative offsets are encoded as a positive magnitude with the
      // U (up/down) bit flipped.
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
706
707
// Emit a halfword / signed-byte / doubleword load-store instruction (ARM
// addressing mode 3). Immediate offsets are limited to 8 bits (split into
// two nibbles); larger offsets and scaled register offsets go through ip.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      // Encode the magnitude and flip the U (up/down) bit.
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // The 8-bit offset is split into a high and a low nibble; B marks the
    // immediate form.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
746
747
// Emit a block data transfer instruction (ldm/stm, ARM addressing mode 4).
// rl is the bit set of registers to transfer; it must be non-empty.
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}
754
755
// Emit a coprocessor load/store instruction (ARM addressing mode 5). The
// byte offset must be a word-aligned value whose word count fits in 8 bits.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;  // the encoding counts words, not bytes
  if (offset_8 < 0) {
    // Encode the magnitude and flip the U (up/down) bit.
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
779
780
// Compute the pc-relative branch offset to label L for a branch emitted at
// the current position, linking L's chain if it is not yet bound.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      // First use of an unbound label: start a new chain.
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  // Branches are relative to pc + kPcLoadDelta.
  return target_pos - (pc_offset() + kPcLoadDelta);
}
799
800
801void Assembler::label_at_put(Label* L, int at_offset) {
802  int target_pos;
803  if (L->is_bound()) {
804    target_pos = L->pos();
805  } else {
806    if (L->is_linked()) {
807      target_pos = L->pos();  // L's link
808    } else {
809      target_pos = kEndOfChain;
810    }
811    L->link_to(at_offset);
812    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
813  }
814}
815
816
// Branch instructions.

// Conditional branch. branch_offset is in bytes, must be word aligned, and
// is encoded as a signed 24-bit word offset.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al)
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
}
828
829
830void Assembler::bl(int branch_offset, Condition cond) {
831  ASSERT((branch_offset & 3) == 0);
832  int imm24 = branch_offset >> 2;
833  ASSERT(is_int24(imm24));
834  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
835}
836
837
// Branch with link and exchange (immediate form). branch_offset must be
// halfword aligned; bit 1 of the offset is carried in the H bit (B24).
void Assembler::blx(int branch_offset) {  // v5 and above
  WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  // BLX (immediate) uses the 1111 (nv) condition field.
  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
}
846
847
848void Assembler::blx(Register target, Condition cond) {  // v5 and above
849  WriteRecordedPositions();
850  ASSERT(!target.is(pc));
851  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
852}
853
854
855void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
856  WriteRecordedPositions();
857  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
858  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
859}
860
861
862// Data-processing instructions.
863
// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
// Instruction details available in ARM DDI 0406A, A8-464.
// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
//  Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
// src2 is the lsb and src3 is width-1; both must be immediates in 0..31.
void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
                     const Operand& src3, Condition cond) {
  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
}
876
877
878void Assembler::and_(Register dst, Register src1, const Operand& src2,
879                     SBit s, Condition cond) {
880  addrmod1(cond | 0*B21 | s, src1, dst, src2);
881}
882
883
884void Assembler::eor(Register dst, Register src1, const Operand& src2,
885                    SBit s, Condition cond) {
886  addrmod1(cond | 1*B21 | s, src1, dst, src2);
887}
888
889
890void Assembler::sub(Register dst, Register src1, const Operand& src2,
891                    SBit s, Condition cond) {
892  addrmod1(cond | 2*B21 | s, src1, dst, src2);
893}
894
895
896void Assembler::rsb(Register dst, Register src1, const Operand& src2,
897                    SBit s, Condition cond) {
898  addrmod1(cond | 3*B21 | s, src1, dst, src2);
899}
900
901
// ADD: dst = src1 + src2 (data-processing opcode 4), followed by a peephole
// that removes an immediately preceding push(r)/pop() pair.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      // The pattern must not straddle a bound label or recorded reloc info,
      // otherwise removing it would invalidate positions.
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    // Back up the emission pointer over both instructions.
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}
923
924
925void Assembler::adc(Register dst, Register src1, const Operand& src2,
926                    SBit s, Condition cond) {
927  addrmod1(cond | 5*B21 | s, src1, dst, src2);
928}
929
930
931void Assembler::sbc(Register dst, Register src1, const Operand& src2,
932                    SBit s, Condition cond) {
933  addrmod1(cond | 6*B21 | s, src1, dst, src2);
934}
935
936
937void Assembler::rsc(Register dst, Register src1, const Operand& src2,
938                    SBit s, Condition cond) {
939  addrmod1(cond | 7*B21 | s, src1, dst, src2);
940}
941
942
943void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
944  addrmod1(cond | 8*B21 | S, src1, r0, src2);
945}
946
947
948void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
949  addrmod1(cond | 9*B21 | S, src1, r0, src2);
950}
951
952
953void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
954  addrmod1(cond | 10*B21 | S, src1, r0, src2);
955}
956
957
958void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
959  addrmod1(cond | 11*B21 | S, src1, r0, src2);
960}
961
962
963void Assembler::orr(Register dst, Register src1, const Operand& src2,
964                    SBit s, Condition cond) {
965  addrmod1(cond | 12*B21 | s, src1, dst, src2);
966}
967
968
// MOV: dst = src (data-processing opcode 13; no first operand register).
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    // A mov into pc transfers control; flush recorded source positions first.
    WriteRecordedPositions();
  }
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}
975
976
// BIC: dst = src1 AND NOT src2 (data-processing opcode 1110).
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 14*B21 | s, src1, dst, src2);
}
981
982
// MVN: dst = NOT src (data-processing opcode 1111; Rn unused, r0 placeholder).
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 15*B21 | s, r0, dst, src);
}
986
987
988// Multiply instructions.
// MLA: dst = src1 * src2 + srcA (the A bit selects accumulate form).
// pc is not a valid operand for multiplies.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
995
996
// MUL: dst = src1 * src2.
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
1003
1004
// SMLAL: dstH:dstL += src1 * src2 (signed 64-bit multiply-accumulate).
// dstL and dstH must be distinct and none of the operands may be pc.
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1016
1017
// SMULL: dstH:dstL = src1 * src2 (signed 64-bit multiply).
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1029
1030
// UMLAL: dstH:dstL += src1 * src2 (unsigned 64-bit multiply-accumulate).
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1042
1043
// UMULL: dstH:dstL = src1 * src2 (unsigned 64-bit multiply).
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1055
1056
1057// Miscellaneous arithmetic instructions.
// CLZ: dst = number of leading zero bits in src.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | B4 | src.code());
}
1064
1065
1066// Status register access instructions.
// MRS: dst = program status register selected by s (CPSR or SPSR).
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
1071
1072
// MSR: move src into the status-register fields selected by 'fields'.
// An immediate that cannot be encoded as a rotated 8-bit value is first
// loaded into ip via a constant-pool ldr, then the register form is used.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(src.rmode_) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);  // Recurse once with the register form.
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1096
1097
1098// Load/Store instructions.
// LDR: load a word from src into dst. A load into pc is a control transfer,
// so flush recorded source positions first. Afterwards, try to peephole-
// eliminate a push immediately followed by a pop of the same register.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(r), pop(r)
  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  // Only safe if no label was bound and no reloc info was written inside the
  // two-instruction window being removed.
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
    }
  }
}
1122
1123
// STR: store src word to dst. Afterwards, peephole-rewrite a pop()
// immediately followed by push(r) into a single store to [sp, #0].
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  // ->  str r, [sp, 0], al
  int pattern_size = 2 * kInstrSize;
  // Only safe if no label was bound and no reloc info was written inside the
  // two-instruction window being replaced.
  if (FLAG_push_pop_elimination &&
     last_bound_pos_ <= (pc_offset() - pattern_size) &&
     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
     // Pattern.
     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}
1144
1145
// LDRB: load a byte (zero-extended) from src into dst.
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}
1149
1150
// STRB: store the low byte of src to dst.
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}
1154
1155
// LDRH: load a halfword (zero-extended) from src into dst.
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}
1159
1160
// STRH: store the low halfword of src to dst.
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}
1164
1165
// LDRSB: load a byte, sign-extended, from src into dst.
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}
1169
1170
// LDRSH: load a halfword, sign-extended, from src into dst.
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
1174
1175
1176// Load/Store multiple instructions.
// LDM: load the registers in 'dst' from consecutive words at 'base' using
// block addressing mode 'am'.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1196
1197
// STM: store the registers in 'src' to consecutive words at 'base' using
// block addressing mode 'am'.
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1204
1205
1206// Semaphore instructions.
// SWP: atomically swap a word: dst = [base]; [base] = src.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}
1213
1214
// SWPB: atomically swap a byte: dst = [base]; [base] = src (B bit set).
void Assembler::swpb(Register dst,
                     Register src,
                     Register base,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}
1224
1225
1226// Exception-generating instructions and debugging support.
// Emit a stop/debug-break. When not building for real ARM hardware the
// message pointer is packed into an instruction with condition 0xF that only
// the simulator understands; on hardware we fall back to a plain breakpoint.
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
  // The simulator handles these special instructions and stops execution.
  emit(15 << 28 | ((intptr_t) msg));
#else
  // Just issue a simple break instruction for now. Alternatively we could use
  // the swi(0x9f0001) instruction on Linux.
  bkpt(0);
#endif
}
1237
1238
// BKPT: software breakpoint; the 16-bit immediate is split across bits
// 8-19 (upper 12) and 0-3 (lower 4) of the encoding.
void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
}
1243
1244
// SWI: software interrupt with a 24-bit comment/immediate field.
void Assembler::swi(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
1249
1250
1251// Coprocessor instructions.
// CDP: coprocessor data processing (no ARM register involved).
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
1263
1264
// CDP2: unconditional (nv condition field) form of cdp.
void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1273
1274
// MCR: move from ARM register rd to coprocessor register crn.
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1286
1287
// MCR2: unconditional (nv condition field) form of mcr.
void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1296
1297
// MRC: move from coprocessor register crn to ARM register rd (L bit set).
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1309
1310
// MRC2: unconditional (nv condition field) form of mrc.
void Assembler::mrc2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1319
1320
// LDC: load coprocessor register crd from memory (addressing mode 5).
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}
1328
1329
// LDC (unindexed form): load crd from [rn] with a coprocessor-defined
// 8-bit option field instead of an offset.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1341
1342
// LDC2: unconditional (nv condition field) form of ldc.
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& src,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
}
1349
1350
// LDC2 (unindexed form): unconditional form of the unindexed ldc.
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}
1358
1359
// STC: store coprocessor register crd to memory (addressing mode 5).
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& dst,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}
1367
1368
// STC (unindexed form): store crd to [rn] with a coprocessor-defined
// 8-bit option field instead of an offset.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1380
1381
1382void Assembler::stc2(Coprocessor
1383                     coproc, CRegister crd,
1384                     const MemOperand& dst,
1385                     LFlag l) {  // v5 and above
1386  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
1387}
1388
1389
// STC2 (unindexed form): unconditional form of the unindexed stc.
void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}
1397
1398
1399// Support for VFP.
// VLDR: load a double-precision VFP register from [base + offset].
// The offset must be word-aligned; it is encoded divided by 4.
void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1011(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(offset % 4 == 0);
  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
       0xB*B8 | ((offset / 4) & 255));
}
1413
1414
// VSTR: store a double-precision VFP register to [base + offset].
// The offset must be word-aligned; it is encoded divided by 4.
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
  // Vsrc(15-12) | 1011(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(offset % 4 == 0);
  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
       0xB*B8 | ((offset / 4) & 255));
}
1428
1429
// VMOV: move a core-register pair <src1, src2> into double register dst.
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
1443
1444
// VMOV: move double register src into the core-register pair <dst1, dst2>.
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
1458
1459
// VMOV: move core register src into single-precision register dst.
// The 5-bit S-register number is split: top 4 bits in Vn, lowest bit in N.
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
       src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
}
1472
1473
// VMOV: move single-precision register src into core register dst.
// The 5-bit S-register number is split: top 4 bits in Vn, lowest bit in N.
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
       dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
}
1486
1487
// VCVT: convert the integer held in single register src to a double in dst.
void Assembler::vcvt(const DwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
  // Instruction details available in ARM DDI 0406A, A8-576.
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
       (0x1 & src.code())*B5 | (src.code() >> 1));
}
1500
1501
// VCVT: convert the double in src to a 32-bit integer in single register dst.
void Assembler::vcvt(const SwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
  // Instruction details available in ARM DDI 0406A, A8-576.
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
       0x5*B9 | B8 | B7 | B6 | src.code());
}
1514
1515
// VADD.F64: dst = src1 + src2.
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-536.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1529
1530
// VSUB.F64: dst = src1 - src2.
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
1544
1545
// VMUL.F64: dst = src1 * src2.
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1559
1560
// VDIV.F64: dst = src1 / src2.
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-584.
  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1574
1575
// VCMP.F64: compare src1 with src2, setting the VFP status flags.
// NOTE(review): the 's' parameter is accepted but never encoded below —
// confirm whether it is intentionally ignored.
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const SBit s,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
1588
1589
// VMRS: copy the VFP status register (FPSCR) into core register dst.
void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
1598
1599
1600// Pseudo instructions.
// Load effective address: compute the address described by MemOperand x
// into dst, emitting mov/add/sub depending on the indexing mode. For
// post-indexing the effective address is just the base register.
void Assembler::lea(Register dst,
                    const MemOperand& x,
                    SBit s,
                    Condition cond) {
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    if ((am & P) == 0)  // post indexing
      mov(dst, Operand(x.rn_), s, cond);
    else if ((am & U) == 0)  // negative indexing
      sub(dst, x.rn_, Operand(x.offset_), s, cond);
    else
      add(dst, x.rn_, Operand(x.offset_), s, cond);
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    if ((am & P) == 0)  // post indexing
      mov(dst, Operand(x.rn_), s, cond);
    else if ((am & U) == 0)  // negative indexing
      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
    else
      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
  }
}
1627
1628
// Returns true if imm32 can be encoded as a rotated 8-bit immediate in a
// single addressing-mode-1 (data-processing) instruction.
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
}
1634
1635
// Prevent constant-pool emission for the next 'instructions' instructions.
void Assembler::BlockConstPoolFor(int instructions) {
  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}
1639
1640
1641// Debugging.
// Mark the current pc as a JS return site (used to patch returns for
// debugging); flushes pending positions first.
void Assembler::RecordJSReturn() {
  WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
1647
1648
// Attach a comment reloc entry at the current pc (debug-code builds only).
void Assembler::RecordComment(const char* msg) {
  if (FLAG_debug_code) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
1655
1656
1657void Assembler::RecordPosition(int pos) {
1658  if (pos == RelocInfo::kNoPosition) return;
1659  ASSERT(pos >= 0);
1660  current_position_ = pos;
1661}
1662
1663
1664void Assembler::RecordStatementPosition(int pos) {
1665  if (pos == RelocInfo::kNoPosition) return;
1666  ASSERT(pos >= 0);
1667  current_statement_position_ = pos;
1668}
1669
1670
// Flush the most recently recorded statement position and position as reloc
// info at the current pc, writing each only if it changed since last flush.
void Assembler::WriteRecordedPositions() {
  // Write the statement position if it is different from what was written last
  // time.
  if (current_statement_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
    written_statement_position_ = current_statement_position_;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (current_position_ != written_position_ &&
      current_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::POSITION, current_position_);
    written_position_ = current_position_;
  }
}
1689
1690
// Grow the code buffer (4KB minimum, doubling up to 1MB, then +1MB steps),
// copy instructions to the front and reloc info to the back of the new
// buffer, and rebase all pointers and pending reloc entries.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  // Instructions live at the start of the buffer, reloc info grows down
  // from the end, so the two regions move by different deltas.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
1740
1741
// Record relocation info at the current pc. Pool-requiring modes are queued
// as pending entries for the next constant pool; position/comment/JS-return
// modes are not. All non-NONE modes are also written to the reloc stream
// (except external references when not serializing, in release builds).
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !FLAG_debug_code) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    reloc_info_writer.Write(&rinfo);
  }
}
1773
1774
// Emit the pending constant pool if needed (or if force_emit is set), and
// patch every pending pc-relative ldr/str to point at its pool entry.
// force_emit: emit unconditionally (something is wrong if emission is also
//   blocked — see the ASSERT below). require_jump: the code following the
//   pool is reachable, so a branch must be emitted to skip over the pool.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // no_const_pool_before_, which is checked here. Also, recursive calls to
  // CheckConstPool are blocked by no_const_pool_before_.
  if (pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    next_buffer_check_ = no_const_pool_before_;

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // Size of the optional branch emitted to jump over the pool.
  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool.
  // The blocked range covers the jump, the marker word, and one word per
  // pending entry — i.e. everything this function is about to emit.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker "Undefined instruction" as specified by
  // A3.1 Instruction set encoding. The low bits carry the entry count.
  emit(0x03000000 | num_prinfo_);

  // Emit constant pool entries, patching each referencing instruction to
  // carry the correct pc-relative offset to its entry.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    // Comment and position reloc modes never produce pool entries.
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    // The -8 accounts for the pc reading two instructions ahead when the
    // ldr/str executes.
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      // Negative offset: clear the U (add) bit and store the magnitude.
      instr &= ~U;
      delta = -delta;
    }
    ASSERT(is_uint12(delta));  // offset must fit in the 12-bit immediate
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());  // the constant itself becomes the pool entry
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
1876
1877
1878} }  // namespace v8::internal
1879