// Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
// This is forked from Dart revision df52deea9f25690eb8b66c5995da92b70f7ac1fe
// Please update the (git) revision if we merge changes from Dart.
// https://code.google.com/p/dart/wiki/GettingTheSource

#include "vm/globals.h"  // NOLINT
#if defined(TARGET_ARCH_ARM)

#include "vm/assembler.h"
#include "vm/cpu.h"
#include "vm/longjump.h"
#include "vm/runtime_entry.h"
#include "vm/simulator.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"

// An extra check since we are assuming the existence of /proc/cpuinfo below.
#if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID)
#error ARM cross-compile only supported on Linux
#endif

namespace dart {

DECLARE_FLAG(bool, allow_absolute_addresses);
DEFINE_FLAG(bool, print_stop_message, true, "Print stop message.");
DECLARE_FLAG(bool, inline_alloc);

#if 0
// Moved to encodeImmRegOffsetEnc3 in IceAssemblerARM32.cpp
uint32_t Address::encoding3() const {
  if (kind_ == Immediate) {
    uint32_t offset = encoding_ & kOffset12Mask;
    ASSERT(offset < 256);
    return (encoding_ & ~kOffset12Mask) | B22 |
           ((offset & 0xf0) << 4) | (offset & 0xf);
  }
  ASSERT(kind_ == IndexRegister);
  return encoding_;
}
#endif

uint32_t Address::vencoding() const {
  ASSERT(kind_ == Immediate);
  uint32_t offset = encoding_ & kOffset12Mask;
  ASSERT(offset < (1 << 10));  // In the range 0 to +1020.
  ASSERT(Utils::IsAligned(offset, 4));  // Multiple of 4.
  int mode = encoding_ & ((8|4|1) << 21);
  ASSERT((mode == Offset) || (mode == NegOffset));
  uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
  if (mode == Offset) {
    vencoding |= 1 << 23;
  }
  return vencoding;
}


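// Fills [data, data + length) with (4-byte) breakpoint instructions so that
// any gap in generated code traps if it is accidentally executed.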
void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
  ASSERT(Utils::IsAligned(data, 4));
  ASSERT(Utils::IsAligned(length, 4));
  const uword end = data + length;
  while (data < end) {
    *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction;
    data += 4;
  }
}


void Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}

#if 0
// Moved to ARM32::AssemblerARM32::emitType01()
void Assembler::EmitType01(Condition cond,
                           int type,
                           Opcode opcode,
                           int set_cc,
                           Register rn,
                           Register rd,
                           Operand o) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     type << kTypeShift |
                     static_cast<int32_t>(opcode) << kOpcodeShift |
                     set_cc << kSShift |
                     static_cast<int32_t>(rn) << kRnShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     o.encoding();
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitType05()
void Assembler::EmitType5(Condition cond, int32_t offset, bool link) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     5 << kTypeShift |
                     (link ? 1 : 0) << kLinkShift;
  Emit(Assembler::EncodeBranchOffset(offset, encoding));
}

// Moved to ARM32::AssemblerARM32::emitMemOp()
void Assembler::EmitMemOp(Condition cond,
                          bool load,
                          bool byte,
                          Register rd,
                          Address ad) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B26 | (ad.kind() == Address::Immediate ? 0 : B25) |
                     (load ? L : 0) |
                     (byte ? B : 0) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding();
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitMemOpEnc3()
void Assembler::EmitMemOpAddressMode3(Condition cond,
                                      int32_t mode,
                                      Register rd,
                                      Address ad) {
  ASSERT(rd != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     mode |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding3();
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitMultiMemOp()
void Assembler::EmitMultiMemOp(Condition cond,
                               BlockAddressMode am,
                               bool load,
                               Register base,
                               RegList regs) {
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     regs;
  Emit(encoding);
}
#endif

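// Shift-by-immediate is encoded as a MOV data-processing instruction: the
// shift kind goes in the shift field and the amount comes from the operand.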
void Assembler::EmitShiftImmediate(Condition cond,
                                   Shift opcode,
                                   Register rd,
                                   Register rm,
                                   Operand o) {
  ASSERT(cond != kNoCondition);
  ASSERT(o.type() == 1);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     o.encoding() << kShiftImmShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}


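// Shift-by-register uses the same MOV form, but sets bit 4 (B4) and takes
// the shift amount from a register instead of an immediate.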
void Assembler::EmitShiftRegister(Condition cond,
                                  Shift opcode,
                                  Register rd,
                                  Register rm,
                                  Operand o) {
  ASSERT(cond != kNoCondition);
  ASSERT(o.type() == 0);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     o.encoding() << kShiftRegisterShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}


#if 0
// Moved to ARM32::AssemblerARM32::and_()
void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), AND, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::eor()
void Assembler::eor(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), EOR, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::sub()
void Assembler::sub(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SUB, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::rsb()
void Assembler::rsb(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSB, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::rsb()
void Assembler::rsbs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSB, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::add()
void Assembler::add(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADD, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::add()
void Assembler::adds(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADD, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::sub()
void Assembler::subs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SUB, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::adc()
void Assembler::adc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADC, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::adc()
void Assembler::adcs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ADC, 1, rn, rd, o);
}
#endif

void Assembler::sbc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SBC, 0, rn, rd, o);
}


void Assembler::sbcs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), SBC, 1, rn, rd, o);
}

#if 0
// Moved to ARM32::AssemblerARM32::rsc()
void Assembler::rsc(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), RSC, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::tst()
void Assembler::tst(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), TST, 1, rn, R0, o);
}
#endif

void Assembler::teq(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), TEQ, 1, rn, R0, o);
}

#if 0
// Moved to ARM32::AssemblerARM32::cmp()
void Assembler::cmp(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), CMP, 1, rn, R0, o);
}

// Moved to ARM32::AssemblerARM32::cmn()
void Assembler::cmn(Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), CMN, 1, rn, R0, o);
}

// Moved to ARM32::AssemblerARM32::orr()
void Assembler::orr(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ORR, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::orr()
void Assembler::orrs(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), ORR, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::mov()
// TODO(kschimpf) other forms of move.
void Assembler::mov(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MOV, 0, R0, rd, o);
}
#endif

void Assembler::movs(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MOV, 1, R0, rd, o);
}


#if 0
// Moved to ARM32::AssemblerARM32::bic()
void Assembler::bic(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), BIC, 0, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::bic()
void Assembler::bics(Register rd, Register rn, Operand o, Condition cond) {
  EmitType01(cond, o.type(), BIC, 1, rn, rd, o);
}

// Moved to ARM32::AssemblerARM32::mvn()
void Assembler::mvn(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MVN, 0, R0, rd, o);
}

// Moved to ARM32::AssemblerARM32::mvn()
void Assembler::mvns(Register rd, Operand o, Condition cond) {
  EmitType01(cond, o.type(), MVN, 1, R0, rd, o);
}

// Moved to ARM32::AssemblerARM32::clz()
void Assembler::clz(Register rd, Register rm, Condition cond) {
  ASSERT(rd != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(rd != PC);
  ASSERT(rm != PC);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B22 | B21 | (0xf << 16) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::movw()
void Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}


// Moved to ARM32::AssemblerARM32::movt()
void Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitMulOp()
void Assembler::EmitMulOp(Condition cond, int32_t opcode,
                          Register rd, Register rn,
                          Register rm, Register rs) {
  ASSERT(rd != kNoRegister);
  ASSERT(rn != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(rs != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = opcode |
      (static_cast<int32_t>(cond) << kConditionShift) |
      (static_cast<int32_t>(rn) << kRnShift) |
      (static_cast<int32_t>(rd) << kRdShift) |
      (static_cast<int32_t>(rs) << kRsShift) |
      B7 | B4 |
      (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::mul()
void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
  EmitMulOp(cond, 0, R0, rd, rn, rm);
}
#endif

// Like mul, but sets condition flags.
void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) {
  EmitMulOp(cond, B20, R0, rd, rn, rm);
}

#if 0
// Moved to ARM32::AssemblerARM32::mla()
void Assembler::mla(Register rd, Register rn,
                    Register rm, Register ra, Condition cond) {
  // rd <- ra + rn * rm.
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B21, ra, rd, rn, rm);
}

// Moved to ARM32::AssemblerARM32::mls()
void Assembler::mls(Register rd, Register rn,
                    Register rm, Register ra, Condition cond) {
  // rd <- ra - rn * rm.
  if (TargetCPUFeatures::arm_version() == ARMv7) {
    // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
    EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
  } else {
    mul(IP, rn, rm, cond);
    sub(rd, ra, Operand(IP), cond);
  }
}
#endif

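// Signed multiply long: rd_hi:rd_lo <- rn * rm (64-bit signed product).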
void Assembler::smull(Register rd_lo, Register rd_hi,
                      Register rn, Register rm, Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);
}

#if 0
// Moved to ARM32::AssemblerARM32::umull()
void Assembler::umull(Register rd_lo, Register rd_hi,
                      Register rn, Register rm, Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
}
#endif

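// Unsigned multiply accumulate long: rd_hi:rd_lo <- rd_hi:rd_lo + rn * rm.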
void Assembler::umlal(Register rd_lo, Register rd_hi,
                      Register rn, Register rm, Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm);
}


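// Unsigned multiply accumulate accumulate long:
// rd_hi:rd_lo <- rn * rm + rd_lo + rd_hi. ARMv5TE has no UMAAL, so there
// the operation is synthesized with umlal/adds/adc through IP.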
void Assembler::umaal(Register rd_lo, Register rd_hi,
                      Register rn, Register rm) {
  ASSERT(rd_lo != IP);
  ASSERT(rd_hi != IP);
  ASSERT(rn != IP);
  ASSERT(rm != IP);
  if (TargetCPUFeatures::arm_version() != ARMv5TE) {
    // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
    EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm);
  } else {
    mov(IP, Operand(0));
    umlal(rd_lo, IP, rn, rm);
    adds(rd_lo, rd_lo, Operand(rd_hi));
    adc(rd_hi, IP, Operand(0));
  }
}


#if 0
// Moved to ARM32::AssemblerARM32::emitDivOp()
void Assembler::EmitDivOp(Condition cond, int32_t opcode,
                          Register rd, Register rn, Register rm) {
  ASSERT(TargetCPUFeatures::integer_division_supported());
  ASSERT(rd != kNoRegister);
  ASSERT(rn != kNoRegister);
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = opcode |
    (static_cast<int32_t>(cond) << kConditionShift) |
    (static_cast<int32_t>(rn) << kDivRnShift) |
    (static_cast<int32_t>(rd) << kDivRdShift) |
      // TODO(kschimpf): Why not also: B15 | B14 | B13 | B12?
    B26 | B25 | B24 | B20 | B4 |
    (static_cast<int32_t>(rm) << kDivRmShift);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::sdiv()
void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
  EmitDivOp(cond, 0, rd, rn, rm);
}

// Moved to ARM32::AssemblerARM32::udiv()
void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
  EmitDivOp(cond, B21, rd, rn, rm);
}

// Moved to ARM32::AssemblerARM32::ldr()
void Assembler::ldr(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, false, rd, ad);
}

// Moved to ARM32::AssemblerARM32::str()
void Assembler::str(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, false, rd, ad);
}

// Moved to ARM32::AssemblerARM32::ldr()
void Assembler::ldrb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, true, rd, ad);
}

// Moved to ARM32::AssemblerARM32::str()
void Assembler::strb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, true, rd, ad);
}
#endif

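// Halfword, signed-byte, and doubleword transfers use ARM addressing mode 3.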
void Assembler::ldrh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
}


void Assembler::strh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
}


void Assembler::ldrsb(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
}


void Assembler::ldrsh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
}


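// ldrd/strd transfer the even/odd register pair (rd, rd+1); on ARMv5TE the
// pair is synthesized from two single-word transfers instead.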
void Assembler::ldrd(Register rd, Register rn, int32_t offset, Condition cond) {
  ASSERT((rd % 2) == 0);
  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    const Register rd2 = static_cast<Register>(static_cast<int32_t>(rd) + 1);
    ldr(rd, Address(rn, offset), cond);
    ldr(rd2, Address(rn, offset + kWordSize), cond);
  } else {
    EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset));
  }
}


void Assembler::strd(Register rd, Register rn, int32_t offset, Condition cond) {
  ASSERT((rd % 2) == 0);
  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    const Register rd2 = static_cast<Register>(static_cast<int32_t>(rd) + 1);
    str(rd, Address(rn, offset), cond);
    str(rd2, Address(rn, offset + kWordSize), cond);
  } else {
    EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset));
  }
}

#if 0
// Folded into ARM32::AssemblerARM32::popList(), since it is its only
// use (and doesn't implement the general ARM LDM instruction).
void Assembler::ldm(BlockAddressMode am, Register base, RegList regs,
                    Condition cond) {
  ASSERT(regs != 0);
  EmitMultiMemOp(cond, am, true, base, regs);
}

// Folded into ARM32::AssemblerARM32::pushList(), since it is its only
// use (and doesn't implement the general ARM STM instruction).
void Assembler::stm(BlockAddressMode am, Register base, RegList regs,
                    Condition cond) {
  ASSERT(regs != 0);
  EmitMultiMemOp(cond, am, false, base, regs);
}

// Moved to ARM32::AssemblerARM32::ldrex()
void Assembler::ldrex(Register rt, Register rn, Condition cond) {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  ASSERT(rn != kNoRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     L   |
                     (static_cast<int32_t>(rn) << kLdExRnShift) |
                     (static_cast<int32_t>(rt) << kLdExRtShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::strex()
void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  ASSERT(rn != kNoRegister);
  ASSERT(rd != kNoRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     (static_cast<int32_t>(rn) << kStrExRnShift) |
                     (static_cast<int32_t>(rd) << kStrExRdShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 |
                     (static_cast<int32_t>(rt) << kStrExRtShift);
  Emit(encoding);
}
#endif

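// Clears the local exclusive-access monitor set up by ldrex; not available
// on ARMv5TE (asserted below) and encoded with the special (0xf) condition.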
void Assembler::clrex() {
  ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
  int32_t encoding = (kSpecialCondition << kConditionShift) |
                     B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
  Emit(encoding);
}

#if 0
// Moved to ARM32::AssemblerARM32::nop().
void Assembler::nop(Condition cond) {
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B25 | B24 | B21 | (0xf << 12);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovsr().
void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sn != kNoSRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovrs().
void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sn != kNoSRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B20 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}
#endif


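// Copies rt and rt2 into the consecutive S-register pair (sm, sm+1).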
void Assembler::vmovsrr(SRegister sm, Register rt, Register rt2,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sm != kNoSRegister);
  ASSERT(sm != S31);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


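// Copies the consecutive S-register pair (sm, sm+1) into rt and rt2.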
void Assembler::vmovrrs(Register rt, Register rt2, SRegister sm,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sm != kNoSRegister);
  ASSERT(sm != S31);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(rt != rt2);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}

#if 0
// Moved to ARM32::AssemblerARM32::vmovdqir().
void Assembler::vmovdr(DRegister dn, int i, Register rt, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT((i == 0) || (i == 1));
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(dn != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     (i*B21) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) | B4;
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovdrr().
void Assembler::vmovdrr(DRegister dm, Register rt, Register rt2,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dm != kNoDRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovrrd().
void Assembler::vmovrrd(Register rt, Register rt2, DRegister dm,
                        Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dm != kNoDRegister);
  ASSERT(rt != kNoRegister);
  ASSERT(rt != SP);
  ASSERT(rt != PC);
  ASSERT(rt2 != kNoRegister);
  ASSERT(rt2 != SP);
  ASSERT(rt2 != PC);
  ASSERT(rt != rt2);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vldrs()
void Assembler::vldrs(SRegister sd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vstrs()
void Assembler::vstrs(SRegister sd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
  ASSERT(sd != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}

void Assembler::vldrd(DRegister dd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}
#endif

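// Stores a double-precision register; the base register must not be PC
// (asserted below).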
void Assembler::vstrd(DRegister dd, Address ad, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
  ASSERT(dd != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}

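// Encodes a VFP multiple load/store (vldm/vstm) of `count` consecutive
// S-registers starting at `start`.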
void Assembler::EmitMultiVSMemOp(Condition cond,
                                 BlockAddressMode am,
                                 bool load,
                                 Register base,
                                 SRegister start,
                                 uint32_t count) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(start != kNoSRegister);
  ASSERT(static_cast<int32_t>(start) + count <= kNumberOfSRegisters);

  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B11 | B9 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     ((static_cast<int32_t>(start) & 0x1) ? D : 0) |
                     ((static_cast<int32_t>(start) >> 1) << 12) |
                     count;
  Emit(encoding);
}


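// Same, for `count` consecutive D-registers. On ARMv5TE the low bit of the
// immediate is set, which presumably selects the FLDMX/FSTMX variant.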
void Assembler::EmitMultiVDMemOp(Condition cond,
                                 BlockAddressMode am,
                                 bool load,
                                 Register base,
                                 DRegister start,
                                 int32_t count) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(base != kNoRegister);
  ASSERT(cond != kNoCondition);
  ASSERT(start != kNoDRegister);
  ASSERT(static_cast<int32_t>(start) + count <= kNumberOfDRegisters);
  const int armv5te = TargetCPUFeatures::arm_version() == ARMv5TE ? 1 : 0;

  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B11 | B9 | B8 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     ((static_cast<int32_t>(start) & 0x10) ? D : 0) |
                     ((static_cast<int32_t>(start) & 0xf) << 12) |
                     (count << 1) | armv5te;
  Emit(encoding);
}


void Assembler::vldms(BlockAddressMode am, Register base,
                      SRegister first, SRegister last, Condition cond) {
  ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
  ASSERT(last > first);
  EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1);
}


void Assembler::vstms(BlockAddressMode am, Register base,
                      SRegister first, SRegister last, Condition cond) {
  ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
  ASSERT(last > first);
  EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1);
}


void Assembler::vldmd(BlockAddressMode am, Register base,
                      DRegister first, intptr_t count, Condition cond) {
  ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
  ASSERT(count <= 16);
  ASSERT(first + count <= kNumberOfDRegisters);
  EmitMultiVDMemOp(cond, am, true, base, first, count);
}


void Assembler::vstmd(BlockAddressMode am, Register base,
                      DRegister first, intptr_t count, Condition cond) {
  ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
  ASSERT(count <= 16);
  ASSERT(first + count <= kNumberOfDRegisters);
  EmitMultiVDMemOp(cond, am, false, base, first, count);
}

#if 0
// Moved to ARM32::AssemblerARM32::emitVFPsss
void Assembler::EmitVFPsss(Condition cond, int32_t opcode,
                           SRegister sd, SRegister sn, SRegister sm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(sn != kNoSRegister);
  ASSERT(sm != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(sn) & 1)*B7) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitVFPddd
void Assembler::EmitVFPddd(Condition cond, int32_t opcode,
                           DRegister dd, DRegister dn, DRegister dm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(dn != kNoDRegister);
  ASSERT(dm != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vmovss()
void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vmovdd()
void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
}

// Moved to ARM32::AssemblerARM32::vmovs()
bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
  if (TargetCPUFeatures::arm_version() != ARMv7) {
    return false;
  }
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
        ((imm32 >> 19) & ((1 << 6) - 1));
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
               sd, S0, S0);
    return true;
  }
  return false;
}

// Moved to ARM32::AssemblerARM32::vmovd()
bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
  if (TargetCPUFeatures::arm_version() != ARMv7) {
    return false;
  }
  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
        ((imm64 >> 48) & ((1 << 6) - 1));
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
               dd, D0, D0);
    return true;
  }
  return false;
}

// Moved to ARM32::AssemblerARM32::vadds()
void Assembler::vadds(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vaddd()
void Assembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vsubs()
void Assembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vsubd()
void Assembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vmuls()
void Assembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vmuld()
void Assembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vmlas()
void Assembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, 0, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vmlad()
void Assembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, 0, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vmlss()
void Assembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B6, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vmlsd()
void Assembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B6, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vdivs()
void Assembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B23, sd, sn, sm);
}

// Moved to ARM32::AssemblerARM32::vdivd()
void Assembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B23, dd, dn, dm);
}

// Moved to ARM32::AssemblerARM32::vabss().
void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vabsd().
void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
}
#endif

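// Floating-point negation: sd <- -sm (single), dd <- -dm (double).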
void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
}


void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vsqrts().
void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vsqrtd().
void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}

// Moved to ARM32::AssemblerARM32::emitVFPsd
void Assembler::EmitVFPsd(Condition cond, int32_t opcode,
                          SRegister sd, DRegister dm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(sd != kNoSRegister);
  ASSERT(dm != kNoDRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::emitVFPds
void Assembler::EmitVFPds(Condition cond, int32_t opcode,
                          DRegister dd, SRegister sm) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(dd != kNoDRegister);
  ASSERT(sm != kNoSRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::vcvtsd().
void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
}

// Moved to ARM32::AssemblerARM32::vcvtds().
void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtis()
void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
}
#endif

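// Converts the double-precision value in dm to a signed 32-bit integer in sd.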
void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vcvtsi()
void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtdi()
void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtus().
void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtud().
void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
}

// Moved to ARM32::AssemblerARM32::vcvtsu()
void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vcvtdu()
void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}

// Moved to ARM32::AssemblerARM32::vcmps().
void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}

// Moved to ARM32::AssemblerARM32::vcmpd().
void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}

// Moved to ARM32::AssemblerARM32::vcmpsz().
void Assembler::vcmpsz(SRegister sd, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}

// Moved to ARM32::AssemblerARM32::vcmpdz().
void Assembler::vcmpdz(DRegister dd, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}

// APSR_nzcv version moved to ARM32::AssemblerARM32::vmrsAPSR_nzcv()
void Assembler::vmrs(Register rd, Condition cond) {
  ASSERT(TargetCPUFeatures::vfp_supported());
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
                     (static_cast<int32_t>(rd)*B12) |
                     B11 | B9 | B4;
  Emit(encoding);
}
#endif

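// Transfers the VFP status flags (FPSCR N, Z, C, V) to the APSR so that a
// following conditional instruction can use the result of a VFP compare.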
void Assembler::vmstat(Condition cond) {
  vmrs(APSR, cond);
}


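// Maps an operand size to log2(element size in bytes), the value carried in
// the two-bit size field of NEON encodings.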
static inline int ShiftOfOperandSize(OperandSize size) {
  switch (size) {
    case kByte:
    case kUnsignedByte:
      return 0;
    case kHalfword:
    case kUnsignedHalfword:
      return 1;
    case kWord:
    case kUnsignedWord:
      return 2;
    case kWordPair:
      return 3;
    case kSWord:
    case kDWord:
      return 0;
    default:
      UNREACHABLE();
      break;
  }

  UNREACHABLE();
  return -1;
}

#if 0
// Moved to ARM32::AssemblerARM32::emitSIMDqqq()
void Assembler::EmitSIMDqqq(int32_t opcode, OperandSize size,
                            QRegister qd, QRegister qn, QRegister qm) {
  ASSERT(TargetCPUFeatures::neon_supported());
  int sz = ShiftOfOperandSize(size);
  int32_t encoding =
      (static_cast<int32_t>(kSpecialCondition) << kConditionShift) |
      B25 | B6 |
      opcode | ((sz & 0x3) * B20) |
      ((static_cast<int32_t>(qd * 2) >> 4)*B22) |
      ((static_cast<int32_t>(qn * 2) & 0xf)*B16) |
      ((static_cast<int32_t>(qd * 2) & 0xf)*B12) |
      ((static_cast<int32_t>(qn * 2) >> 4)*B7) |
      ((static_cast<int32_t>(qm * 2) >> 4)*B5) |
      (static_cast<int32_t>(qm * 2) & 0xf);
  Emit(encoding);
}
#endif

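// Encodes a three-operand NEON instruction on D-registers; the Q-register
// form above maps each Q-register to its first D-register (hence the * 2).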
void Assembler::EmitSIMDddd(int32_t opcode, OperandSize size,
                            DRegister dd, DRegister dn, DRegister dm) {
  ASSERT(TargetCPUFeatures::neon_supported());
  int sz = ShiftOfOperandSize(size);
  int32_t encoding =
      (static_cast<int32_t>(kSpecialCondition) << kConditionShift) |
      B25 |
      opcode | ((sz & 0x3) * B20) |
      ((static_cast<int32_t>(dd) >> 4)*B22) |
      ((static_cast<int32_t>(dn) & 0xf)*B16) |
      ((static_cast<int32_t>(dd) & 0xf)*B12) |
      ((static_cast<int32_t>(dn) >> 4)*B7) |
      ((static_cast<int32_t>(dm) >> 4)*B5) |
      (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}


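// Register move qd <- qm, encoded as VORR qd, qm, qm.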
void Assembler::vmovq(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vaddqi().
void Assembler::vaddqi(OperandSize sz,
                       QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vaddqf().
void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm);
}
#endif

void Assembler::vsubqi(OperandSize sz,
                       QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B11, sz, qd, qn, qm);
}


void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vmulqi().
void Assembler::vmulqi(OperandSize sz,
                       QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vmulqf().
void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vshlqi().
void Assembler::vshlqi(OperandSize sz,
                       QRegister qd, QRegister qm, QRegister qn) {
  EmitSIMDqqq(B25 | B10, sz, qd, qn, qm);
}


// Moved to ARM32::AssemblerARM32::vshlqu().
void Assembler::vshlqu(OperandSize sz,
                       QRegister qd, QRegister qm, QRegister qn) {
  EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::veorq()
void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vorrq()
void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qn, qm);
}
#endif

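// Bitwise OR NOT: qd <- qn | ~qm.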
void Assembler::vornq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B20 | B8 | B4, kByte, qd, qn, qm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vandq()
void Assembler::vandq(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B8 | B4, kByte, qd, qn, qm);
}

void Assembler::vmvnq(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B25 | B24 | B23 | B10 | B8 | B7, kWordPair, qd, Q0, qm);
}
#endif


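// Per-lane single-precision minimum and maximum.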
void Assembler::vminqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
}


void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
}

#if 0
// Moved to ARM32::AssemblerARM32::vabsq().
void Assembler::vabsqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord,
              qd, Q0, qm);
}

// Moved to ARM32::AssemblerARM32::vnegqs().
void Assembler::vnegqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord,
              qd, Q0, qm);
}
#endif


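// Reciprocal and reciprocal-square-root estimates, plus the corresponding
// Newton-Raphson step instructions (vrecps/vrsqrts) used to refine them.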
void Assembler::vrecpeqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord,
              qd, Q0, qm);
}


void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
}


void Assembler::vrsqrteqs(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7,
              kSWord, qd, Q0, qm);
}


void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
}


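// Duplicates lane idx of dm across every lane of qd; the element size and
// lane index are packed into the imm4 field computed in `code` below.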
void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm, int idx) {
  ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair));
  int code = 0;

  switch (sz) {
    case kByte:
    case kUnsignedByte: {
      ASSERT((idx >= 0) && (idx < 8));
      code = 1 | (idx << 1);
      break;
    }
    case kHalfword:
    case kUnsignedHalfword: {
      ASSERT((idx >= 0) && (idx < 4));
      code = 2 | (idx << 2);
      break;
    }
    case kWord:
    case kUnsignedWord: {
      ASSERT((idx >= 0) && (idx < 2));
      code = 4 | (idx << 3);
      break;
    }
    default: {
      break;
    }
  }

  EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair,
              static_cast<DRegister>(qd * 2),
              static_cast<DRegister>(code & 0xf),
              dm);
}


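// Table lookup: the bytes of dm select entries from the table formed by
// registers dn .. dn+len-1.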
void Assembler::vtbl(DRegister dd, DRegister dn, int len, DRegister dm) {
  ASSERT((len >= 1) && (len <= 4));
  EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm);
}


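// Interleaves the word-sized lanes of qd and qm (vzip.32).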
void Assembler::vzipqw(QRegister qd, QRegister qm) {
  EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm);
}


#if 0
// Moved to ARM32::AssemblerARM32::vceqqi().
void Assembler::vceqqi(OperandSize sz,
                       QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vceqqs().
void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vcgeqi().
void Assembler::vcgeqi(OperandSize sz,
                       QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vcugeqi().
void Assembler::vcugeqi(OperandSize sz,
                        QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vcgeqs().
void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vcgtqi().
void Assembler::vcgtqi(OperandSize sz,
                       QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B9 | B8, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vcugtqi().
void Assembler::vcugtqi(OperandSize sz,
                        QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::vcgtqs().
void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) {
  EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm);
}

// Moved to ARM32::AssemblerARM32::bkpt()
void Assembler::bkpt(uint16_t imm16) {
  Emit(BkptEncoding(imm16));
}
#endif


void Assembler::b(Label* label, Condition cond) {
  EmitBranch(cond, label, false);
}


#if 0
// Moved to ARM32::AssemblerARM32::bl()
void Assembler::bl(Label* label, Condition cond) {
  EmitBranch(cond, label, true);
}

// Moved to ARM32::AssemblerARM32::bx()
void Assembler::bx(Register rm, Condition cond) {
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}

// Moved to ARM32::AssemblerARM32::blx()
void Assembler::blx(Register rm, Condition cond) {
  ASSERT(rm != kNoRegister);
  ASSERT(cond != kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B5 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
#endif


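// Emits a recognizable marker (a TST of PC) followed by a jump around an
// embedded, never-executed branch to `label`, so the handler address can be
// recovered from the instruction stream.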
void Assembler::MarkExceptionHandler(Label* label) {
  EmitType01(AL, 1, TST, 1, PC, R0, Operand(0));
  Label l;
  b(&l);
  EmitBranch(AL, label, false);
  Bind(&l);
}


void Assembler::Drop(intptr_t stack_elements) {
  ASSERT(stack_elements >= 0);
  if (stack_elements > 0) {
    AddImmediate(SP, SP, stack_elements * kWordSize);
  }
}


intptr_t Assembler::FindImmediate(int32_t imm) {
  return object_pool_wrapper_.FindImmediate(imm);
}


1533// Uses a code sequence that can easily be decoded.
1534void Assembler::LoadWordFromPoolOffset(Register rd,
1535                                       int32_t offset,
1536                                       Register pp,
1537                                       Condition cond) {
1538  ASSERT((pp != PP) || constant_pool_allowed());
1539  ASSERT(rd != pp);
1540  int32_t offset_mask = 0;
1541  if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) {
1542    ldr(rd, Address(pp, offset), cond);
1543  } else {
1544    int32_t offset_hi = offset & ~offset_mask;  // signed
1545    uint32_t offset_lo = offset & offset_mask;  // unsigned
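    // E.g. (illustrative): for a kWord load, offset_mask is 0xfff, so an
    // offset of 0x12345 splits into offset_hi = 0x12000 and offset_lo = 0x345.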
    // Inline a simplified version of AddImmediate(rd, pp, offset_hi).
    Operand o;
    if (Operand::CanHold(offset_hi, &o)) {
      add(rd, pp, o, cond);
    } else {
      LoadImmediate(rd, offset_hi, cond);
      add(rd, pp, Operand(rd), cond);
    }
    ldr(rd, Address(rd, offset_lo), cond);
  }
}

void Assembler::CheckCodePointer() {
#ifdef DEBUG
  Label cid_ok, instructions_ok;
  Push(R0);
  Push(IP);
  CompareClassId(CODE_REG, kCodeCid, R0);
  b(&cid_ok, EQ);
  bkpt(0);
  Bind(&cid_ok);

  const intptr_t offset = CodeSize() + Instr::kPCReadOffset +
      Instructions::HeaderSize() - kHeapObjectTag;
  mov(R0, Operand(PC));
  AddImmediate(R0, R0, -offset);
  ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
  cmp(R0, Operand(IP));
  b(&instructions_ok, EQ);
  bkpt(1);
  Bind(&instructions_ok);
  Pop(IP);
  Pop(R0);
#endif
}


void Assembler::RestoreCodePointer() {
  ldr(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize));
  CheckCodePointer();
}


void Assembler::LoadPoolPointer(Register reg) {
  // Load new pool pointer.
  CheckCodePointer();
  ldr(reg, FieldAddress(CODE_REG, Code::object_pool_offset()));
  set_constant_pool_allowed(reg == PP);
}


void Assembler::LoadIsolate(Register rd) {
  ldr(rd, Address(THR, Thread::isolate_offset()));
}


bool Assembler::CanLoadFromObjectPool(const Object& object) const {
  ASSERT(!Thread::CanLoadFromThread(object));
  if (!constant_pool_allowed()) {
    return false;
  }

  ASSERT(object.IsNotTemporaryScopedHandle());
  ASSERT(object.IsOld());
  return true;
}


void Assembler::LoadObjectHelper(Register rd,
                                 const Object& object,
                                 Condition cond,
                                 bool is_unique,
                                 Register pp) {
  if (Thread::CanLoadFromThread(object)) {
    // Load common VM constants from the thread. This also works in places
    // where no constant pool is set up (e.g. intrinsic code).
    ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond);
  } else if (object.IsSmi()) {
    // Relocation doesn't apply to Smis.
    LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond);
  } else if (CanLoadFromObjectPool(object)) {
    // Make sure that class CallPattern is able to decode this load from the
    // object pool.
    const int32_t offset = ObjectPool::element_offset(
       is_unique ? object_pool_wrapper_.AddObject(object)
                 : object_pool_wrapper_.FindObject(object));
    LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond);
  } else {
    ASSERT(FLAG_allow_absolute_addresses);
    ASSERT(object.IsOld());
    // Make sure that class CallPattern is able to decode this load immediate.
    const int32_t object_raw = reinterpret_cast<int32_t>(object.raw());
    LoadImmediate(rd, object_raw, cond);
  }
}


void Assembler::LoadObject(Register rd, const Object& object, Condition cond) {
  LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP);
}


void Assembler::LoadUniqueObject(Register rd,
                                 const Object& object,
                                 Condition cond) {
  LoadObjectHelper(rd, object, cond, /* is_unique = */ true, PP);
}


void Assembler::LoadFunctionFromCalleePool(Register dst,
                                           const Function& function,
                                           Register new_pp) {
  const int32_t offset =
      ObjectPool::element_offset(object_pool_wrapper_.FindObject(function));
  LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp, AL);
}


void Assembler::LoadNativeEntry(Register rd,
                                const ExternalLabel* label,
                                Patchability patchable,
                                Condition cond) {
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindNativeEntry(label, patchable));
  LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
}


void Assembler::PushObject(const Object& object) {
  LoadObject(IP, object);
  Push(IP);
}


void Assembler::CompareObject(Register rn, const Object& object) {
  ASSERT(rn != IP);
  if (object.IsSmi()) {
    CompareImmediate(rn, reinterpret_cast<int32_t>(object.raw()));
  } else {
    LoadObject(IP, object);
    cmp(rn, Operand(IP));
  }
}


// Preserves object and value registers.
void Assembler::StoreIntoObjectFilterNoSmi(Register object,
                                           Register value,
                                           Label* no_update) {
  COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
                 (kOldObjectAlignmentOffset == 0));

  // Write-barrier triggers if the value is in the new space (has bit set) and
  // the object is in the old space (has bit cleared).
  // To check that, we compute value & ~object and skip the write barrier
  // if the bit is not set. We can't destroy the object.
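  // E.g. (illustrative): only a new-space value (bit set) stored into an
  // old-space object (bit clear) leaves the bit set in value & ~object; every
  // other combination clears it and branches to no_update.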
  bic(IP, value, Operand(object));
  tst(IP, Operand(kNewObjectAlignmentOffset));
  b(no_update, EQ);
}


// Preserves object and value registers.
void Assembler::StoreIntoObjectFilter(Register object,
                                      Register value,
                                      Label* no_update) {
  // For the value we are only interested in the new/old bit and the tag bit.
  // AND the new bit with the tag bit; the resulting bit will be 0 for a Smi.
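  // (Illustrative, assuming kObjectAlignmentLog2 == 3: the shift below moves
  // the tag bit (bit 0) into the new-space bit position, so for a Smi, whose
  // tag bit is 0, the AND always clears the tested bit.)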
  and_(IP, value, Operand(value, LSL, kObjectAlignmentLog2 - 1));
  // AND the result with the negated space bit of the object.
  bic(IP, IP, Operand(object));
  tst(IP, Operand(kNewObjectAlignmentOffset));
  b(no_update, EQ);
}


Operand Assembler::GetVerifiedMemoryShadow() {
  Operand offset;
  if (!Operand::CanHold(VerifiedMemory::offset(), &offset)) {
    FATAL1("Offset 0x%" Px " not representable", VerifiedMemory::offset());
  }
  return offset;
}


void Assembler::WriteShadowedField(Register base,
                                   intptr_t offset,
                                   Register value,
                                   Condition cond) {
  if (VerifiedMemory::enabled()) {
    ASSERT(base != value);
    Operand shadow(GetVerifiedMemoryShadow());
    add(base, base, shadow, cond);
    str(value, Address(base, offset), cond);
    sub(base, base, shadow, cond);
  }
  str(value, Address(base, offset), cond);
}


void Assembler::WriteShadowedFieldPair(Register base,
                                       intptr_t offset,
                                       Register value_even,
                                       Register value_odd,
                                       Condition cond) {
  ASSERT(value_odd == value_even + 1);
  if (VerifiedMemory::enabled()) {
    ASSERT(base != value_even);
    ASSERT(base != value_odd);
    Operand shadow(GetVerifiedMemoryShadow());
    add(base, base, shadow, cond);
    strd(value_even, base, offset, cond);
    sub(base, base, shadow, cond);
  }
  strd(value_even, base, offset, cond);
}


Register UseRegister(Register reg, RegList* used) {
  ASSERT(reg != SP);
  ASSERT(reg != PC);
  ASSERT((*used & (1 << reg)) == 0);
  *used |= (1 << reg);
  return reg;
}


Register AllocateRegister(RegList* used) {
  const RegList free = ~*used;
  return (free == 0) ?
      kNoRegister :
      UseRegister(static_cast<Register>(Utils::CountTrailingZeros(free)), used);
}


void Assembler::VerifiedWrite(const Address& address,
                              Register new_value,
                              FieldContent old_content) {
#if defined(DEBUG)
  ASSERT(address.mode() == Address::Offset ||
         address.mode() == Address::NegOffset);
  // Allocate temporary registers (and check for register collisions).
  RegList used = 0;
  UseRegister(new_value, &used);
  Register base = UseRegister(address.rn(), &used);
  if (address.rm() != kNoRegister) {
    UseRegister(address.rm(), &used);
  }
  Register old_value = AllocateRegister(&used);
  Register temp = AllocateRegister(&used);
  PushList(used);
  ldr(old_value, address);
  // First check that 'old_value' contains 'old_content'.
  // Smi test.
  tst(old_value, Operand(kHeapObjectTag));
  Label ok;
  switch (old_content) {
    case kOnlySmi:
      b(&ok, EQ);  // Smi is OK.
      Stop("Expected smi.");
      break;
    case kHeapObjectOrSmi:
      b(&ok, EQ);  // Smi is OK.
      // Non-smi case: Verify object pointer is word-aligned when untagged.
      COMPILE_ASSERT(kHeapObjectTag == 1);
      tst(old_value, Operand((kWordSize - 1) - kHeapObjectTag));
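      // (With kWordSize == 4 and kHeapObjectTag == 1 this tests bit 1, which
      // is clear for any properly tagged, word-aligned heap pointer.)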
      b(&ok, EQ);
      Stop("Expected heap object or Smi");
      break;
    case kEmptyOrSmiOrNull:
      b(&ok, EQ);  // Smi is OK.
      // Non-smi case: Check for the special zap word or null.
      // Note: Cannot use CompareImmediate, since IP may be in use.
      LoadImmediate(temp, Heap::kZap32Bits);
      cmp(old_value, Operand(temp));
      b(&ok, EQ);
      LoadObject(temp, Object::null_object());
      cmp(old_value, Operand(temp));
      b(&ok, EQ);
      Stop("Expected zapped, Smi or null");
      break;
    default:
      UNREACHABLE();
  }
  Bind(&ok);
  if (VerifiedMemory::enabled()) {
    Operand shadow_offset(GetVerifiedMemoryShadow());
    // Adjust the address to shadow.
    add(base, base, shadow_offset);
    ldr(temp, address);
    cmp(old_value, Operand(temp));
    Label match;
    b(&match, EQ);
    Stop("Write barrier verification failed");
    Bind(&match);
    // Write new value in shadow.
    str(new_value, address);
    // Restore original address.
    sub(base, base, shadow_offset);
  }
  str(new_value, address);
  PopList(used);
#else
  str(new_value, address);
#endif  // DEBUG
}


void Assembler::StoreIntoObject(Register object,
                                const Address& dest,
                                Register value,
                                bool can_value_be_smi) {
  ASSERT(object != value);
  VerifiedWrite(dest, value, kHeapObjectOrSmi);
  Label done;
  if (can_value_be_smi) {
    StoreIntoObjectFilter(object, value, &done);
  } else {
    StoreIntoObjectFilterNoSmi(object, value, &done);
  }
  // A store buffer update is required.
  RegList regs = (1 << CODE_REG) | (1 << LR);
  if (value != R0) {
    regs |= (1 << R0);  // Preserve R0.
  }
  PushList(regs);
  if (object != R0) {
    mov(R0, Operand(object));
  }
  ldr(CODE_REG, Address(THR, Thread::update_store_buffer_code_offset()));
  ldr(LR, Address(THR, Thread::update_store_buffer_entry_point_offset()));
  blx(LR);
  PopList(regs);
  Bind(&done);
}


void Assembler::StoreIntoObjectOffset(Register object,
                                      int32_t offset,
                                      Register value,
                                      bool can_value_be_smi) {
  int32_t ignored = 0;
  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
    StoreIntoObject(
        object, FieldAddress(object, offset), value, can_value_be_smi);
  } else {
    AddImmediate(IP, object, offset - kHeapObjectTag);
    StoreIntoObject(object, Address(IP), value, can_value_be_smi);
  }
}


void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         Register value,
                                         FieldContent old_content) {
  VerifiedWrite(dest, value, old_content);
#if defined(DEBUG)
  Label done;
  StoreIntoObjectFilter(object, value, &done);
  Stop("Store buffer update is required");
  Bind(&done);
#endif  // defined(DEBUG)
  // No store buffer update.
}


void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
                                               int32_t offset,
                                               Register value,
                                               FieldContent old_content) {
  int32_t ignored = 0;
  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
                             old_content);
  } else {
    AddImmediate(IP, object, offset - kHeapObjectTag);
    StoreIntoObjectNoBarrier(object, Address(IP), value, old_content);
  }
}


void Assembler::StoreIntoObjectNoBarrier(Register object,
                                         const Address& dest,
                                         const Object& value,
                                         FieldContent old_content) {
  ASSERT(value.IsSmi() || value.InVMHeap() ||
         (value.IsOld() && value.IsNotTemporaryScopedHandle()));
  // No store buffer update.
  LoadObject(IP, value);
  VerifiedWrite(dest, IP, old_content);
}


void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
                                               int32_t offset,
                                               const Object& value,
                                               FieldContent old_content) {
  int32_t ignored = 0;
  if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
                             old_content);
  } else {
    AddImmediate(IP, object, offset - kHeapObjectTag);
    StoreIntoObjectNoBarrier(object, Address(IP), value, old_content);
  }
}


void Assembler::InitializeFieldsNoBarrier(Register object,
                                          Register begin,
                                          Register end,
                                          Register value_even,
                                          Register value_odd) {
  ASSERT(value_odd == value_even + 1);
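  // The loop below advances 'begin' by two words per iteration: a pair is
  // stored while begin <= end (LS); if begin overshoots end (HI), exactly one
  // trailing word remains and is initialized after the loop.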
  Label init_loop;
  Bind(&init_loop);
  AddImmediate(begin, 2 * kWordSize);
  cmp(begin, Operand(end));
  WriteShadowedFieldPair(begin, -2 * kWordSize, value_even, value_odd, LS);
  b(&init_loop, CC);
  WriteShadowedField(begin, -2 * kWordSize, value_even, HI);
#if defined(DEBUG)
  Label done;
  StoreIntoObjectFilter(object, value_even, &done);
  StoreIntoObjectFilter(object, value_odd, &done);
  Stop("Store buffer update is required");
  Bind(&done);
#endif  // defined(DEBUG)
  // No store buffer update.
}


void Assembler::InitializeFieldsNoBarrierUnrolled(Register object,
                                                  Register base,
                                                  intptr_t begin_offset,
                                                  intptr_t end_offset,
                                                  Register value_even,
                                                  Register value_odd) {
  ASSERT(value_odd == value_even + 1);
  intptr_t current_offset = begin_offset;
  while (current_offset + kWordSize < end_offset) {
    WriteShadowedFieldPair(base, current_offset, value_even, value_odd);
    current_offset += 2 * kWordSize;
  }
  while (current_offset < end_offset) {
    WriteShadowedField(base, current_offset, value_even);
    current_offset += kWordSize;
  }
#if defined(DEBUG)
  Label done;
  StoreIntoObjectFilter(object, value_even, &done);
  StoreIntoObjectFilter(object, value_odd, &done);
  Stop("Store buffer update is required");
  Bind(&done);
#endif  // defined(DEBUG)
  // No store buffer update.
}


void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
#if defined(DEBUG)
  Label done;
  tst(value, Operand(kHeapObjectTag));
  b(&done, EQ);
  Stop("New value must be Smi.");
  Bind(&done);
#endif  // defined(DEBUG)
  VerifiedWrite(dest, value, kOnlySmi);
}


void Assembler::LoadClassId(Register result, Register object, Condition cond) {
  ASSERT(RawObject::kClassIdTagPos == 16);
  ASSERT(RawObject::kClassIdTagSize == 16);
  const intptr_t class_id_offset = Object::tags_offset() +
      RawObject::kClassIdTagPos / kBitsPerByte;
  ldrh(result, FieldAddress(object, class_id_offset), cond);
}


void Assembler::LoadClassById(Register result, Register class_id) {
  ASSERT(result != class_id);
  LoadIsolate(result);
  const intptr_t offset =
      Isolate::class_table_offset() + ClassTable::table_offset();
  LoadFromOffset(kWord, result, result, offset);
  ldr(result, Address(result, class_id, LSL, 2));
}


void Assembler::LoadClass(Register result, Register object, Register scratch) {
  ASSERT(scratch != result);
  LoadClassId(scratch, object);
  LoadClassById(result, scratch);
}


void Assembler::CompareClassId(Register object,
                               intptr_t class_id,
                               Register scratch) {
  LoadClassId(scratch, object);
  CompareImmediate(scratch, class_id);
}


void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
  tst(object, Operand(kSmiTagMask));
  LoadClassId(result, object, NE);
  LoadImmediate(result, kSmiCid, EQ);
}


void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
  LoadClassIdMayBeSmi(result, object);
  SmiTag(result);
}


void Assembler::ComputeRange(Register result,
                             Register value,
                             Register scratch,
                             Label* not_mint) {
  const Register hi = TMP;
  const Register lo = scratch;

  Label done;
  mov(result, Operand(value, LSR, kBitsPerWord - 1));
  tst(value, Operand(kSmiTagMask));
  b(&done, EQ);
  CompareClassId(value, kMintCid, result);
  b(not_mint, NE);
  ldr(hi, FieldAddress(value, Mint::value_offset() + kWordSize));
  ldr(lo, FieldAddress(value, Mint::value_offset()));
  rsb(result, hi, Operand(ICData::kInt32RangeBit));
  cmp(hi, Operand(lo, ASR, kBitsPerWord - 1));
  b(&done, EQ);
  LoadImmediate(result, ICData::kUint32RangeBit);  // Uint32
  tst(hi, Operand(hi));
  LoadImmediate(result, ICData::kInt64RangeBit, NE);  // Int64
  Bind(&done);
}


void Assembler::UpdateRangeFeedback(Register value,
                                    intptr_t index,
                                    Register ic_data,
                                    Register scratch1,
                                    Register scratch2,
                                    Label* miss) {
  ASSERT(ICData::IsValidRangeFeedbackIndex(index));
  ComputeRange(scratch1, value, scratch2, miss);
  ldr(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()));
  orr(scratch2,
      scratch2,
      Operand(scratch1, LSL, ICData::RangeFeedbackShift(index)));
  str(scratch2, FieldAddress(ic_data, ICData::state_bits_offset()));
}

#if 0
// Moved to ::canEncodeBranchOffset() in IceAssemblerARM32.cpp.
static bool CanEncodeBranchOffset(int32_t offset) {
  ASSERT(Utils::IsAligned(offset, 4));
  // Note: This check doesn't take advantage of the fact that offset>>2
  // is stored (allowing two more bits in address space).
  return Utils::IsInt(Utils::CountOneBits(kBranchOffsetMask), offset);
}

// Moved to ARM32::AssemblerARM32::encodeBranchOffset()
int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) {
  // The offset is off by 8 due to the way the ARM CPUs read PC.
  offset -= Instr::kPCReadOffset;

  if (!CanEncodeBranchOffset(offset)) {
    ASSERT(!use_far_branches());
    Thread::Current()->long_jump_base()->Jump(
        1, Object::branch_offset_error());
  }

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;
  offset &= kBranchOffsetMask;
  return (inst & ~kBranchOffsetMask) | offset;
}

// Moved to ARM32::AssemblerARM32::decodeBranchOffset()
int Assembler::DecodeBranchOffset(int32_t inst) {
  // Sign-extend, left-shift by 2, then add 8.
  return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset);
}
#endif

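// Reassembles the two 16-bit immediates of a movw/movt pair; each instruction
// encodes its immediate as imm4:imm12 in bits 19:16 and 11:0. E.g.
// (illustrative): movt carrying 0x1234 and movw carrying 0x5678 recombine to
// 0x12345678.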
static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) {
  int32_t offset = 0;
  offset |= (movt & 0xf0000) << 12;
  offset |= (movt & 0xfff) << 16;
  offset |= (movw & 0xf0000) >> 4;
  offset |= movw & 0xfff;
  return offset;
}

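// Reassembles a 32-bit value from the ARMv5/v6 mov/orr/orr/orr sequence, in
// which each instruction carries one byte of the value in its low 8 bits.
// E.g. (illustrative): bytes 0x12, 0x34, 0x56, 0x78 recombine to 0x12345678.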
static int32_t DecodeARMv6LoadImmediate(int32_t mov, int32_t or1,
                                        int32_t or2, int32_t or3) {
  int32_t offset = 0;
  offset |= (mov & 0xff) << 24;
  offset |= (or1 & 0xff) << 16;
  offset |= (or2 & 0xff) << 8;
  offset |= (or3 & 0xff);
  return offset;
}


class PatchFarBranch : public AssemblerFixup {
 public:
  PatchFarBranch() {}

  void Process(const MemoryRegion& region, intptr_t position) {
    const ARMVersion version = TargetCPUFeatures::arm_version();
    if ((version == ARMv5TE) || (version == ARMv6)) {
      ProcessARMv6(region, position);
    } else {
      ASSERT(version == ARMv7);
      ProcessARMv7(region, position);
    }
  }

 private:
  void ProcessARMv6(const MemoryRegion& region, intptr_t position) {
    const int32_t mov = region.Load<int32_t>(position);
    const int32_t or1 = region.Load<int32_t>(position + 1*Instr::kInstrSize);
    const int32_t or2 = region.Load<int32_t>(position + 2*Instr::kInstrSize);
    const int32_t or3 = region.Load<int32_t>(position + 3*Instr::kInstrSize);
    const int32_t bx = region.Load<int32_t>(position + 4*Instr::kInstrSize);

    if (((mov & 0xffffff00) == 0xe3a0c400) &&  // mov IP, (byte3 rot 4)
        ((or1 & 0xffffff00) == 0xe38cc800) &&  // orr IP, IP, (byte2 rot 8)
        ((or2 & 0xffffff00) == 0xe38ccc00) &&  // orr IP, IP, (byte1 rot 12)
        ((or3 & 0xffffff00) == 0xe38cc000)) {  // orr IP, IP, byte0
      const int32_t offset = DecodeARMv6LoadImmediate(mov, or1, or2, or3);
      const int32_t dest = region.start() + offset;
      const int32_t dest0 = (dest & 0x000000ff);
      const int32_t dest1 = (dest & 0x0000ff00) >> 8;
      const int32_t dest2 = (dest & 0x00ff0000) >> 16;
      const int32_t dest3 = (dest & 0xff000000) >> 24;
      const int32_t patched_mov = 0xe3a0c400 | dest3;
      const int32_t patched_or1 = 0xe38cc800 | dest2;
      const int32_t patched_or2 = 0xe38ccc00 | dest1;
      const int32_t patched_or3 = 0xe38cc000 | dest0;

      region.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_mov);
      region.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_or1);
      region.Store<int32_t>(position + 2 * Instr::kInstrSize, patched_or2);
      region.Store<int32_t>(position + 3 * Instr::kInstrSize, patched_or3);
      return;
    }

    // If the offset loading instructions aren't there, we must have replaced
    // the far branch with a near one, and so these instructions
    // should be NOPs.
    ASSERT((or1 == Instr::kNopInstruction) &&
           (or2 == Instr::kNopInstruction) &&
           (or3 == Instr::kNopInstruction) &&
           (bx == Instr::kNopInstruction));
  }


  void ProcessARMv7(const MemoryRegion& region, intptr_t position) {
    const int32_t movw = region.Load<int32_t>(position);
    const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize);
    const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize);

    if (((movt & 0xfff0f000) == 0xe340c000) &&  // movt IP, high
        ((movw & 0xfff0f000) == 0xe300c000)) {  // movw IP, low
      const int32_t offset = DecodeARMv7LoadImmediate(movt, movw);
      const int32_t dest = region.start() + offset;
      const uint16_t dest_high = Utils::High16Bits(dest);
      const uint16_t dest_low = Utils::Low16Bits(dest);
      const int32_t patched_movt =
          0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
      const int32_t patched_movw =
          0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);

      region.Store<int32_t>(position, patched_movw);
      region.Store<int32_t>(position + Instr::kInstrSize, patched_movt);
      return;
    }

    // If the offset loading instructions aren't there, we must have replaced
    // the far branch with a near one, and so these instructions
    // should be NOPs.
    ASSERT((movt == Instr::kNopInstruction) &&
           (bx == Instr::kNopInstruction));
  }

  virtual bool IsPointerOffset() const { return false; }
};


void Assembler::EmitFarBranch(Condition cond, int32_t offset, bool link) {
  buffer_.EmitFixup(new PatchFarBranch());
  LoadPatchableImmediate(IP, offset);
  if (link) {
    blx(IP, cond);
  } else {
    bx(IP, cond);
  }
}


void Assembler::EmitBranch(Condition cond, Label* label, bool link) {
  if (label->IsBound()) {
    const int32_t dest = label->Position() - buffer_.Size();
    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
      EmitFarBranch(cond, label->Position(), link);
    } else {
      EmitType5(cond, dest, link);
    }
  } else {
    const intptr_t position = buffer_.Size();
    if (use_far_branches()) {
      const int32_t dest = label->position_;
      EmitFarBranch(cond, dest, link);
    } else {
      // Use the offset field of the branch instruction for linking the sites.
      EmitType5(cond, label->position_, link);
    }
    label->LinkTo(position);
  }
}


void Assembler::BindARMv6(Label* label) {
  ASSERT(!label->IsBound());
  intptr_t bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    const int32_t position = label->Position();
    int32_t dest = bound_pc - position;
    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
      // Far branches are enabled and we can't encode the branch offset.

      // Grab instructions that load the offset.
      const int32_t mov =
          buffer_.Load<int32_t>(position);
      const int32_t or1 =
          buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
      const int32_t or2 =
          buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
      const int32_t or3 =
          buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize);

      // Change the offset from being relative to the branch to being relative
      // to the assembler buffer.
      dest = buffer_.Size();
      const int32_t dest0 = (dest & 0x000000ff);
      const int32_t dest1 = (dest & 0x0000ff00) >> 8;
      const int32_t dest2 = (dest & 0x00ff0000) >> 16;
      const int32_t dest3 = (dest & 0xff000000) >> 24;
      const int32_t patched_mov = 0xe3a0c400 | dest3;
      const int32_t patched_or1 = 0xe38cc800 | dest2;
      const int32_t patched_or2 = 0xe38ccc00 | dest1;
      const int32_t patched_or3 = 0xe38cc000 | dest0;

      // Rewrite the instructions.
      buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_mov);
      buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_or1);
      buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize, patched_or2);
      buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize, patched_or3);
      label->position_ = DecodeARMv6LoadImmediate(mov, or1, or2, or3);
    } else if (use_far_branches() && CanEncodeBranchOffset(dest)) {
      // Grab instructions that load the offset, and the branch.
      const int32_t mov =
          buffer_.Load<int32_t>(position);
      const int32_t or1 =
          buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
      const int32_t or2 =
          buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
      const int32_t or3 =
          buffer_.Load<int32_t>(position + 3 * Instr::kInstrSize);
      const int32_t branch =
          buffer_.Load<int32_t>(position + 4 * Instr::kInstrSize);

      // Grab the branch condition, and encode the link bit.
      const int32_t cond = branch & 0xf0000000;
      const int32_t link = (branch & 0x20) << 19;
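      // (Bit 5 distinguishes blx from bx in the far-branch sequence; shifted
      // left by 19 it lands in bit 24, the link bit of a B/BL encoding.)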

      // Encode the branch and the offset.
      const int32_t new_branch = cond | link | 0x0a000000;
      const int32_t encoded = EncodeBranchOffset(dest, new_branch);

      // Write the encoded branch instruction followed by four nops.
      buffer_.Store<int32_t>(position, encoded);
      buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
          Instr::kNopInstruction);
      buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize,
          Instr::kNopInstruction);
      buffer_.Store<int32_t>(position + 3 * Instr::kInstrSize,
          Instr::kNopInstruction);
      buffer_.Store<int32_t>(position + 4 * Instr::kInstrSize,
          Instr::kNopInstruction);

      label->position_ = DecodeARMv6LoadImmediate(mov, or1, or2, or3);
    } else {
      int32_t next = buffer_.Load<int32_t>(position);
      int32_t encoded = Assembler::EncodeBranchOffset(dest, next);
      buffer_.Store<int32_t>(position, encoded);
      label->position_ = Assembler::DecodeBranchOffset(next);
    }
  }
  label->BindTo(bound_pc);
}

#if 0
// Moved to ARM32::AssemblerARM32::bind(Label* Label)
// Note: Most of this code isn't needed because instruction selection has
// already been handled.
void Assembler::BindARMv7(Label* label) {
  ASSERT(!label->IsBound());
  intptr_t bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    const int32_t position = label->Position();
    int32_t dest = bound_pc - position;
    if (use_far_branches() && !CanEncodeBranchOffset(dest)) {
      // Far branches are enabled and we can't encode the branch offset.

      // Grab instructions that load the offset.
      const int32_t movw =
          buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
      const int32_t movt =
          buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);

      // Change the offset from being relative to the branch to being relative
      // to the assembler buffer.
      dest = buffer_.Size();
      const uint16_t dest_high = Utils::High16Bits(dest);
      const uint16_t dest_low = Utils::Low16Bits(dest);
      const int32_t patched_movt =
          0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
      const int32_t patched_movw =
          0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);

      // Rewrite the instructions.
      buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_movw);
      buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_movt);
      label->position_ = DecodeARMv7LoadImmediate(movt, movw);
    } else if (use_far_branches() && CanEncodeBranchOffset(dest)) {
      // Far branches are enabled, but we can encode the branch offset.

      // Grab instructions that load the offset, and the branch.
      const int32_t movw =
          buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
      const int32_t movt =
          buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
      const int32_t branch =
          buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);

      // Grab the branch condition, and encode the link bit.
      const int32_t cond = branch & 0xf0000000;
      const int32_t link = (branch & 0x20) << 19;

      // Encode the branch and the offset.
      const int32_t new_branch = cond | link | 0x0a000000;
      const int32_t encoded = EncodeBranchOffset(dest, new_branch);

      // Write the encoded branch instruction followed by two nops.
      buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
          encoded);
      buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
          Instr::kNopInstruction);
      buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize,
          Instr::kNopInstruction);

      label->position_ = DecodeARMv7LoadImmediate(movt, movw);
    } else {
      int32_t next = buffer_.Load<int32_t>(position);
      int32_t encoded = Assembler::EncodeBranchOffset(dest, next);
      buffer_.Store<int32_t>(position, encoded);
      label->position_ = Assembler::DecodeBranchOffset(next);
    }
  }
  label->BindTo(bound_pc);
}
#endif


void Assembler::Bind(Label* label) {
  const ARMVersion version = TargetCPUFeatures::arm_version();
  if ((version == ARMv5TE) || (version == ARMv6)) {
    BindARMv6(label);
  } else {
    ASSERT(version == ARMv7);
    BindARMv7(label);
  }
}


OperandSize Address::OperandSizeFor(intptr_t cid) {
  switch (cid) {
    case kArrayCid:
    case kImmutableArrayCid:
      return kWord;
    case kOneByteStringCid:
    case kExternalOneByteStringCid:
      return kByte;
    case kTwoByteStringCid:
    case kExternalTwoByteStringCid:
      return kHalfword;
    case kTypedDataInt8ArrayCid:
      return kByte;
    case kTypedDataUint8ArrayCid:
    case kTypedDataUint8ClampedArrayCid:
    case kExternalTypedDataUint8ArrayCid:
    case kExternalTypedDataUint8ClampedArrayCid:
      return kUnsignedByte;
    case kTypedDataInt16ArrayCid:
      return kHalfword;
    case kTypedDataUint16ArrayCid:
      return kUnsignedHalfword;
    case kTypedDataInt32ArrayCid:
      return kWord;
    case kTypedDataUint32ArrayCid:
      return kUnsignedWord;
    case kTypedDataInt64ArrayCid:
    case kTypedDataUint64ArrayCid:
      UNREACHABLE();
      return kByte;
    case kTypedDataFloat32ArrayCid:
      return kSWord;
    case kTypedDataFloat64ArrayCid:
      return kDWord;
    case kTypedDataFloat32x4ArrayCid:
    case kTypedDataInt32x4ArrayCid:
    case kTypedDataFloat64x2ArrayCid:
      return kRegList;
    case kTypedDataInt8ArrayViewCid:
      UNREACHABLE();
      return kByte;
    default:
      UNREACHABLE();
      return kByte;
  }
}


bool Address::CanHoldLoadOffset(OperandSize size,
                                int32_t offset,
                                int32_t* offset_mask) {
  switch (size) {
    case kByte:
    case kHalfword:
    case kUnsignedHalfword:
    case kWordPair: {
      *offset_mask = 0xff;
      return Utils::IsAbsoluteUint(8, offset);  // Addressing mode 3.
    }
    case kUnsignedByte:
    case kWord:
    case kUnsignedWord: {
      *offset_mask = 0xfff;
      return Utils::IsAbsoluteUint(12, offset);  // Addressing mode 2.
    }
    case kSWord:
    case kDWord: {
      *offset_mask = 0x3fc;  // Multiple of 4.
      // VFP addressing mode.
      return (Utils::IsAbsoluteUint(10, offset) && Utils::IsAligned(offset, 4));
    }
    case kRegList: {
      *offset_mask = 0x0;
      return offset == 0;
    }
    default: {
      UNREACHABLE();
      return false;
    }
  }
}


bool Address::CanHoldStoreOffset(OperandSize size,
                                 int32_t offset,
                                 int32_t* offset_mask) {
  switch (size) {
    case kHalfword:
    case kUnsignedHalfword:
    case kWordPair: {
      *offset_mask = 0xff;
      return Utils::IsAbsoluteUint(8, offset);  // Addressing mode 3.
    }
    case kByte:
    case kUnsignedByte:
    case kWord:
    case kUnsignedWord: {
      *offset_mask = 0xfff;
      return Utils::IsAbsoluteUint(12, offset);  // Addressing mode 2.
    }
    case kSWord:
    case kDWord: {
      *offset_mask = 0x3fc;  // Multiple of 4.
      // VFP addressing mode.
      return (Utils::IsAbsoluteUint(10, offset) && Utils::IsAligned(offset, 4));
    }
    case kRegList: {
      *offset_mask = 0x0;
      return offset == 0;
    }
    default: {
      UNREACHABLE();
      return false;
    }
  }
}


bool Address::CanHoldImmediateOffset(
    bool is_load, intptr_t cid, int64_t offset) {
  int32_t offset_mask = 0;
  if (is_load) {
    return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask);
  } else {
    return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask);
  }
}

#if 0
// Moved to ARM32::AssemblerARM32::push().
void Assembler::Push(Register rd, Condition cond) {
  str(rd, Address(SP, -kWordSize, Address::PreIndex), cond);
}

// Moved to ARM32::AssemblerARM32::pop().
void Assembler::Pop(Register rd, Condition cond) {
  ldr(rd, Address(SP, kWordSize, Address::PostIndex), cond);
}

// Moved to ARM32::AssemblerARM32::pushList().
void Assembler::PushList(RegList regs, Condition cond) {
  stm(DB_W, SP, regs, cond);
}

// Moved to ARM32::AssemblerARM32::popList().
void Assembler::PopList(RegList regs, Condition cond) {
  ldm(IA_W, SP, regs, cond);
}
#endif

void Assembler::MoveRegister(Register rd, Register rm, Condition cond) {
  if (rd != rm) {
    mov(rd, Operand(rm), cond);
  }
}

#if 0
// Moved to ARM32::AssemblerARM32::lsl()
void Assembler::Lsl(Register rd, Register rm, const Operand& shift_imm,
                    Condition cond) {
  ASSERT(shift_imm.type() == 1);
  ASSERT(shift_imm.encoding() != 0);  // Do not use Lsl if no shift is wanted.
  mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond);
}

// Moved to ARM32::AssemblerARM32::lsl()
void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) {
  mov(rd, Operand(rm, LSL, rs), cond);
}

// Moved to ARM32::AssemblerARM32::lsr()
void Assembler::Lsr(Register rd, Register rm, const Operand& shift_imm,
                    Condition cond) {
  ASSERT(shift_imm.type() == 1);
  uint32_t shift = shift_imm.encoding();
  ASSERT(shift != 0);  // Do not use Lsr if no shift is wanted.
  if (shift == 32) {
    shift = 0;  // Comply with UAL syntax.
  }
  mov(rd, Operand(rm, LSR, shift), cond);
}

// Moved to ARM32::AssemblerARM32::lsr()
void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) {
  mov(rd, Operand(rm, LSR, rs), cond);
}

// Moved to ARM32::AssemblerARM32::asr()
void Assembler::Asr(Register rd, Register rm, const Operand& shift_imm,
                    Condition cond) {
  ASSERT(shift_imm.type() == 1);
  uint32_t shift = shift_imm.encoding();
  ASSERT(shift != 0);  // Do not use Asr if no shift is wanted.
  if (shift == 32) {
    shift = 0;  // Comply with UAL syntax.
  }
  mov(rd, Operand(rm, ASR, shift), cond);
}
#endif

void Assembler::Asrs(Register rd, Register rm, const Operand& shift_imm,
                     Condition cond) {
  ASSERT(shift_imm.type() == 1);
  uint32_t shift = shift_imm.encoding();
  ASSERT(shift != 0);  // Do not use Asrs if no shift is wanted.
  if (shift == 32) {
    shift = 0;  // Comply with UAL syntax.
  }
  movs(rd, Operand(rm, ASR, shift), cond);
}

#if 0
// Moved to ARM32::AssemblerARM32::asr()
void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) {
  mov(rd, Operand(rm, ASR, rs), cond);
}
#endif

void Assembler::Ror(Register rd, Register rm, const Operand& shift_imm,
                    Condition cond) {
  ASSERT(shift_imm.type() == 1);
  ASSERT(shift_imm.encoding() != 0);  // Use Rrx instruction.
  mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond);
}


void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) {
  mov(rd, Operand(rm, ROR, rs), cond);
}


void Assembler::Rrx(Register rd, Register rm, Condition cond) {
  mov(rd, Operand(rm, ROR, 0), cond);
}


void Assembler::SignFill(Register rd, Register rm, Condition cond) {
  Asr(rd, rm, Operand(31), cond);
}


void Assembler::Vreciprocalqs(QRegister qd, QRegister qm) {
  ASSERT(qm != QTMP);
  ASSERT(qd != QTMP);

  // Reciprocal estimate.
  vrecpeqs(qd, qm);
  // 2 Newton-Raphson steps.
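  // (Newton's method on f(x) = 1/x - Q gives x_{n+1} = x_n * (2 - Q * x_n);
  // vrecpsqs produces the (2 - Q * x_n) factor.)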
  vrecpsqs(QTMP, qm, qd);
  vmulqs(qd, qd, QTMP);
  vrecpsqs(QTMP, qm, qd);
  vmulqs(qd, qd, QTMP);
}


void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) {
  ASSERT(qm != QTMP);
  ASSERT(qd != QTMP);

  // Reciprocal square root estimate.
  vrsqrteqs(qd, qm);
  // 2 Newton-Raphson steps. xn+1 = xn * (3 - Q1*xn^2) / 2.
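  // (This is Newton's method on f(x) = 1/x^2 - Q1, whose iterate converges to
  // 1/sqrt(Q1); vrsqrtsqs produces the (3 - Q1 * x_n^2) / 2 factor.)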
  // First step.
  vmulqs(QTMP, qd, qd);  // QTMP <- xn^2
  vrsqrtsqs(QTMP, qm, QTMP);  // QTMP <- (3 - Q1*QTMP) / 2.
  vmulqs(qd, qd, QTMP);  // xn+1 <- xn * QTMP
  // Second step.
  vmulqs(QTMP, qd, qd);
  vrsqrtsqs(QTMP, qm, QTMP);
  vmulqs(qd, qd, QTMP);
}


void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) {
  ASSERT(temp != QTMP);
  ASSERT(qm != QTMP);
  ASSERT(qd != QTMP);

  if (temp != kNoQRegister) {
    vmovq(temp, qm);
    qm = temp;
  }

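  // Compute sqrt(x) as 1 / (1 / sqrt(x)): take the reciprocal square root,
  // then its reciprocal.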
  VreciprocalSqrtqs(qd, qm);
  vmovq(qm, qd);
  Vreciprocalqs(qd, qm);
}


void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
  ASSERT(qd != QTMP);
  ASSERT(qn != QTMP);
  ASSERT(qm != QTMP);

  Vreciprocalqs(qd, qm);
  vmulqs(qd, qn, qd);
}


void Assembler::Branch(const StubEntry& stub_entry,
                       Patchability patchable,
                       Register pp,
                       Condition cond) {
  const Code& target_code = Code::Handle(stub_entry.code());
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target_code, patchable));
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond);
  ldr(IP, FieldAddress(CODE_REG, Code::entry_point_offset()), cond);
  bx(IP, cond);
}


void Assembler::BranchLink(const Code& target, Patchability patchable) {
  // Make sure that class CallPattern is able to patch the label referred
  // to by this code sequence.
  // For added code robustness, use 'blx lr' in a patchable sequence and
  // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
  const int32_t offset = ObjectPool::element_offset(
      object_pool_wrapper_.FindObject(target, patchable));
  LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
  ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset()));
  blx(LR);  // Use blx instruction so that the return branch prediction works.
}


void Assembler::BranchLink(const StubEntry& stub_entry,
                           Patchability patchable) {
  const Code& code = Code::Handle(stub_entry.code());
  BranchLink(code, patchable);
}


void Assembler::BranchLinkPatchable(const Code& target) {
  BranchLink(target, kPatchable);
}


void Assembler::BranchLink(const ExternalLabel* label) {
  LoadImmediate(LR, label->address());  // Target address is never patched.
  blx(LR);  // Use blx instruction so that the return branch prediction works.
}


void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) {
  BranchLinkPatchable(Code::Handle(stub_entry.code()));
}


void Assembler::BranchLinkOffset(Register base, int32_t offset) {
  ASSERT(base != PC);
  ASSERT(base != IP);
  LoadFromOffset(kWord, IP, base, offset);
  blx(IP);  // Use blx instruction so that the return branch prediction works.
}


void Assembler::LoadPatchableImmediate(
    Register rd, int32_t value, Condition cond) {
  const ARMVersion version = TargetCPUFeatures::arm_version();
  if ((version == ARMv5TE) || (version == ARMv6)) {
    // This sequence is patched in a few places, and should remain fixed.
    const uint32_t byte0 = (value & 0x000000ff);
    const uint32_t byte1 = (value & 0x0000ff00) >> 8;
    const uint32_t byte2 = (value & 0x00ff0000) >> 16;
    const uint32_t byte3 = (value & 0xff000000) >> 24;
    mov(rd, Operand(4, byte3), cond);
    orr(rd, rd, Operand(8, byte2), cond);
    orr(rd, rd, Operand(12, byte1), cond);
    orr(rd, rd, Operand(byte0), cond);
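    // (Each Operand(rotate, imm8) above encodes imm8 rotated right by
    // 2 * rotate bits, materializing byte3 << 24, byte2 << 16, byte1 << 8 and
    // byte0 in turn.)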
2813  } else {
2814    ASSERT(version == ARMv7);
2815    const uint16_t value_low = Utils::Low16Bits(value);
2816    const uint16_t value_high = Utils::High16Bits(value);
2817    movw(rd, value_low, cond);
2818    movt(rd, value_high, cond);
2819  }
2820}
2821
2822
2823void Assembler::LoadDecodableImmediate(
2824    Register rd, int32_t value, Condition cond) {
2825  const ARMVersion version = TargetCPUFeatures::arm_version();
2826  if ((version == ARMv5TE) || (version == ARMv6)) {
2827    if (constant_pool_allowed()) {
2828      const int32_t offset = Array::element_offset(FindImmediate(value));
2829      LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
2830    } else {
2831      LoadPatchableImmediate(rd, value, cond);
2832    }
2833  } else {
2834    ASSERT(version == ARMv7);
2835    movw(rd, Utils::Low16Bits(value), cond);
2836    const uint16_t value_high = Utils::High16Bits(value);
2837    if (value_high != 0) {
2838      movt(rd, value_high, cond);
2839    }
2840  }
2841}
2842
2843
2844void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
2845  Operand o;
2846  if (Operand::CanHold(value, &o)) {
2847    mov(rd, o, cond);
2848  } else if (Operand::CanHold(~value, &o)) {
2849    mvn(rd, o, cond);
2850  } else {
2851    LoadDecodableImmediate(rd, value, cond);
2852  }
2853}
2854
2855
2856void Assembler::LoadSImmediate(SRegister sd, float value, Condition cond) {
2857  if (!vmovs(sd, value, cond)) {
2858    const DRegister dd = static_cast<DRegister>(sd >> 1);
2859    const int index = sd & 1;
2860    LoadImmediate(IP, bit_cast<int32_t, float>(value), cond);
2861    vmovdr(dd, index, IP, cond);
2862  }
2863}
2864
2865
2866void Assembler::LoadDImmediate(DRegister dd,
2867                               double value,
2868                               Register scratch,
2869                               Condition cond) {
2870  ASSERT(scratch != PC);
2871  ASSERT(scratch != IP);
2872  if (!vmovd(dd, value, cond)) {
2873    // A scratch register and IP are needed to load an arbitrary double.
2874    ASSERT(scratch != kNoRegister);
2875    int64_t imm64 = bit_cast<int64_t, double>(value);
2876    LoadImmediate(IP, Utils::Low32Bits(imm64), cond);
2877    LoadImmediate(scratch, Utils::High32Bits(imm64), cond);
2878    vmovdrr(dd, IP, scratch, cond);
2879  }
2880}
2881
2882
2883void Assembler::LoadFromOffset(OperandSize size,
2884                               Register reg,
2885                               Register base,
2886                               int32_t offset,
2887                               Condition cond) {
2888  int32_t offset_mask = 0;
2889  if (!Address::CanHoldLoadOffset(size, offset, &offset_mask)) {
2890    ASSERT(base != IP);
2891    AddImmediate(IP, base, offset & ~offset_mask, cond);
2892    base = IP;
2893    offset = offset & offset_mask;
2894  }
2895  switch (size) {
2896    case kByte:
2897      ldrsb(reg, Address(base, offset), cond);
2898      break;
2899    case kUnsignedByte:
2900      ldrb(reg, Address(base, offset), cond);
2901      break;
2902    case kHalfword:
2903      ldrsh(reg, Address(base, offset), cond);
2904      break;
2905    case kUnsignedHalfword:
2906      ldrh(reg, Address(base, offset), cond);
2907      break;
2908    case kWord:
2909      ldr(reg, Address(base, offset), cond);
2910      break;
2911    case kWordPair:
2912      ldrd(reg, base, offset, cond);
2913      break;
2914    default:
2915      UNREACHABLE();
2916  }
2917}
2918
2919
2920void Assembler::StoreToOffset(OperandSize size,
2921                              Register reg,
2922                              Register base,
2923                              int32_t offset,
2924                              Condition cond) {
2925  int32_t offset_mask = 0;
2926  if (!Address::CanHoldStoreOffset(size, offset, &offset_mask)) {
2927    ASSERT(reg != IP);
2928    ASSERT(base != IP);
2929    AddImmediate(IP, base, offset & ~offset_mask, cond);
2930    base = IP;
2931    offset = offset & offset_mask;
2932  }
2933  switch (size) {
2934    case kByte:
2935      strb(reg, Address(base, offset), cond);
2936      break;
2937    case kHalfword:
2938      strh(reg, Address(base, offset), cond);
2939      break;
2940    case kWord:
2941      str(reg, Address(base, offset), cond);
2942      break;
2943    case kWordPair:
2944      strd(reg, base, offset, cond);
2945      break;
2946    default:
2947      UNREACHABLE();
2948  }
2949}
2950
2951
2952void Assembler::LoadSFromOffset(SRegister reg,
2953                                Register base,
2954                                int32_t offset,
2955                                Condition cond) {
2956  int32_t offset_mask = 0;
2957  if (!Address::CanHoldLoadOffset(kSWord, offset, &offset_mask)) {
2958    ASSERT(base != IP);
2959    AddImmediate(IP, base, offset & ~offset_mask, cond);
2960    base = IP;
2961    offset = offset & offset_mask;
2962  }
2963  vldrs(reg, Address(base, offset), cond);
2964}
2965
2966
2967void Assembler::StoreSToOffset(SRegister reg,
2968                               Register base,
2969                               int32_t offset,
2970                               Condition cond) {
2971  int32_t offset_mask = 0;
2972  if (!Address::CanHoldStoreOffset(kSWord, offset, &offset_mask)) {
2973    ASSERT(base != IP);
2974    AddImmediate(IP, base, offset & ~offset_mask, cond);
2975    base = IP;
2976    offset = offset & offset_mask;
2977  }
2978  vstrs(reg, Address(base, offset), cond);
2979}
2980
2981
2982void Assembler::LoadDFromOffset(DRegister reg,
2983                                Register base,
2984                                int32_t offset,
2985                                Condition cond) {
2986  int32_t offset_mask = 0;
2987  if (!Address::CanHoldLoadOffset(kDWord, offset, &offset_mask)) {
2988    ASSERT(base != IP);
2989    AddImmediate(IP, base, offset & ~offset_mask, cond);
2990    base = IP;
2991    offset = offset & offset_mask;
2992  }
2993  vldrd(reg, Address(base, offset), cond);
2994}
2995
2996
2997void Assembler::StoreDToOffset(DRegister reg,
2998                               Register base,
2999                               int32_t offset,
3000                               Condition cond) {
3001  int32_t offset_mask = 0;
3002  if (!Address::CanHoldStoreOffset(kDWord, offset, &offset_mask)) {
3003    ASSERT(base != IP);
3004    AddImmediate(IP, base, offset & ~offset_mask, cond);
3005    base = IP;
3006    offset = offset & offset_mask;
3007  }
3008  vstrd(reg, Address(base, offset), cond);
3009}
3010
3011
3012void Assembler::LoadMultipleDFromOffset(DRegister first,
3013                                        intptr_t count,
3014                                        Register base,
3015                                        int32_t offset) {
3016  ASSERT(base != IP);
3017  AddImmediate(IP, base, offset);
3018  vldmd(IA, IP, first, count);
3019}
3020
3021void Assembler::StoreMultipleDToOffset(DRegister first,
3022                                       intptr_t count,
3023                                       Register base,
3024                                       int32_t offset) {
3025  ASSERT(base != IP);
3026  AddImmediate(IP, base, offset);
3027  vstmd(IA, IP, first, count);
3028}
3029
3030
3031void Assembler::CopyDoubleField(
3032    Register dst, Register src, Register tmp1, Register tmp2, DRegister dtmp) {
3033  if (TargetCPUFeatures::vfp_supported()) {
3034    LoadDFromOffset(dtmp, src, Double::value_offset() - kHeapObjectTag);
3035    StoreDToOffset(dtmp, dst, Double::value_offset() - kHeapObjectTag);
3036  } else {
3037    LoadFromOffset(kWord, tmp1, src,
3038        Double::value_offset() - kHeapObjectTag);
3039    LoadFromOffset(kWord, tmp2, src,
3040        Double::value_offset() + kWordSize - kHeapObjectTag);
3041    StoreToOffset(kWord, tmp1, dst,
3042        Double::value_offset() - kHeapObjectTag);
3043    StoreToOffset(kWord, tmp2, dst,
3044        Double::value_offset() + kWordSize - kHeapObjectTag);
3045  }
3046}
3047
3048
3049void Assembler::CopyFloat32x4Field(
3050    Register dst, Register src, Register tmp1, Register tmp2, DRegister dtmp) {
3051  if (TargetCPUFeatures::neon_supported()) {
3052    LoadMultipleDFromOffset(dtmp, 2, src,
3053        Float32x4::value_offset() - kHeapObjectTag);
3054    StoreMultipleDToOffset(dtmp, 2, dst,
3055        Float32x4::value_offset() - kHeapObjectTag);
3056  } else {
3057    LoadFromOffset(kWord, tmp1, src,
3058        (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag);
3059    LoadFromOffset(kWord, tmp2, src,
3060        (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag);
3061    StoreToOffset(kWord, tmp1, dst,
3062        (Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag);
3063    StoreToOffset(kWord, tmp2, dst,
3064        (Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag);
3065
3066    LoadFromOffset(kWord, tmp1, src,
3067        (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag);
3068    LoadFromOffset(kWord, tmp2, src,
3069        (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag);
3070    StoreToOffset(kWord, tmp1, dst,
3071        (Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag);
3072    StoreToOffset(kWord, tmp2, dst,
3073        (Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag);
3074  }
3075}
3076
3077
void Assembler::CopyFloat64x2Field(
    Register dst, Register src, Register tmp1, Register tmp2, DRegister dtmp) {
  if (TargetCPUFeatures::neon_supported()) {
    LoadMultipleDFromOffset(dtmp, 2, src,
        Float64x2::value_offset() - kHeapObjectTag);
    StoreMultipleDToOffset(dtmp, 2, dst,
        Float64x2::value_offset() - kHeapObjectTag);
  } else {
    LoadFromOffset(kWord, tmp1, src,
        (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag);
    LoadFromOffset(kWord, tmp2, src,
        (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag);
    StoreToOffset(kWord, tmp1, dst,
        (Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag);
    StoreToOffset(kWord, tmp2, dst,
        (Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag);

    LoadFromOffset(kWord, tmp1, src,
        (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag);
    LoadFromOffset(kWord, tmp2, src,
        (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag);
    StoreToOffset(kWord, tmp1, dst,
        (Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag);
    StoreToOffset(kWord, tmp2, dst,
        (Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag);
  }
}


void Assembler::AddImmediate(Register rd, int32_t value, Condition cond) {
  AddImmediate(rd, rd, value, cond);
}


void Assembler::AddImmediate(Register rd, Register rn, int32_t value,
                             Condition cond) {
  if (value == 0) {
    if (rd != rn) {
      mov(rd, Operand(rn), cond);
    }
    return;
  }
  // We prefer to select the shorter code sequence rather than always using
  // add for positive values and sub for negative ones, which would slightly
  // improve the readability of the generated code for some constants.
  Operand o;
  if (Operand::CanHold(value, &o)) {
    add(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    sub(rd, rn, o, cond);
  } else {
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      mvn(IP, o, cond);
      add(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      mvn(IP, o, cond);
      sub(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, value, cond);
      add(rd, rn, Operand(IP), cond);
    }
  }
}


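// Worked example of the fallback chain in AddImmediate (illustrative
// encodings, assuming the standard ARM rotated-immediate operand form):
//   AddImmediate(R0, R1, 0xFF0000) -> add r0, r1, #0xFF0000  (imm8 ror 16)
//   AddImmediate(R0, R1, -4)       -> sub r0, r1, #4
//   AddImmediate(R0, R1, 0xFFFF)   -> none of 0xFFFF, -0xFFFF, ~0xFFFF or
//     ~(-0xFFFF) is a valid rotated immediate, so the constant is loaded
//     into IP via LoadDecodableImmediate and add r0, r1, ip is emitted.

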
void Assembler::AddImmediateSetFlags(Register rd, Register rn, int32_t value,
                                     Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    // Handles value == kMinInt32.
    adds(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
    subs(rd, rn, o, cond);
  } else {
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      mvn(IP, o, cond);
      adds(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
      mvn(IP, o, cond);
      subs(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, value, cond);
      adds(rd, rn, Operand(IP), cond);
    }
  }
}


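// Note on the kMinInt32 asserts above: negating kMinInt32 yields kMinInt32
// again, so rewriting "adds rd, rn, #v" as "subs rd, rn, #-v" (or vice
// versa) for that value would compute the right result bits but set the
// overflow and carry flags for the wrong operation; only the non-negated
// forms are safe there.

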
void Assembler::SubImmediateSetFlags(Register rd, Register rn, int32_t value,
                                     Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    // Handles value == kMinInt32.
    subs(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
    adds(rd, rn, o, cond);
  } else {
    ASSERT(rn != IP);
    if (Operand::CanHold(~value, &o)) {
      mvn(IP, o, cond);
      subs(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      ASSERT(value != kMinInt32);  // Would cause erroneous overflow detection.
      mvn(IP, o, cond);
      adds(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, value, cond);
      subs(rd, rn, Operand(IP), cond);
    }
  }
}


void Assembler::AndImmediate(Register rd, Register rs, int32_t imm,
                             Condition cond) {
  Operand o;
  if (Operand::CanHold(imm, &o)) {
    and_(rd, rs, Operand(o), cond);
  } else {
    LoadImmediate(TMP, imm, cond);
    and_(rd, rs, Operand(TMP), cond);
  }
}


void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) {
  Operand o;
  if (Operand::CanHold(value, &o)) {
    cmp(rn, o, cond);
  } else {
    ASSERT(rn != IP);
    LoadImmediate(IP, value, cond);
    cmp(rn, Operand(IP), cond);
  }
}


void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) {
  Operand o;
  if (Operand::CanHold(imm, &o)) {
    tst(rn, o, cond);
  } else {
    LoadImmediate(IP, imm, cond);
    tst(rn, Operand(IP), cond);
  }
}


void Assembler::IntegerDivide(Register result, Register left, Register right,
                              DRegister tmpl, DRegister tmpr) {
  ASSERT(tmpl != tmpr);
  if (TargetCPUFeatures::integer_division_supported()) {
    sdiv(result, left, right);
  } else {
    ASSERT(TargetCPUFeatures::vfp_supported());
    SRegister stmpl = static_cast<SRegister>(2 * tmpl);
    SRegister stmpr = static_cast<SRegister>(2 * tmpr);
    vmovsr(stmpl, left);
    vcvtdi(tmpl, stmpl);  // left is in tmpl.
    vmovsr(stmpr, right);
    vcvtdi(tmpr, stmpr);  // right is in tmpr.
    vdivd(tmpr, tmpl, tmpr);
    vcvtid(stmpr, tmpr);
    vmovrs(result, stmpr);
  }
}


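// Illustrative expansion of IntegerDivide(R0, R1, R2, D0, D1) when hardware
// division is unavailable:
//   vmovsr s0, r1       ; dividend into VFP
//   vcvtdi d0, s0       ; int -> double
//   vmovsr s2, r2       ; divisor into VFP
//   vcvtdi d1, s2
//   vdivd  d1, d0, d1   ; double-precision divide
//   vcvtid s2, d1       ; double -> int, truncating toward zero
//   vmovrs r0, s2

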
static int NumRegsBelowFP(RegList regs) {
  int count = 0;
  for (int i = 0; i < FP; i++) {
    if ((regs & (1 << i)) != 0) {
      count++;
    }
  }
  return count;
}


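// Example: NumRegsBelowFP((1 << R4) | (1 << FP) | (1 << LR)) == 1, since
// only R4 is numbered below FP; EnterFrame uses this count to point FP at
// the saved caller FP within the pushed register block.

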
void Assembler::EnterFrame(RegList regs, intptr_t frame_size) {
  if (prologue_offset_ == -1) {
    prologue_offset_ = CodeSize();
  }
  PushList(regs);
  if ((regs & (1 << FP)) != 0) {
    // Set FP to the saved previous FP.
    add(FP, SP, Operand(4 * NumRegsBelowFP(regs)));
  }
  AddImmediate(SP, -frame_size);
}


void Assembler::LeaveFrame(RegList regs) {
  ASSERT((regs & (1 << PC)) == 0);  // Must not pop PC.
  if ((regs & (1 << FP)) != 0) {
    // Use FP to set SP.
    sub(SP, FP, Operand(4 * NumRegsBelowFP(regs)));
  }
  PopList(regs);
}


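// Sketch of the frame built by
// EnterFrame((1 << R4) | (1 << FP) | (1 << LR), 8), higher addresses first:
//   | saved LR |
//   | saved FP | <- FP (SP + 4 * NumRegsBelowFP after the push)
//   | saved R4 |
//   | local 1  |
//   | local 0  | <- SP after AddImmediate(SP, -8)
// LeaveFrame recomputes SP from FP and pops the same register list.

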
void Assembler::Ret() {
  bx(LR);
}


void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  // Reserve space for arguments and align frame before entering
  // the C++ world.
  AddImmediate(SP, -frame_space);
  if (OS::ActivationFrameAlignment() > 1) {
    bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
  }
}


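// Example: with an 8-byte activation frame alignment,
// ReserveAlignedFrameSpace(12) lowers SP by 12 and then executes
// bic sp, sp, #7, so SP ends up 8-byte aligned and at least 12 bytes below
// its previous value.

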
void Assembler::EnterCallRuntimeFrame(intptr_t frame_space) {
  // Preserve volatile CPU registers and PP.
  EnterFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP), 0);
  COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);

  // Preserve all volatile FPU registers.
  if (TargetCPUFeatures::vfp_supported()) {
    DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
    DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
    if ((lastv - firstv + 1) >= 16) {
      DRegister mid = static_cast<DRegister>(firstv + 16);
      vstmd(DB_W, SP, mid, lastv - mid + 1);
      vstmd(DB_W, SP, firstv, 16);
    } else {
      vstmd(DB_W, SP, firstv, lastv - firstv + 1);
    }
  }

  LoadPoolPointer();

  ReserveAlignedFrameSpace(frame_space);
}


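// Note on the 16-register split above: a single vstm/vldm transfers at most
// 16 consecutive D registers, so on VFPv3-D32 configurations the volatile
// FPU range may have to be saved and restored in two chunks.

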
void Assembler::LeaveCallRuntimeFrame() {
  // SP might have been modified to reserve space for arguments
  // and ensure proper alignment of the stack frame.
  // We need to restore it before restoring registers.
  const intptr_t kPushedFpuRegisterSize =
      TargetCPUFeatures::vfp_supported() ?
      kDartVolatileFpuRegCount * kFpuRegisterSize : 0;

  COMPILE_ASSERT(PP < FP);
  COMPILE_ASSERT((kDartVolatileCpuRegs & (1 << PP)) == 0);
  // kDartVolatileCpuRegCount + 1 for PP, - 1 because even though LR is
  // volatile, it is pushed ahead of FP.
  const intptr_t kPushedRegistersSize =
      kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;
  AddImmediate(SP, FP, -kPushedRegistersSize);

  // Restore all volatile FPU registers.
  if (TargetCPUFeatures::vfp_supported()) {
    DRegister firstv = EvenDRegisterOf(kDartFirstVolatileFpuReg);
    DRegister lastv = OddDRegisterOf(kDartLastVolatileFpuReg);
    if ((lastv - firstv + 1) >= 16) {
      DRegister mid = static_cast<DRegister>(firstv + 16);
      vldmd(IA_W, SP, firstv, 16);
      vldmd(IA_W, SP, mid, lastv - mid + 1);
    } else {
      vldmd(IA_W, SP, firstv, lastv - firstv + 1);
    }
  }

  // Restore volatile CPU registers.
  LeaveFrame(kDartVolatileCpuRegs | (1 << PP) | (1 << FP));
}


void Assembler::CallRuntime(const RuntimeEntry& entry,
                            intptr_t argument_count) {
  entry.Call(this, argument_count);
}


void Assembler::EnterDartFrame(intptr_t frame_size) {
  ASSERT(!constant_pool_allowed());

  // Save PP, CODE_REG, FP, and LR; PushList stores lower-numbered registers
  // at lower stack addresses.
  EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0);

  // Set up the pool pointer for this Dart function.
  LoadPoolPointer();

  // Reserve space for locals.
  AddImmediate(SP, -frame_size);
}


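// Sketch of the resulting Dart frame (higher addresses first):
//   | saved LR           |
//   | saved FP           | <- FP
//   | saved PP, CODE_REG |   (ordered by register number)
//   | locals ...         | <- SP after reserving frame_size
// LeaveDartFrame below drops the two slots under FP and pops FP and LR.

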
// On entry to a function compiled for OSR, the caller's frame pointer, the
// stack locals, and any copied parameters are already in place.  The frame
// pointer is already set up.  The PC marker is not correct for the
// optimized function and there may be extra space for spill slots to
// allocate. We must also set up the pool pointer for the function.
void Assembler::EnterOsrFrame(intptr_t extra_size) {
  ASSERT(!constant_pool_allowed());
  Comment("EnterOsrFrame");
  RestoreCodePointer();
  LoadPoolPointer();

  AddImmediate(SP, -extra_size);
}


void Assembler::LeaveDartFrame(RestorePP restore_pp) {
  if (restore_pp == kRestoreCallerPP) {
    ldr(PP, Address(FP, kSavedCallerPpSlotFromFp * kWordSize));
    set_constant_pool_allowed(false);
  }
  Drop(2);  // Drop saved PP, PC marker.
  LeaveFrame((1 << FP) | (1 << LR));
}


void Assembler::EnterStubFrame() {
  EnterDartFrame(0);
}


void Assembler::LeaveStubFrame() {
  LeaveDartFrame();
}


void Assembler::LoadAllocationStatsAddress(Register dest,
                                           intptr_t cid,
                                           bool inline_isolate) {
  ASSERT(dest != kNoRegister);
  ASSERT(dest != TMP);
  ASSERT(cid > 0);
  const intptr_t class_offset = ClassTable::ClassOffsetFor(cid);
  if (inline_isolate) {
    ASSERT(FLAG_allow_absolute_addresses);
    ClassTable* class_table = Isolate::Current()->class_table();
    ClassHeapStats** table_ptr = class_table->TableAddressFor(cid);
    if (cid < kNumPredefinedCids) {
      LoadImmediate(dest, reinterpret_cast<uword>(*table_ptr) + class_offset);
    } else {
      LoadImmediate(dest, reinterpret_cast<uword>(table_ptr));
      ldr(dest, Address(dest, 0));
      AddImmediate(dest, class_offset);
    }
  } else {
    LoadIsolate(dest);
    intptr_t table_offset =
        Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
    ldr(dest, Address(dest, table_offset));
    AddImmediate(dest, class_offset);
  }
}


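// In LoadAllocationStatsAddress above, the inline_isolate path burns an
// absolute address into the generated code (the stats entry itself for
// predefined cids, or the table pointer cell for dynamic cids), which is
// why it asserts FLAG_allow_absolute_addresses; otherwise the table is
// reached through the current isolate at run time.

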
void Assembler::MaybeTraceAllocation(intptr_t cid,
                                     Register temp_reg,
                                     Label* trace,
                                     bool inline_isolate) {
  LoadAllocationStatsAddress(temp_reg, cid, inline_isolate);
  const uword state_offset = ClassHeapStats::state_offset();
  ldr(temp_reg, Address(temp_reg, state_offset));
  tst(temp_reg, Operand(ClassHeapStats::TraceAllocationMask()));
  b(trace, NE);
}


void Assembler::IncrementAllocationStats(Register stats_addr_reg,
                                         intptr_t cid,
                                         Heap::Space space) {
  ASSERT(stats_addr_reg != kNoRegister);
  ASSERT(stats_addr_reg != TMP);
  ASSERT(cid > 0);
  const uword count_field_offset = (space == Heap::kNew) ?
    ClassHeapStats::allocated_since_gc_new_space_offset() :
    ClassHeapStats::allocated_since_gc_old_space_offset();
  const Address& count_address = Address(stats_addr_reg, count_field_offset);
  ldr(TMP, count_address);
  AddImmediate(TMP, 1);
  str(TMP, count_address);
}


void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg,
                                                 Register size_reg,
                                                 Heap::Space space) {
  ASSERT(stats_addr_reg != kNoRegister);
  ASSERT(stats_addr_reg != TMP);
  const uword count_field_offset = (space == Heap::kNew) ?
    ClassHeapStats::allocated_since_gc_new_space_offset() :
    ClassHeapStats::allocated_since_gc_old_space_offset();
  const uword size_field_offset = (space == Heap::kNew) ?
    ClassHeapStats::allocated_size_since_gc_new_space_offset() :
    ClassHeapStats::allocated_size_since_gc_old_space_offset();
  const Address& count_address = Address(stats_addr_reg, count_field_offset);
  const Address& size_address = Address(stats_addr_reg, size_field_offset);
  ldr(TMP, count_address);
  AddImmediate(TMP, 1);
  str(TMP, count_address);
  ldr(TMP, size_address);
  add(TMP, TMP, Operand(size_reg));
  str(TMP, size_address);
}


void Assembler::TryAllocate(const Class& cls,
                            Label* failure,
                            Register instance_reg,
                            Register temp_reg) {
  ASSERT(failure != NULL);
  if (FLAG_inline_alloc) {
    ASSERT(instance_reg != temp_reg);
    ASSERT(temp_reg != IP);
    const intptr_t instance_size = cls.instance_size();
    ASSERT(instance_size != 0);
    // If this allocation is traced, the program will jump to the failure
    // path (i.e. the allocation stub), which will allocate the object and
    // trace the allocation call site.
    MaybeTraceAllocation(cls.id(), temp_reg, failure,
                         /* inline_isolate = */ false);
    Heap::Space space = Heap::SpaceForAllocation(cls.id());
    ldr(temp_reg, Address(THR, Thread::heap_offset()));
    ldr(instance_reg, Address(temp_reg, Heap::TopOffset(space)));
    // TODO(koda): Protect against unsigned overflow here.
    AddImmediateSetFlags(instance_reg, instance_reg, instance_size);

    // instance_reg: potential next object start.
    ldr(IP, Address(temp_reg, Heap::EndOffset(space)));
    cmp(IP, Operand(instance_reg));
    // Fail if the heap end is unsigned less than or equal to instance_reg.
    b(failure, LS);

    // Successfully allocated the object, now update top to point to
    // next object start and store the class in the class field of object.
    str(instance_reg, Address(temp_reg, Heap::TopOffset(space)));

    LoadAllocationStatsAddress(temp_reg, cls.id(),
                               /* inline_isolate = */ false);

    ASSERT(instance_size >= kHeapObjectTag);
    AddImmediate(instance_reg, -instance_size + kHeapObjectTag);

    uword tags = 0;
    tags = RawObject::SizeTag::update(instance_size, tags);
    ASSERT(cls.id() != kIllegalCid);
    tags = RawObject::ClassIdTag::update(cls.id(), tags);
    LoadImmediate(IP, tags);
    str(IP, FieldAddress(instance_reg, Object::tags_offset()));

    IncrementAllocationStats(temp_reg, cls.id(), space);
  } else {
    b(failure);
  }
}


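// Schematic of the TryAllocate fast path above (pseudocode, not emitted):
//   new_top = heap->top(space) + instance_size;
//   if (heap->end(space) <= new_top) goto failure;   // the LS branch
//   heap->set_top(space, new_top);
//   instance_reg = new_top - instance_size + kHeapObjectTag;
//   instance_reg->tags_ = SizeTag(instance_size) | ClassIdTag(cid);

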
void Assembler::TryAllocateArray(intptr_t cid,
                                 intptr_t instance_size,
                                 Label* failure,
                                 Register instance,
                                 Register end_address,
                                 Register temp1,
                                 Register temp2) {
  if (FLAG_inline_alloc) {
    // If this allocation is traced, the program will jump to the failure
    // path (i.e. the allocation stub), which will allocate the object and
    // trace the allocation call site.
    MaybeTraceAllocation(cid, temp1, failure, /* inline_isolate = */ false);
    Heap::Space space = Heap::SpaceForAllocation(cid);
    ldr(temp1, Address(THR, Thread::heap_offset()));
    // Potential new object start.
    ldr(instance, Address(temp1, Heap::TopOffset(space)));
    AddImmediateSetFlags(end_address, instance, instance_size);
    b(failure, CS);  // Branch if unsigned overflow.

    // Check if the allocation fits into the remaining space.
    // instance: potential new object start.
    // end_address: potential next object start.
    ldr(temp2, Address(temp1, Heap::EndOffset(space)));
    cmp(end_address, Operand(temp2));
    b(failure, CS);

    LoadAllocationStatsAddress(temp2, cid, /* inline_isolate = */ false);

    // Successfully allocated the object(s), now update top to point to
    // next object start and initialize the object.
    str(end_address, Address(temp1, Heap::TopOffset(space)));
    add(instance, instance, Operand(kHeapObjectTag));

    // Initialize the tags.
    // instance: new object start as a tagged pointer.
    uword tags = 0;
    tags = RawObject::ClassIdTag::update(cid, tags);
    tags = RawObject::SizeTag::update(instance_size, tags);
    LoadImmediate(temp1, tags);
    str(temp1, FieldAddress(instance, Array::tags_offset()));  // Store tags.

    LoadImmediate(temp1, instance_size);
    IncrementAllocationStatsWithSize(temp2, temp1, space);
  } else {
    b(failure);
  }
}


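// Unlike TryAllocate above, TryAllocateArray also catches unsigned
// wraparound of top + instance_size: AddImmediateSetFlags leaves the carry
// set on overflow and the first CS branch bails out to the failure path
// (TryAllocate still has a TODO for the same check).

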
void Assembler::Stop(const char* message) {
  if (FLAG_print_stop_message) {
    PushList((1 << R0) | (1 << IP) | (1 << LR));  // Preserve R0, IP, LR.
    LoadImmediate(R0, reinterpret_cast<int32_t>(message));
    // PrintStopMessage() preserves all registers.
    BranchLink(&StubCode::PrintStopMessage_entry()->label());
    PopList((1 << R0) | (1 << IP) | (1 << LR));  // Restore R0, IP, LR.
  }
  // Emit the message address before the breakpoint instruction, so that we
  // can 'unstop' and continue execution in the simulator or jump to the
  // next instruction in gdb.
  Label stop;
  b(&stop);
  Emit(reinterpret_cast<int32_t>(message));
  Bind(&stop);
  bkpt(Instr::kStopMessageCode);
}


Address Assembler::ElementAddressForIntIndex(bool is_load,
                                             bool is_external,
                                             intptr_t cid,
                                             intptr_t index_scale,
                                             Register array,
                                             intptr_t index,
                                             Register temp) {
  const int64_t offset_base =
      (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
  const int64_t offset = offset_base +
      static_cast<int64_t>(index) * index_scale;
  ASSERT(Utils::IsInt(32, offset));

  if (Address::CanHoldImmediateOffset(is_load, cid, offset)) {
    return Address(array, static_cast<int32_t>(offset));
  } else {
    ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base));
    AddImmediate(temp, array, static_cast<int32_t>(offset_base));
    return Address(temp, static_cast<int32_t>(offset - offset_base));
  }
}


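// Example for ElementAddressForIntIndex above: loading element 100 of an
// internal array with index_scale 8 yields
//   offset = (DataOffsetFor(cid) - kHeapObjectTag) + 100 * 8.
// If that offset fits the addressing mode for cid, the address is simply
// [array, #offset]; otherwise the untagged data base is materialized in
// temp and the remaining element offset is encoded relative to temp.

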
Address Assembler::ElementAddressForRegIndex(bool is_load,
                                             bool is_external,
                                             intptr_t cid,
                                             intptr_t index_scale,
                                             Register array,
                                             Register index) {
  // Note that index is expected to be smi-tagged (i.e. shifted left by 1)
  // for all arrays.
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
  int32_t offset =
      is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
  const OperandSize size = Address::OperandSizeFor(cid);
  ASSERT(array != IP);
  ASSERT(index != IP);
  const Register base = is_load ? IP : index;
  if ((offset != 0) ||
      (size == kSWord) || (size == kDWord) || (size == kRegList)) {
    if (shift < 0) {
      ASSERT(shift == -1);
      add(base, array, Operand(index, ASR, 1));
    } else {
      add(base, array, Operand(index, LSL, shift));
    }
  } else {
    if (shift < 0) {
      ASSERT(shift == -1);
      return Address(array, index, ASR, 1);
    } else {
      return Address(array, index, LSL, shift);
    }
  }
  int32_t offset_mask = 0;
  if ((is_load && !Address::CanHoldLoadOffset(size,
                                              offset,
                                              &offset_mask)) ||
      (!is_load && !Address::CanHoldStoreOffset(size,
                                                offset,
                                                &offset_mask))) {
    AddImmediate(base, offset & ~offset_mask);
    offset = offset & offset_mask;
  }
  return Address(base, offset);
}


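// Example for ElementAddressForRegIndex above, with a Smi index in R2:
// for index_scale 1, shift == -1 and a plain access can use
// [array, r2, asr #1]; for index_scale 4, shift == +1 and it becomes
// [array, r2, lsl #1]. Wider element sizes (or a nonzero data offset)
// first fold array + scaled index into IP (loads) or index (stores).

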
static const char* cpu_reg_names[kNumberOfCpuRegisters] = {
  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
  "r8", "ctx", "pp", "fp", "ip", "sp", "lr", "pc",
};


const char* Assembler::RegisterName(Register reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters));
  return cpu_reg_names[reg];
}


static const char* fpu_reg_names[kNumberOfFpuRegisters] = {
  "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
#if defined(VFPv3_D32)
  "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
#endif
};


const char* Assembler::FpuRegisterName(FpuRegister reg) {
  ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters));
  return fpu_reg_names[reg];
}

}  // namespace dart

#endif  // defined TARGET_ARCH_ARM