assembler_arm.cc revision 5a7a74a042e73a355f5cedffa0d2faf5340028fa
1// Copyright 2011 Google Inc. All Rights Reserved.
2
3#include "assembler_arm.h"
4
5#include "logging.h"
6#include "offsets.h"
7#include "thread.h"
8#include "utils.h"
9
10namespace art {
11namespace arm {
12
// Instruction encoding bits.
// Single-letter names follow the ARM ARM field names; note that some values
// alias (L/S share bit 20, W/A share bit 21, B/N share bit 22) because the
// bit's meaning depends on the instruction class it appears in.
enum {
  H   = 1 << 5,   // halfword (or byte)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  // Individual bit positions, used below to assemble fixed opcode patterns.
  B0 = 1,
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask = 15 << 12,  // in str instruction
  CondMask = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask = (1 << 24) - 1,
  Off12Mask = (1 << 12) - 1,

  // ldrex/strex register field encodings.
  kLdExRnShift = 16,
  kLdExRtShift = 12,
  kStrExRnShift = 16,
  kStrExRdShift = 12,
  kStrExRtShift = 0,
};
67
68
69static const char* kRegisterNames[] = {
70  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
71  "fp", "ip", "sp", "lr", "pc"
72};
73std::ostream& operator<<(std::ostream& os, const Register& rhs) {
74  if (rhs >= R0 && rhs <= PC) {
75    os << kRegisterNames[rhs];
76  } else {
77    os << "Register[" << static_cast<int>(rhs) << "]";
78  }
79  return os;
80}
81
82
83std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
84  if (rhs >= S0 && rhs < kNumberOfSRegisters) {
85    os << "s" << static_cast<int>(rhs);
86  } else {
87    os << "SRegister[" << static_cast<int>(rhs) << "]";
88  }
89  return os;
90}
91
92
93std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
94  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
95    os << "d" << static_cast<int>(rhs);
96  } else {
97    os << "DRegister[" << static_cast<int>(rhs) << "]";
98  }
99  return os;
100}
101
102
103static const char* kConditionNames[] = {
104  "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
105  "LE", "AL",
106};
107std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
108  if (rhs >= EQ && rhs <= AL) {
109    os << kConditionNames[rhs];
110  } else {
111    os << "Condition[" << static_cast<int>(rhs) << "]";
112  }
113  return os;
114}
115
// Appends one 32-bit instruction word to the assembler buffer, growing the
// buffer first if necessary.
void ArmAssembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}
120
121
// Emits a data-processing instruction (encoding type 0/1):
// cond | type | opcode | S | Rn | Rd | shifter operand.
// `type` selects register (0) vs. immediate (1) shifter-operand form and
// `set_cc` is the S bit (update condition flags).
void ArmAssembler::EmitType01(Condition cond,
                              int type,
                              Opcode opcode,
                              int set_cc,
                              Register rn,
                              Register rd,
                              ShifterOperand so) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     type << kTypeShift |
                     static_cast<int32_t>(opcode) << kOpcodeShift |
                     set_cc << kSShift |
                     static_cast<int32_t>(rn) << kRnShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding();
  Emit(encoding);
}
140
141
// Emits a branch instruction (encoding type 5, B/BL). `link` sets the L bit
// (branch-and-link); the byte offset is folded into the 24-bit immediate by
// EncodeBranchOffset.
void ArmAssembler::EmitType5(Condition cond, int offset, bool link) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     5 << kTypeShift |
                     (link ? 1 : 0) << kLinkShift;
  Emit(ArmAssembler::EncodeBranchOffset(offset, encoding));
}
149
150
// Emits a word/byte load or store (LDR/STR/LDRB/STRB, addressing mode 2).
// `load` sets the L bit, `byte` the B bit; the address operand supplies the
// base register, offset and indexing bits.
void ArmAssembler::EmitMemOp(Condition cond,
                             bool load,
                             bool byte,
                             Register rd,
                             Address ad) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B26 |
                     (load ? L : 0) |
                     (byte ? B : 0) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding();
  Emit(encoding);
}
166
167
// Emits a miscellaneous load/store (addressing mode 3: halfword, signed
// byte/halfword, doubleword). `mode` carries the instruction-specific opcode
// bits (L/H/B6/B7 etc.) chosen by the callers.
void ArmAssembler::EmitMemOpAddressMode3(Condition cond,
                                         int32_t mode,
                                         Register rd,
                                         Address ad) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B22  |
                     mode |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding3();
  Emit(encoding);
}
181
182
// Emits a load/store-multiple (LDM/STM). `am` provides the P/U/W addressing
// mode bits and `regs` is the 16-bit register-list bitmask.
void ArmAssembler::EmitMultiMemOp(Condition cond,
                                  BlockAddressMode am,
                                  bool load,
                                  Register base,
                                  RegList regs) {
  CHECK_NE(base, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     regs;
  Emit(encoding);
}
198
199
// Emits a shift-by-immediate as a MOV with a shifted register operand
// (e.g. LSL rd, rm, #imm). Requires an immediate-form shifter operand
// (so.type() == 1).
void ArmAssembler::EmitShiftImmediate(Condition cond,
                                      Shift opcode,
                                      Register rd,
                                      Register rm,
                                      ShifterOperand so) {
  CHECK_NE(cond, kNoCondition);
  CHECK_EQ(so.type(), 1U);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftImmShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
215
216
// Emits a shift-by-register as a MOV with a register-shifted register operand
// (e.g. LSL rd, rm, rs). Requires a register-form shifter operand
// (so.type() == 0); B4 selects the register-shift encoding.
void ArmAssembler::EmitShiftRegister(Condition cond,
                                     Shift opcode,
                                     Register rd,
                                     Register rm,
                                     ShifterOperand so) {
  CHECK_NE(cond, kNoCondition);
  CHECK_EQ(so.type(), 0U);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftRegisterShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
233
234
// Emits a branch (B/BL) to `label`. A bound label yields a direct PC-relative
// offset; an unbound label is linked: the previous link-chain position is
// stored in the branch's offset field and the label is pointed at this site,
// to be patched when the label is finally bound.
void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) {
  if (label->IsBound()) {
    EmitType5(cond, label->Position() - buffer_.Size(), link);
  } else {
    int position = buffer_.Size();
    // Use the offset field of the branch instruction for linking the sites.
    EmitType5(cond, label->position_, link);
    label->LinkTo(position);
  }
}
245
// AND: rd := rn & so.
void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so,
                        Condition cond) {
  EmitType01(cond, so.type(), AND, 0, rn, rd, so);
}


// EOR: rd := rn ^ so.
void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
}


// SUB: rd := rn - so.
void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
}

// RSB (reverse subtract): rd := so - rn.
void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
}

// RSBS: rd := so - rn, updating the condition flags.
void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so,
                        Condition cond) {
  EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
}


// ADD: rd := rn + so.
void ArmAssembler::add(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
}


// ADDS: rd := rn + so, updating the condition flags.
void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so,
                        Condition cond) {
  EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
}


// SUBS: rd := rn - so, updating the condition flags.
void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so,
                        Condition cond) {
  EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
}


// ADC (add with carry): rd := rn + so + C.
void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
}


// SBC (subtract with carry): rd := rn - so - NOT(C).
void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
}


// RSC (reverse subtract with carry): rd := so - rn - NOT(C).
void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
}


// TST: sets flags from rn & so (no destination; S bit always set).
void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) {
  CHECK_NE(rn, PC);  // Reserve tst pc instruction for exception handler marker.
  EmitType01(cond, so.type(), TST, 1, rn, R0, so);
}


// TEQ: sets flags from rn ^ so (no destination; S bit always set).
void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) {
  CHECK_NE(rn, PC);  // Reserve teq pc instruction for exception handler marker.
  EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
}


// CMP: sets flags from rn - so.
void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
}


// CMN: sets flags from rn + so.
void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
}


// ORR: rd := rn | so.
void ArmAssembler::orr(Register rd, Register rn,
                    ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
}


// ORRS: rd := rn | so, updating the condition flags.
void ArmAssembler::orrs(Register rd, Register rn,
                        ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
}


// MOV: rd := so.
void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
}


// MOVS: rd := so, updating the condition flags.
void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
}


// BIC (bit clear): rd := rn & ~so.
void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
}


// MVN: rd := ~so.
void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
}


// MVNS: rd := ~so, updating the condition flags.
void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
}
368
369
// CLZ: rd := number of leading zero bits in rm. PC is not a valid operand.
// Fixed fields 0xf<<16 and 0xf<<8 are the SBO (should-be-one) bits of the
// CLZ encoding.
void ArmAssembler::clz(Register rd, Register rm, Condition cond) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  CHECK_NE(rd, PC);
  CHECK_NE(rm, PC);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B22 | B21 | (0xf << 16) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
  Emit(encoding);
}
382
383
384void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) {
385  CHECK_NE(cond, kNoCondition);
386  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
387                     B25 | B24 | ((imm16 >> 12) << 16) |
388                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
389  Emit(encoding);
390}
391
392
393void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) {
394  CHECK_NE(cond, kNoCondition);
395  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
396                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
397                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
398  Emit(encoding);
399}
400
401
// Emits a multiply-class instruction. `opcode` carries the variant bits
// (MUL/MLA/MLS/UMULL); B7|B4 is the fixed multiply signature. Note the
// register fields here are the raw Rn/Rd/Rs/Rm encoding slots — callers remap
// their assembler-level operands into these slots (see mul/mla/umull below).
void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode,
                             Register rd, Register rn,
                             Register rm, Register rs) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(rs, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = opcode |
      (static_cast<int32_t>(cond) << kConditionShift) |
      (static_cast<int32_t>(rn) << kRnShift) |
      (static_cast<int32_t>(rd) << kRdShift) |
      (static_cast<int32_t>(rs) << kRsShift) |
      B7 | B4 |
      (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
419
420
// MUL: rd := rn * rm (low 32 bits).
void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) {
  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
  EmitMulOp(cond, 0, R0, rd, rn, rm);
}


// MLA (multiply-accumulate): rd := rn * rm + ra.
void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra,
                       Condition cond) {
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B21, ra, rd, rn, rm);
}


// MLS (multiply-subtract): rd := ra - rn * rm (ARMv6T2+).
void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra,
                       Condition cond) {
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
}


// UMULL (unsigned long multiply): rd_hi:rd_lo := rn * rm.
void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn,
                         Register rm, Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
}
446
447
// LDR: loads a word from ad into rd.
void ArmAssembler::ldr(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, false, rd, ad);
}


// STR: stores the word in rd to ad.
void ArmAssembler::str(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, false, rd, ad);
}


// LDRB: loads an unsigned byte (zero-extended) from ad into rd.
void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, true, rd, ad);
}


// STRB: stores the low byte of rd to ad.
void ArmAssembler::strb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, true, rd, ad);
}


// LDRH: loads an unsigned halfword (zero-extended) from ad into rd.
void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
}


// STRH: stores the low halfword of rd to ad.
void ArmAssembler::strh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
}


// LDRSB: loads a signed byte (sign-extended) from ad into rd.
void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
}


// LDRSH: loads a signed halfword (sign-extended) from ad into rd.
void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
}


// LDRD: loads a doubleword into the register pair rd, rd+1; rd must be even.
void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) {
  CHECK_EQ(rd % 2, 0);
  EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
}


// STRD: stores the register pair rd, rd+1 as a doubleword; rd must be even.
void ArmAssembler::strd(Register rd, Address ad, Condition cond) {
  CHECK_EQ(rd % 2, 0);
  EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
}
498
499
// LDM: loads the registers in `regs` from consecutive words at `base`,
// per the block addressing mode `am`.
void ArmAssembler::ldm(BlockAddressMode am,
                       Register base,
                       RegList regs,
                       Condition cond) {
  EmitMultiMemOp(cond, am, true, base, regs);
}


// STM: stores the registers in `regs` to consecutive words at `base`,
// per the block addressing mode `am`.
void ArmAssembler::stm(BlockAddressMode am,
                       Register base,
                       RegList regs,
                       Condition cond) {
  EmitMultiMemOp(cond, am, false, base, regs);
}
514
515
// LDREX: load-exclusive of the word at [rn] into rt, marking the address for
// a subsequent STREX. The trailing B11..B0 pattern is the fixed 0xf9f field
// of the LDREX encoding.
void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) {
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     L   |
                     (static_cast<int32_t>(rn) << kLdExRnShift) |
                     (static_cast<int32_t>(rt) << kLdExRtShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
  Emit(encoding);
}
529
530
// STREX: store-exclusive of rt to [rn]; rd receives 0 on success, 1 if the
// exclusive monitor was lost. The B11..B4 pattern is the fixed 0xf9 field of
// the STREX encoding.
void ArmAssembler::strex(Register rd,
                         Register rt,
                         Register rn,
                         Condition cond) {
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     (static_cast<int32_t>(rn) << kStrExRnShift) |
                     (static_cast<int32_t>(rd) << kStrExRdShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 |
                     (static_cast<int32_t>(rt) << kStrExRtShift);
  Emit(encoding);
}
548
549
// CLREX: clears the local exclusive-access monitor. Unconditional
// (kSpecialCondition) fixed encoding.
void ArmAssembler::clrex() {
  int32_t encoding = (kSpecialCondition << kConditionShift) |
                     B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
  Emit(encoding);
}
555
556
// NOP: emits the architectural NOP hint encoding
// (cond 0011 0010 0000 1111 0000 0000 0000).
void ArmAssembler::nop(Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B25 | B24 | B21 | (0xf << 12);
  Emit(encoding);
}
563
564
// VMOV sn, rt: moves a core register into a single-precision VFP register.
// The S-register number is split into its upper 4 bits (Vn field) and low
// bit (N bit); SP/PC are not valid core operands.
void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}
578
579
// VMOV rt, sn: moves a single-precision VFP register into a core register.
// Same encoding as vmovsr plus the to-ARM direction bit (B20).
void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B20 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}
593
594
// VMOV sm,sm+1, rt,rt2: moves two core registers into a consecutive pair of
// single-precision registers. sm must not be S31 (sm+1 must exist).
void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2,
                           Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
614
615
// VMOV rt,rt2, sm,sm+1: moves a consecutive pair of single-precision
// registers into two distinct core registers (B20 = to-ARM direction).
void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm,
                           Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
636
637
// VMOV dm, rt, rt2: moves two core registers into a double-precision
// register. The D-register number splits into its low 4 bits (Vm) and top
// bit (M, bit 5); B8 selects the double-precision form.
void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2,
                           Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
656
657
// VMOV rt, rt2, dm: moves a double-precision register into two distinct
// core registers (B20 = to-ARM direction).
void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm,
                           Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
677
678
// VLDR.32: loads a single-precision register from memory (B20 = load).
// The low bit of sd goes in the D bit (B22), the upper bits in the Vd field.
void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}


// VSTR.32: stores a single-precision register to memory. A PC base is
// rejected (stores relative to PC are not supported here).
void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) {
  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}
702
703
// VLDR.64: loads a double-precision register from memory (B20 = load,
// B8 = double-precision). The top bit of dd goes in the D bit (B22).
void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}


// VSTR.64: stores a double-precision register to memory. A PC base is
// rejected (stores relative to PC are not supported here).
void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) {
  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}
727
728
// Emits a three-operand single-precision VFP instruction. Each S-register
// number is split into its upper 4 bits (Vd/Vn/Vm fields) and low bit
// (D/N/M bits); `opcode` supplies the variant-specific bits.
void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode,
                              SRegister sd, SRegister sn, SRegister sm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(sn) & 1)*B7) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
745
746
// Emits a three-operand double-precision VFP instruction (B8 = double).
// Each D-register number is split into its low 4 bits (Vd/Vn/Vm fields) and
// top bit (D/N/M bits); `opcode` supplies the variant-specific bits.
void ArmAssembler::EmitVFPddd(Condition cond, int32_t opcode,
                              DRegister dd, DRegister dn, DRegister dm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(dn, kNoDRegister);
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
763
764
// VMOV.F32 sd, sm: register-to-register copy (single precision).
void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
}


// VMOV.F64 dd, dm: register-to-register copy (double precision).
void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
}
773
774
// VMOV.F32 sd, #imm: attempts to load a float constant as a VFP immediate.
// Returns true (and emits the instruction) only when the value fits the
// 8-bit VFP immediate form: low 19 mantissa bits zero and the exponent in
// the representable range (the two patterns tested below); otherwise emits
// nothing and returns false so the caller can materialize it another way.
bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) {
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
    // Pack sign, one exponent bit and the top mantissa bits into imm8,
    // which is split across bits 19:16 and 3:0 of the instruction.
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
        ((imm32 >> 19) & ((1 << 6) -1));
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
               sd, S0, S0);
    return true;
  }
  return false;
}
788
789
// VMOV.F64 dd, #imm: attempts to load a double constant as a VFP immediate.
// Returns true (and emits the instruction) only when the value fits the
// 8-bit VFP immediate form: low 48 mantissa bits zero and the exponent in
// the representable range (the two patterns tested below); otherwise emits
// nothing and returns false so the caller can materialize it another way.
bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) {
  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
    // Pack sign, one exponent bit and the top mantissa bits into imm8,
    // which is split across bits 19:16 and 3:0 of the instruction.
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
        ((imm64 >> 48) & ((1 << 6) -1));
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
               dd, D0, D0);
    return true;
  }
  return false;
}
803
804
// VADD.F32: sd := sn + sm.
void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
}


// VADD.F64: dd := dn + dm.
void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
}


// VSUB.F32: sd := sn - sm.
void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
}


// VSUB.F64: dd := dn - dm.
void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
}


// VMUL.F32: sd := sn * sm.
void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B21, sd, sn, sm);
}


// VMUL.F64: dd := dn * dm.
void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B21, dd, dn, dm);
}


// VMLA.F32 (multiply-accumulate): sd := sd + sn * sm.
void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, 0, sd, sn, sm);
}


// VMLA.F64 (multiply-accumulate): dd := dd + dn * dm.
void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, 0, dd, dn, dm);
}


// VMLS.F32 (multiply-subtract): sd := sd - sn * sm.
void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B6, sd, sn, sm);
}


// VMLS.F64 (multiply-subtract): dd := dd - dn * dm.
void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B6, dd, dn, dm);
}


// VDIV.F32: sd := sn / sm.
void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B23, sd, sn, sm);
}


// VDIV.F64: dd := dn / dm.
void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B23, dd, dn, dm);
}
875
876
// VABS.F32: sd := |sm|.
void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
}


// VABS.F64: dd := |dm|.
void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
}


// VNEG.F32: sd := -sm.
void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
}


// VNEG.F64: dd := -dm.
void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
}


// VSQRT.F32: sd := sqrt(sm).
void ArmAssembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}

// VSQRT.F64: dd := sqrt(dm).
void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}
904
905
// Emits a VFP instruction with a single-precision destination and a
// double-precision source (used by the double-to-single/int conversions).
void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode,
                             SRegister sd, DRegister dm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}


// Emits a VFP instruction with a double-precision destination and a
// single-precision source (used by the single/int-to-double conversions).
void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode,
                             DRegister dd, SRegister sm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
934
935
// VCVT.F32.F64: convert double dm to single sd.
void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
}


// VCVT.F64.F32: convert single sm to double dd.
void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
}


// VCVT.S32.F32: convert single sm to signed int sd (round toward zero).
void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
}


// VCVT.S32.F64: convert double dm to signed int sd (round toward zero).
void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
}


// VCVT.F32.S32: convert signed int sm to single sd.
void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
}


// VCVT.F64.S32: convert signed int sm to double dd.
void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
}


// VCVT.U32.F32: convert single sm to unsigned int sd (round toward zero).
void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
}


// VCVT.U32.F64: convert double dm to unsigned int sd (round toward zero).
void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
}


// VCVT.F32.U32: convert unsigned int sm to single sd.
void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}


// VCVT.F64.U32: convert unsigned int sm to double dd.
void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}
984
985
// VCMP.F32: compare singles sd and sm, setting the FPSCR flags
// (transfer to APSR with vmstat).
void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}


// VCMP.F64: compare doubles dd and dm, setting the FPSCR flags.
void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}


// VCMP.F32 sd, #0.0: compare single sd against zero.
void ArmAssembler::vcmpsz(SRegister sd, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}


// VCMP.F64 dd, #0.0: compare double dd against zero.
void ArmAssembler::vcmpdz(DRegister dd, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}
1004
1005
// Copies the VFP comparison flags into the core condition flags so that a
// preceding vcmp's result can drive ordinary conditional instructions.
// Rt == PC selects the special APSR_nzcv destination form of VMRS.
void ArmAssembler::vmstat(Condition cond) {  // VMRS APSR_nzcv, FPSCR
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
                     (static_cast<int32_t>(PC)*B12) |
                     B11 | B9 | B4;
  Emit(encoding);
}
1014
1015
// SVC (supervisor call) with a 24-bit immediate, always executed (AL).
void ArmAssembler::svc(uint32_t imm24) {
  CHECK(IsUint(24, imm24));
  int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
  Emit(encoding);
}
1021
1022
// BKPT: software breakpoint. The 16-bit immediate is split by the encoding
// into a 12-bit field at bit 8 and a 4-bit field at bit 0.
void ArmAssembler::bkpt(uint16_t imm16) {
  int32_t encoding = (AL << kConditionShift) | B24 | B21 |
                     ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
  Emit(encoding);
}
1028
1029
// B: branch to label (link == false).
void ArmAssembler::b(Label* label, Condition cond) {
  EmitBranch(cond, label, false);
}


// BL: branch with link (saves return address in LR).
void ArmAssembler::bl(Label* label, Condition cond) {
  EmitBranch(cond, label, true);
}
1038
1039
// BLX (register form): branch with link and exchange to the address in rm.
void ArmAssembler::blx(Register rm, Condition cond) {
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B5 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
1048
1049
// Marks an exception-handler entry: emits a TST marker instruction, then an
// unconditional branch to 'label', and jumps over that branch in the normal
// flow via the local label 'l'.
void ArmAssembler::MarkExceptionHandler(Label* label) {
  EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
  Label l;
  b(&l);
  EmitBranch(AL, label, false);
  Bind(&l);
}
1057
1058
// Binds 'label' to the current buffer position. Forward references to an
// unbound label form a linked list threaded through the branch instructions
// themselves: each unresolved branch's offset field holds the position of
// the previous unresolved branch. Walk that list, patching each branch with
// the real offset and following the stored link to the next one.
void ArmAssembler::Bind(Label* label) {
  CHECK(!label->IsBound());
  int bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    int32_t position = label->Position();
    int32_t next = buffer_.Load<int32_t>(position);
    int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next);
    buffer_.Store<int32_t>(position, encoded);
    label->position_ = ArmAssembler::DecodeBranchOffset(next);
  }
  label->BindTo(bound_pc);
}
1071
1072
// Encodes an arbitrary 32-bit value into the instruction stream as a series
// of TST instructions (which have no architectural side effects here), one
// byte per instruction, low byte first. The final byte is emitted with
// condition MI and the others with VS so a decoder can find the terminator.
void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) {
  // TODO: Consider using movw ip, <16 bits>.
  while (!IsUint(8, data)) {
    tst(R0, ShifterOperand(data & 0xFF), VS);
    data >>= 8;
  }
  tst(R0, ShifterOperand(data), MI);
}
1081
1082
// Packs a byte offset into the 24-bit signed word-offset field of the branch
// instruction 'inst', preserving the instruction's other bits.
int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
  // The offset is off by 8 due to the way the ARM CPUs read PC.
  offset -= 8;
  CHECK(IsAligned(offset, 4));
  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset));

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;  // Branch offsets are in words, not bytes.
  offset &= kBranchOffsetMask;
  return (inst & ~kBranchOffsetMask) | offset;
}
1094
1095
// Recovers the byte offset from a branch instruction's 24-bit field.
// '<< 8 then >> 6' sign-extends the 24-bit value while multiplying by 4
// (words to bytes) in one arithmetic shift; +8 undoes the PC-read bias
// applied in EncodeBranchOffset.
int ArmAssembler::DecodeBranchOffset(int32_t inst) {
  // Sign-extend, left-shift by 2, then add 8.
  return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
}
1100
// rd += value: in-place form delegating to the three-operand overload.
void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}
1104
1105
// rd = rn + value, choosing the shortest instruction sequence:
// 1) add/sub with an immediate shifter operand (one instruction);
// 2) mvn into IP then add/sub (two instructions);
// 3) movw/movt into IP then add (up to three instructions).
// IP is used as a scratch register, so rn must not be IP for cases 2-3.
void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value,
                               Condition cond) {
  if (value == 0) {
    if (rd != rn) {
      mov(rd, ShifterOperand(rn), cond);
    }
    return;
  }
  // We prefer to select the shorter code sequence rather than selecting add for
  // positive values and sub for negatives ones, which would slightly improve
  // the readability of generated code for some constants.
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    add(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    sub(rd, rn, shifter_op, cond);
  } else {
    CHECK(rn != IP);
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      mvn(IP, shifter_op, cond);
      add(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      sub(rd, rn, ShifterOperand(IP), cond);
    } else {
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      add(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
1140
1141
// rd = rn + value, updating the condition flags (adds/subs). Same fallback
// strategy as AddConstant: immediate, mvn+IP, or movw/movt+IP. Note there is
// no value == 0 shortcut here because the flags must still be set.
void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
                                       Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    adds(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    subs(rd, rn, shifter_op, cond);
  } else {
    CHECK(rn != IP);
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      mvn(IP, shifter_op, cond);
      adds(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      subs(rd, rn, ShifterOperand(IP), cond);
    } else {
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      adds(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
1167
1168
// rd = value, using the cheapest available form: mov with an immediate
// shifter operand, mvn of the complement, or movw (plus movt when the high
// half is non-zero).
void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    mov(rd, shifter_op, cond);
  } else if (ShifterOperand::CanHold(~value, &shifter_op)) {
    mvn(rd, shifter_op, cond);
  } else {
    movw(rd, Low16Bits(value), cond);
    uint16_t value_high = High16Bits(value);
    if (value_high != 0) {
      movt(rd, value_high, cond);
    }
  }
}
1183
1184
// Returns true if 'offset' fits in the immediate field of the load
// instruction used for 'type'. The immediate width depends on the
// addressing mode the load uses (sign bit is separate, hence "absolute").
bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}
1203
1204
// Returns true if 'offset' fits in the immediate field of the store
// instruction used for 'type'. Mirrors CanHoldLoadOffset.
bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}
1221
1222
1223// Implementation note: this method must emit at most one instruction when
1224// Address::CanHoldLoadOffset.
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset.
// Loads 'reg' from [base + offset]. If the offset does not fit the
// instruction's immediate field, the address is first materialized in IP
// (which therefore must not be the base register).
void ArmAssembler::LoadFromOffset(LoadOperandType type,
                               Register reg,
                               Register base,
                               int32_t offset,
                               Condition cond) {
  if (!Address::CanHoldLoadOffset(type, offset)) {
    CHECK(base != IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(type, offset));
  switch (type) {
    case kLoadSignedByte:
      ldrsb(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedByte:
      ldrb(reg, Address(base, offset), cond);
      break;
    case kLoadSignedHalfword:
      ldrsh(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedHalfword:
      ldrh(reg, Address(base, offset), cond);
      break;
    case kLoadWord:
      ldr(reg, Address(base, offset), cond);
      break;
    case kLoadWordPair:
      ldrd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1261
1262// Implementation note: this method must emit at most one instruction when
1263// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
// Loads single-precision 'reg' from [base + offset], going through IP when
// the offset exceeds the VFP immediate range.
void ArmAssembler::LoadSFromOffset(SRegister reg,
                                   Register base,
                                   int32_t offset,
                                   Condition cond) {
  if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
  vldrs(reg, Address(base, offset), cond);
}
1278
1279// Implementation note: this method must emit at most one instruction when
1280// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
// Loads double-precision 'reg' from [base + offset]; same IP fallback as
// LoadSFromOffset.
void ArmAssembler::LoadDFromOffset(DRegister reg,
                                   Register base,
                                   int32_t offset,
                                   Condition cond) {
  if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
  vldrd(reg, Address(base, offset), cond);
}
1295
1296// Implementation note: this method must emit at most one instruction when
1297// Address::CanHoldStoreOffset.
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset.
// Stores 'reg' to [base + offset]. When the offset does not fit, the address
// is built in IP; neither the value nor the base may then be IP.
void ArmAssembler::StoreToOffset(StoreOperandType type,
                                 Register reg,
                                 Register base,
                                 int32_t offset,
                                 Condition cond) {
  if (!Address::CanHoldStoreOffset(type, offset)) {
    CHECK(reg != IP);
    CHECK(base != IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(type, offset));
  switch (type) {
    case kStoreByte:
      strb(reg, Address(base, offset), cond);
      break;
    case kStoreHalfword:
      strh(reg, Address(base, offset), cond);
      break;
    case kStoreWord:
      str(reg, Address(base, offset), cond);
      break;
    case kStoreWordPair:
      strd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1329
1330// Implementation note: this method must emit at most one instruction when
1331// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
// Stores single-precision 'reg' to [base + offset], via IP when out of range.
void ArmAssembler::StoreSToOffset(SRegister reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
  vstrs(reg, Address(base, offset), cond);
}
1346
1347// Implementation note: this method must emit at most one instruction when
1348// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
// Stores double-precision 'reg' to [base + offset], via IP when out of range.
void ArmAssembler::StoreDToOffset(DRegister reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
  vstrd(reg, Address(base, offset), cond);
}
1363
// Pushes one register: pre-indexed str with writeback, str rd, [sp, #-4]!.
void ArmAssembler::Push(Register rd, Condition cond) {
  str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
}

// Pops one register: post-indexed ldr with writeback, ldr rd, [sp], #4.
void ArmAssembler::Pop(Register rd, Condition cond) {
  ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
}

// Pushes a register list (stmdb sp!, {...}, i.e. the PUSH alias).
void ArmAssembler::PushList(RegList regs, Condition cond) {
  stm(DB_W, SP, regs, cond);
}

// Pops a register list (ldmia sp!, {...}, i.e. the POP alias).
void ArmAssembler::PopList(RegList regs, Condition cond) {
  ldm(IA_W, SP, regs, cond);
}
1379
// Register move that emits nothing when source and destination coincide.
void ArmAssembler::Mov(Register rd, Register rm, Condition cond) {
  if (rd != rm) {
    mov(rd, ShifterOperand(rm), cond);
  }
}
1385
// Logical shift left by immediate. A zero shift would encode a plain MOV,
// so it is rejected.
void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsl if no shift is wanted.
  mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
}

// Logical shift right by immediate; a 32-bit shift is encoded as 0 per UAL.
void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsr if no shift is wanted.
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax.
  mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
}

// Arithmetic shift right by immediate; a 32-bit shift is encoded as 0.
void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Asr if no shift is wanted.
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax.
  mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
}

// Rotate right by immediate; ROR #0 means RRX, so use Rrx for that.
void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Use Rrx instruction.
  mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
}

// Rotate right with extend (one-bit rotate through carry): ROR #0 encoding.
void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) {
  mov(rd, ShifterOperand(rm, ROR, 0), cond);
}
1415
// Emits the managed-code method prologue: pushes LR plus the callee-save
// registers, extends the stack to 'frame_size', and stores the Method*
// (required to be in R0) at the bottom of the frame.
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                              const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK(IsAligned(frame_size, kStackAlignment));
  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());

  // Push callee saves and link register
  RegList push_list = 1 << LR;
  size_t pushed_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    push_list |= 1 << reg;
    pushed_values++;
  }
  PushList(push_list);

  // Increase frame to required size
  CHECK_GT(frame_size, pushed_values * kPointerSize);  // Must be at least space to push Method*
  size_t adjust = frame_size - (pushed_values * kPointerSize);
  IncreaseFrameSize(adjust);

  // Write out Method*
  StoreToOffset(kStoreWord, R0, SP, 0);
}
1439
// Emits the method epilogue: shrinks the stack back to the register-save
// area, then pops the callee saves with PC in place of the pushed LR so the
// pop also returns to the caller.
void ArmAssembler::RemoveFrame(size_t frame_size,
                              const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK(IsAligned(frame_size, kStackAlignment));
  // Compute callee saves to pop and PC
  RegList pop_list = 1 << PC;
  size_t pop_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    pop_list |= 1 << reg;
    pop_values++;
  }

  // Decrease frame to start of callee saves
  CHECK_GT(frame_size, pop_values * kPointerSize);
  size_t adjust = frame_size - (pop_values * kPointerSize);
  DecreaseFrameSize(adjust);

  // Pop callee saves and PC
  PopList(pop_list);
}
1460
1461void ArmAssembler::IncreaseFrameSize(size_t adjust) {
1462  AddConstant(SP, -adjust);
1463}
1464
// Shrinks the current frame by 'adjust' bytes (moves SP up).
// NOTE(review): 'adjust' is narrowed from size_t to AddConstant's int32_t;
// callers are assumed to pass frame adjustments well below 2^31.
void ArmAssembler::DecreaseFrameSize(size_t adjust) {
  AddConstant(SP, adjust);
}
1468
// Spills 'msrc' to the stack slot at 'dest', dispatching on the managed
// register kind: core (4 bytes), pair (8 bytes, two word stores), S or D
// float register. A no-register source is legal only for size 0.
void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  ArmManagedRegister src = msrc.AsArm();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                  SP, dest.Int32Value() + 4);
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
  } else {
    CHECK(src.IsDRegister());
    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
  }
}
1488
// Stores an object reference (a core register) to a stack slot.
void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister());
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

// Stores a raw pointer (a core register) to a stack slot; identical
// emission to StoreRef, kept separate for call-site intent.
void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister());
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}
1500
// Stores 'msrc' at 'dest' and then copies the word at 'in_off' to the slot
// immediately above it (dest + 4), using 'mscratch' for the copy.
void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                              FrameOffset in_off, ManagedRegister mscratch) {
  ArmManagedRegister src = msrc.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
}
1509
// Copies one reference word between two stack slots via a scratch register.
void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
1516
1517void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
1518                           MemberOffset offs) {
1519  ArmManagedRegister dest = mdest.AsArm();
1520  CHECK(dest.IsCoreRegister() && dest.IsCoreRegister());
1521  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
1522                 base.AsArm().AsCoreRegister(), offs.Int32Value());
1523}
1524
1525void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset  src) {
1526  ArmManagedRegister dest = mdest.AsArm();
1527   CHECK(dest.IsCoreRegister());
1528   LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
1529                  SP, src.Int32Value());
1530 }
1531
1532void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
1533                           Offset offs) {
1534  ArmManagedRegister dest = mdest.AsArm();
1535  CHECK(dest.IsCoreRegister() && dest.IsCoreRegister());
1536  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
1537                 base.AsArm().AsCoreRegister(), offs.Int32Value());
1538}
1539
// Stores the 32-bit constant 'imm' to the stack slot 'dest', materializing
// it in the scratch core register first.
void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                      ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister());
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

// Stores the 32-bit constant 'imm' to a field of the current Thread
// (TR-relative), via the scratch core register.
void ArmAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                                       ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister());
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
}
1555
// Fills 'mdest' from the stack slot at 'src', dispatching on register kind
// exactly as Store does: core (4), pair (8, low word first), S or D float.
void ArmAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  ArmManagedRegister dest = mdest.AsArm();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    LoadFromOffset(kLoadWord, dest.AsRegisterPairLow(), SP, src.Int32Value());
    LoadFromOffset(kLoadWord, dest.AsRegisterPairHigh(), SP, src.Int32Value() + 4);
  } else if (dest.IsSRegister()) {
    LoadSFromOffset(dest.AsSRegister(), SP, src.Int32Value());
  } else {
    CHECK(dest.IsDRegister());
    LoadDFromOffset(dest.AsDRegister(), SP, src.Int32Value());
  }
}
1574
// Fills 'mdest' from a Thread-relative offset (TR base) — same dispatch as
// the FrameOffset overload but reading from the current Thread object.
void ArmAssembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
  ArmManagedRegister dest = mdest.AsArm();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    LoadFromOffset(kLoadWord, dest.AsCoreRegister(), TR, src.Int32Value());
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    LoadFromOffset(kLoadWord, dest.AsRegisterPairLow(), TR, src.Int32Value());
    LoadFromOffset(kLoadWord, dest.AsRegisterPairHigh(), TR, src.Int32Value() + 4);
  } else if (dest.IsSRegister()) {
    LoadSFromOffset(dest.AsSRegister(), TR, src.Int32Value());
  } else {
    CHECK(dest.IsDRegister());
    LoadDFromOffset(dest.AsDRegister(), TR, src.Int32Value());
  }
}
1593
// Loads a raw pointer field of the current Thread (TR-relative) into a
// core register.
void ArmAssembler::LoadRawPtrFromThread(ManagedRegister mdest,
                                        ThreadOffset offs) {
  ArmManagedRegister dest = mdest.AsArm();
  CHECK(dest.IsCoreRegister());
  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
                 TR, offs.Int32Value());
}
1601
// Copies a pointer-sized word from a Thread field (TR-relative) to a stack
// slot, bouncing through the scratch core register.
void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                        ThreadOffset thr_offs,
                                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, thr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                SP, fr_offs.Int32Value());
}

// Copies a pointer-sized word from a stack slot to a Thread field
// (TR-relative), bouncing through the scratch core register.
void ArmAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
                                      FrameOffset fr_offs,
                                      ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}
1623
// Computes the address SP + fr_offs into the scratch register and stores it
// to a Thread field (TR-relative).
void ArmAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
                                            FrameOffset fr_offs,
                                            ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister());
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

// Stores the current SP value itself to a Thread field (TR-relative).
void ArmAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}
1637
// Register-to-register move between managed registers of the same kind:
// core, D, S, or pair. No-op when source and destination are equal. For
// pairs, the two mov instructions are ordered so the first never clobbers
// a register the second still needs to read.
void ArmAssembler::Move(ManagedRegister mdest, ManagedRegister msrc) {
  ArmManagedRegister dest = mdest.AsArm();
  ArmManagedRegister src = msrc.AsArm();
  if (!dest.Equals(src)) {
    if (dest.IsCoreRegister()) {
      CHECK(src.IsCoreRegister());
      mov(dest.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
    } else if (dest.IsDRegister()) {
      CHECK(src.IsDRegister());
      vmovd(dest.AsDRegister(), src.AsDRegister());
    } else if (dest.IsSRegister()) {
      CHECK(src.IsSRegister());
      vmovs(dest.AsSRegister(), src.AsSRegister());
    } else {
      CHECK(dest.IsRegisterPair());
      CHECK(src.IsRegisterPair());
      // Ensure that the first move doesn't clobber the input of the second
      if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) {
        mov(dest.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
        mov(dest.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
      } else {
        mov(dest.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
        mov(dest.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
      }
    }
  }
}
1665
1666void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
1667  ArmManagedRegister scratch = mscratch.AsArm();
1668  CHECK(scratch.IsCoreRegister());
1669  CHECK(size == 4 || size == 8);
1670  if (size == 4) {
1671    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
1672    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
1673  } else if (size == 8) {
1674    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
1675    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
1676    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
1677    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
1678  }
1679}
1680
// Copies one word from [src_base + src_offset] to the stack slot 'dest'.
void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
}

// Copies one word from the stack slot 'src' to [dest_base + dest_offset].
void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}
1696
// Frame-indirect copy variant: not implemented for ARM; aborts if reached.
void ArmAssembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  UNIMPLEMENTED(FATAL);
}

// Copies one word from [src + src_offset] to [dest + dest_offset] via the
// scratch core register.
void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
                        ManagedRegister src, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  CHECK_EQ(size, 4u);
  Register scratch = mscratch.AsArm().AsCoreRegister();
  LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

// Doubly-indirect frame copy variant: not implemented for ARM.
void ArmAssembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
                        ManagedRegister scratch, size_t size) {
  UNIMPLEMENTED(FATAL);
}
1715
1716
// Emits a full memory barrier on SMP builds, selecting the strongest
// mechanism the target supports at compile time: the DMB instruction, the
// CP15 "data memory barrier" MCR (which requires R12 as scratch), or a call
// to the kernel's kuser_memory_barrier helper page. No-op on non-SMP builds.
void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
#if ANDROID_SMP != 0
#if defined(__ARM_HAVE_DMB)
  int32_t encoding = 0xf57ff05f;  // dmb
  Emit(encoding);
#elif  defined(__ARM_HAVE_LDREX_STREX)
  CHECK(mscratch.AsArm().AsCoreRegister() == R12);
  LoadImmediate(R12, 0);
  int32_t encoding = 0xee07cfba;  // mcr p15, 0, r12, c7, c10, 5
  Emit(encoding);
#else
  CHECK(mscratch.AsArm().AsCoreRegister() == R12);
  LoadImmediate(R12, 0xffff0fa0);  // kuser_memory_barrier
  blx(R12);
#endif
#endif
}
1734
// Materializes a SIRT (stack indirect reference table) entry address in
// 'mout_reg'. With null_allowed, a null reference maps to 0 and a non-null
// one to SP + sirt_offset, computed with conditional (EQ/NE) instructions
// after the cmp; otherwise the address is produced unconditionally.
void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
                                   FrameOffset sirt_offset,
                                   ManagedRegister min_reg, bool null_allowed) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister());
  CHECK(out_reg.IsCoreRegister());
  if (null_allowed) {
    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
    // the address in the SIRT holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      // Reference is on the stack; load it so it can be null-checked.
      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                     SP, sirt_offset.Int32Value());
      in_reg = out_reg;
    }
    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
  }
}
1760
1761void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
1762                                   FrameOffset sirt_offset,
1763                                   ManagedRegister mscratch,
1764                                   bool null_allowed) {
1765  ArmManagedRegister scratch = mscratch.AsArm();
1766  CHECK(scratch.IsCoreRegister());
1767  if (null_allowed) {
1768    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
1769                   sirt_offset.Int32Value());
1770    // Null values get a SIRT entry value of 0.  Otherwise, the sirt entry is
1771    // the address in the SIRT holding the reference.
1772    // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
1773    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
1774    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
1775  } else {
1776    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
1777  }
1778  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
1779}
1780
1781void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
1782                                         ManagedRegister min_reg) {
1783  ArmManagedRegister out_reg = mout_reg.AsArm();
1784  ArmManagedRegister in_reg = min_reg.AsArm();
1785  CHECK(out_reg.IsCoreRegister());
1786  CHECK(in_reg.IsCoreRegister());
1787  Label null_arg;
1788  if (!out_reg.Equals(in_reg)) {
1789    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
1790  }
1791  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
1792  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
1793                 in_reg.AsCoreRegister(), 0, NE);
1794}
1795
// Reference-validity check for a register-held object. Intentionally a
// no-op for now.
void ArmAssembler::VerifyObject(ManagedRegister src, bool could_be_null) {
  // TODO: not validating references
}
1799
// Reference-validity check for a frame-slot-held object. Intentionally a
// no-op for now.
void ArmAssembler::VerifyObject(FrameOffset src, bool could_be_null) {
  // TODO: not validating references
}
1803
1804void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
1805                        ManagedRegister mscratch) {
1806  ArmManagedRegister base = mbase.AsArm();
1807  ArmManagedRegister scratch = mscratch.AsArm();
1808  CHECK(base.IsCoreRegister());
1809  CHECK(scratch.IsCoreRegister());
1810  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1811                 base.AsCoreRegister(), offset.Int32Value());
1812  blx(scratch.AsCoreRegister());
1813  // TODO: place reference map on call
1814}
1815
1816void ArmAssembler::Call(FrameOffset base, Offset offset,
1817                        ManagedRegister mscratch) {
1818  ArmManagedRegister scratch = mscratch.AsArm();
1819  CHECK(scratch.IsCoreRegister());
1820  // Call *(*(SP + base) + offset)
1821  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1822                 SP, base.Int32Value());
1823  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1824                 scratch.AsCoreRegister(), offset.Int32Value());
1825  blx(scratch.AsCoreRegister());
1826  // TODO: place reference map on call
1827}
1828
// Call through a Thread-relative entrypoint. Not yet implemented for ARM:
// aborts at runtime via UNIMPLEMENTED(FATAL).
void ArmAssembler::Call(ThreadOffset offset, ManagedRegister scratch) {
  UNIMPLEMENTED(FATAL);
}
1832
1833void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
1834  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
1835}
1836
// Stores the dedicated thread register (TR) into the frame slot at offset.
// The scratch register is not needed on ARM (a direct store suffices) and
// is ignored.
void ArmAssembler::GetCurrentThread(FrameOffset offset,
                                    ManagedRegister scratch) {
  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
}
1841
// Emits a suspend-point check: loads the current thread's suspend count
// and, if non-zero, branches to an out-of-line slow path (emitted later)
// that saves the return value, calls the runtime suspend entrypoint, and
// restores the return value before resuming. Clobbers mscratch.
void ArmAssembler::SuspendPoll(ManagedRegister mscratch,
                               ManagedRegister return_reg,
                               FrameOffset return_save_location,
                               size_t return_size) {
  ArmManagedRegister scratch = mscratch.AsArm();
  // Slow path emitted out of line; presumably owned by buffer_ once
  // enqueued -- TODO confirm EnqueueSlowPath takes ownership.
  ArmSuspendCountSlowPath* slow =
      new ArmSuspendCountSlowPath(return_reg.AsArm(), return_save_location,
                                  return_size);
  buffer_.EnqueueSlowPath(slow);
  // Load Thread::suspend_count_ via the thread register.
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, Thread::SuspendCountOffset().Int32Value());
  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
  b(slow->Entry(), NE);
  // The slow path jumps back here when the thread resumes.
  Bind(slow->Continuation());
}
1857
// Out-of-line code for SuspendPoll: preserves the return value across a
// call to the runtime's pCheckSuspendFromCode entrypoint, then jumps back
// to the instruction after the poll.
void ArmSuspendCountSlowPath::Emit(Assembler* sasm) {
  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  // Save return value
  __ Store(return_save_location_, return_register_, return_size_);
  // Pass thread as argument
  __ mov(R0, ShifterOperand(TR));
  // Fetch the entrypoint from the Thread object; R12 is used as the call
  // register (IP, caller-clobbered by convention).
  __ LoadFromOffset(kLoadWord, R12, TR, OFFSETOF_MEMBER(Thread, pCheckSuspendFromCode));
  // Note: assume that link register will be spilled/filled on method entry/exit
  __ blx(R12);
  // Reload return value
  __ Load(return_register_, return_save_location_, return_size_);
  // Resume at the continuation bound by SuspendPoll.
  __ b(&continuation_);
#undef __
}
1874
// Emits an exception check: loads Thread::exception_ into the scratch
// register and, if non-null, branches to an out-of-line slow path that
// delivers the exception. No continuation is bound because the slow path
// never returns. Clobbers mscratch.
void ArmAssembler::ExceptionPoll(ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  // Slow path emitted out of line; presumably owned by buffer_ once
  // enqueued -- TODO confirm EnqueueSlowPath takes ownership.
  ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, Thread::ExceptionOffset().Int32Value());
  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
  b(slow->Entry(), NE);
}
1884
// Out-of-line code for ExceptionPoll: hands the pending exception (left in
// scratch_ by ExceptionPoll) to the runtime's pDeliverException entrypoint,
// which does not return.
void ArmExceptionSlowPath::Emit(Assembler* sasm) {
  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);

  // Pass exception object as argument
  // Don't care about preserving R0 as this call won't return
  __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
  // Set up call to Thread::Current()->pDeliverException
  __ LoadFromOffset(kLoadWord, R12, TR, OFFSETOF_MEMBER(Thread, pDeliverException));
  __ blx(R12);
  // Call never returns
  __ bkpt(0);
#undef __
}
1900
1901}  // namespace arm
1902}  // namespace art
1903