assembler_arm.cc revision 0d666d8769714dcbc2acc4dd5b06f0deffa6e0a1
1// Copyright 2011 Google Inc. All Rights Reserved.
2
3#include "assembler.h"
4#include "logging.h"
5#include "offsets.h"
6#include "thread.h"
7#include "utils.h"
8
9namespace art {
10
// Instruction encoding bits. Note that several mnemonic bits intentionally
// alias the same bit position (e.g. L and S are both 1 << 20); which meaning
// applies depends on the instruction class being encoded.
enum {
  H   = 1 << 5,   // halfword (or byte)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  // Generic single-bit constants; Bn is bit n of the instruction word.
  B0 = 1,
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask = 15 << 12,  // in str instruction
  CondMask = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask = (1 << 24) - 1,
  Off12Mask = (1 << 12) - 1,

  // ldrex/strex register field encodings.
  kLdExRnShift = 16,
  kLdExRtShift = 12,
  kStrExRnShift = 16,
  kStrExRdShift = 12,
  kStrExRtShift = 0,
};
65
66
// ABI names for the 16 ARM core registers, indexed by Register value (R0..PC).
static const char* kRegisterNames[] = {
  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
  "fp", "ip", "sp", "lr", "pc"
};
71std::ostream& operator<<(std::ostream& os, const Register& rhs) {
72  if (rhs >= R0 && rhs <= PC) {
73    os << kRegisterNames[rhs];
74  } else {
75    os << "Register[" << static_cast<int>(rhs) << "]";
76  }
77  return os;
78}
79
80
81std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
82  if (rhs >= S0 && rhs < kNumberOfSRegisters) {
83    os << "s" << static_cast<int>(rhs);
84  } else {
85    os << "SRegister[" << static_cast<int>(rhs) << "]";
86  }
87  return os;
88}
89
90
91std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
92  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
93    os << "d" << static_cast<int>(rhs);
94  } else {
95    os << "DRegister[" << static_cast<int>(rhs) << "]";
96  }
97  return os;
98}
99
100
// Mnemonics for the 15 ARM condition codes, indexed by Condition value
// (EQ..AL).
static const char* kConditionNames[] = {
  "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
  "LE", "AL",
};
105std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
106  if (rhs >= EQ && rhs <= AL) {
107    os << kConditionNames[rhs];
108  } else {
109    os << "Condition[" << static_cast<int>(rhs) << "]";
110  }
111  return os;
112}
113
114
// Append one 32-bit instruction word to the assembler buffer.
void Assembler::Emit(int32_t value) {
  // Ensures room in buffer_ for the raw Emit below (RAII scope guard).
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}
119
120
// Emit a type 0/1 (data-processing) instruction.
// Packs cond | type | opcode | set_cc (S bit) | Rn | Rd | shifter operand
// into one word. rd must be a real register and cond a real condition.
void Assembler::EmitType01(Condition cond,
                           int type,
                           Opcode opcode,
                           int set_cc,
                           Register rn,
                           Register rd,
                           ShifterOperand so) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     type << kTypeShift |
                     static_cast<int32_t>(opcode) << kOpcodeShift |
                     set_cc << kSShift |
                     static_cast<int32_t>(rn) << kRnShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding();
  Emit(encoding);
}
139
140
// Emit a type 5 (branch) instruction; link selects BL over B.
// The byte offset is folded into the 24-bit immediate by EncodeBranchOffset.
void Assembler::EmitType5(Condition cond, int offset, bool link) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     5 << kTypeShift |
                     (link ? 1 : 0) << kLinkShift;
  Emit(Assembler::EncodeBranchOffset(offset, encoding));
}
148
149
// Emit an addressing-mode-2 single load/store (LDR/STR, word or byte).
// load selects the L bit, byte the B bit; ad supplies base/offset/index bits.
void Assembler::EmitMemOp(Condition cond,
                          bool load,
                          bool byte,
                          Register rd,
                          Address ad) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B26 |
                     (load ? L : 0) |
                     (byte ? B : 0) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding();
  Emit(encoding);
}
165
166
// Emit an addressing-mode-3 load/store (halfword, signed byte/halfword,
// doubleword). The caller passes the discriminating bits (L/H/S etc.) in
// 'mode'; ad.encoding3() supplies the mode-3 base/offset fields.
void Assembler::EmitMemOpAddressMode3(Condition cond,
                                      int32_t mode,
                                      Register rd,
                                      Address ad) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B22  |
                     mode |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding3();
  Emit(encoding);
}
180
181
// Emit a block transfer (LDM/STM). 'am' carries the P/U/W addressing-mode
// bits; 'regs' is the 16-bit register list placed in the low halfword.
void Assembler::EmitMultiMemOp(Condition cond,
                               BlockAddressMode am,
                               bool load,
                               Register base,
                               RegList regs) {
  CHECK_NE(base, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     regs;
  Emit(encoding);
}
197
198
// Emit a shift-by-immediate, encoded as MOV rd, rm, <shift> #imm.
// Requires so.type() == 1 (immediate shifter operand form).
void Assembler::EmitShiftImmediate(Condition cond,
                                   Shift opcode,
                                   Register rd,
                                   Register rm,
                                   ShifterOperand so) {
  CHECK_NE(cond, kNoCondition);
  CHECK_EQ(so.type(), 1U);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftImmShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
214
215
// Emit a shift-by-register, encoded as MOV rd, rm, <shift> rs.
// Requires so.type() == 0 (register shifter operand form); B4 marks the
// register-shift variant.
void Assembler::EmitShiftRegister(Condition cond,
                                  Shift opcode,
                                  Register rd,
                                  Register rm,
                                  ShifterOperand so) {
  CHECK_NE(cond, kNoCondition);
  CHECK_EQ(so.type(), 0U);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftRegisterShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}
232
233
// Emit a conditional branch (B/BL) to 'label'. For a bound label the
// PC-relative offset is emitted directly; for an unbound label the branch
// sites are chained through the instruction's own offset field until Bind.
void Assembler::EmitBranch(Condition cond, Label* label, bool link) {
  if (label->IsBound()) {
    EmitType5(cond, label->Position() - buffer_.Size(), link);
  } else {
    // Capture the site position before emitting grows the buffer.
    int position = buffer_.Size();
    // Use the offset field of the branch instruction for linking the sites.
    EmitType5(cond, label->position_, link);
    label->LinkTo(position);
  }
}
244
// Data-processing instructions. The "s"-suffixed variants pass set_cc = 1
// (update the condition flags); the plain variants leave the flags alone.

// AND: rd := rn & so.
void Assembler::and_(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), AND, 0, rn, rd, so);
}


// EOR: rd := rn ^ so.
void Assembler::eor(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
}


// SUB: rd := rn - so.
void Assembler::sub(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
}

// RSB (reverse subtract): rd := so - rn.
void Assembler::rsb(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
}

// RSBS: reverse subtract, setting flags.
void Assembler::rsbs(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
}


// ADD: rd := rn + so.
void Assembler::add(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
}


// ADDS: add, setting flags.
void Assembler::adds(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
}


// SUBS: subtract, setting flags.
void Assembler::subs(Register rd, Register rn, ShifterOperand so,
                     Condition cond) {
  EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
}


// ADC: add with carry.
void Assembler::adc(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
}


// SBC: subtract with carry.
void Assembler::sbc(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
}


// RSC: reverse subtract with carry.
void Assembler::rsc(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
}


// TST: set flags from rn & so; the result is discarded (Rd field is 0).
void Assembler::tst(Register rn, ShifterOperand so, Condition cond) {
  CHECK_NE(rn, PC);  // Reserve tst pc instruction for exception handler marker.
  EmitType01(cond, so.type(), TST, 1, rn, R0, so);
}


// TEQ: set flags from rn ^ so; the result is discarded.
void Assembler::teq(Register rn, ShifterOperand so, Condition cond) {
  CHECK_NE(rn, PC);  // Reserve teq pc instruction for exception handler marker.
  EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
}


// CMP: set flags from rn - so.
void Assembler::cmp(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
}


// CMN: set flags from rn + so.
void Assembler::cmn(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
}
329
330
// ORR: rd := rn | so.
void Assembler::orr(Register rd, Register rn,
                    ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
}


// ORRS: bitwise or, setting flags.
void Assembler::orrs(Register rd, Register rn,
                     ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
}


// MOV: rd := so (the unused Rn field is encoded as R0).
void Assembler::mov(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
}


// MOVS: move, setting flags.
void Assembler::movs(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
}


// BIC (bit clear): rd := rn & ~so.
void Assembler::bic(Register rd, Register rn, ShifterOperand so,
                    Condition cond) {
  EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
}


// MVN: rd := ~so.
void Assembler::mvn(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
}


// MVNS: move-not, setting flags.
void Assembler::mvns(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
}
367
368
// CLZ: count leading zeros of rm into rd. Neither register may be PC
// (unpredictable per the architecture, hence the CHECKs).
void Assembler::clz(Register rd, Register rm, Condition cond) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  CHECK_NE(rd, PC);
  CHECK_NE(rm, PC);
  // Fixed CLZ pattern: should-be-one fields 0xf in bits 16-19 and 8-11.
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B22 | B21 | (0xf << 16) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
  Emit(encoding);
}
381
382
// MOVW: load a 16-bit immediate into rd (upper half cleared).
// imm16 is split into imm4 (bits 16-19) and imm12 (bits 0-11).
void Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}
390
391
// MOVT: write a 16-bit immediate into the top half of rd, preserving the
// bottom half. Differs from movw only by the B22 bit. Same imm4:imm12 split.
void Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}
399
400
// Emit a multiply-class instruction. Field placement: rn -> bits 16-19,
// rd -> bits 12-15, rs -> bits 8-11, rm -> bits 0-3, with the fixed
// 1001 marker (B7 | B4). Callers map architectural operands onto these
// encoding slots (see the comments at each call site).
void Assembler::EmitMulOp(Condition cond, int32_t opcode,
                          Register rd, Register rn,
                          Register rm, Register rs) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(rs, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = opcode |
      (static_cast<int32_t>(cond) << kConditionShift) |
      (static_cast<int32_t>(rn) << kRnShift) |
      (static_cast<int32_t>(rd) << kRdShift) |
      (static_cast<int32_t>(rs) << kRsShift) |
      B7 | B4 |
      (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
418
419
// MUL: rd := rn * rm.
void Assembler::mul(Register rd, Register rn,
                    Register rm, Condition cond) {
  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
  EmitMulOp(cond, 0, R0, rd, rn, rm);
}


// MLA (multiply-accumulate): rd := rn * rm + ra.
void Assembler::mla(Register rd, Register rn,
                    Register rm, Register ra, Condition cond) {
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B21, ra, rd, rn, rm);
}


// MLS (multiply-subtract): rd := ra - rn * rm.
void Assembler::mls(Register rd, Register rn,
                    Register rm, Register ra, Condition cond) {
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
}


// UMULL: rd_hi:rd_lo := rn * rm (unsigned 64-bit product).
void Assembler::umull(Register rd_lo, Register rd_hi,
                      Register rn, Register rm, Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
}
446
447
// Single-register loads/stores. Word and byte forms use addressing mode 2;
// halfword, signed, and doubleword forms use addressing mode 3.

// LDR: load word.
void Assembler::ldr(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, false, rd, ad);
}


// STR: store word.
void Assembler::str(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, false, rd, ad);
}


// LDRB: load unsigned byte.
void Assembler::ldrb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, true, rd, ad);
}


// STRB: store byte.
void Assembler::strb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, true, rd, ad);
}


// LDRH: load unsigned halfword.
void Assembler::ldrh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
}


// STRH: store halfword.
void Assembler::strh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
}


// LDRSB: load sign-extended byte.
void Assembler::ldrsb(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
}


// LDRSH: load sign-extended halfword.
void Assembler::ldrsh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
}


// LDRD: load doubleword into rd and rd+1; rd must be even.
void Assembler::ldrd(Register rd, Address ad, Condition cond) {
  CHECK_EQ(rd % 2, 0);
  EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
}


// STRD: store doubleword from rd and rd+1; rd must be even.
void Assembler::strd(Register rd, Address ad, Condition cond) {
  CHECK_EQ(rd % 2, 0);
  EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
}
498
499
// LDM: load multiple registers from consecutive memory at 'base'.
void Assembler::ldm(BlockAddressMode am,
                    Register base,
                    RegList regs,
                    Condition cond) {
  EmitMultiMemOp(cond, am, true, base, regs);
}


// STM: store multiple registers to consecutive memory at 'base'.
void Assembler::stm(BlockAddressMode am,
                    Register base,
                    RegList regs,
                    Condition cond) {
  EmitMultiMemOp(cond, am, false, base, regs);
}
514
515
// LDREX: load-exclusive word from [rn] into rt (marks the address for a
// following strex). The trailing should-be-one bits fill bits 0-3 and 8-11.
void Assembler::ldrex(Register rt, Register rn, Condition cond) {
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     L   |
                     (static_cast<int32_t>(rn) << kLdExRnShift) |
                     (static_cast<int32_t>(rt) << kLdExRtShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
  Emit(encoding);
}
529
530
// STREX: store-exclusive of rt to [rn]; rd receives 0 on success, 1 if the
// exclusive monitor was lost since the matching ldrex.
void Assembler::strex(Register rd,
                      Register rt,
                      Register rn,
                      Condition cond) {
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     (static_cast<int32_t>(rn) << kStrExRnShift) |
                     (static_cast<int32_t>(rd) << kStrExRdShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 |
                     (static_cast<int32_t>(rt) << kStrExRtShift) ;
  Emit(encoding);
}
548
549
// CLREX: clear the local exclusive monitor. Unconditional (uses the 0b1111
// special condition field), fixed encoding.
void Assembler::clrex() {
  int32_t encoding = (kSpecialCondition << kConditionShift) |
                     B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
  Emit(encoding);
}
555
556
// NOP (conditional): architected no-operation hint encoding.
void Assembler::nop(Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B25 | B24 | B21 | (0xf << 12);
  Emit(encoding);
}
563
564
// VMOV sn, rt: copy a core register into a single-precision register.
// sn is split: top 4 bits -> Vn field (bits 16-19), low bit -> N (bit 7).
// rt may not be SP or PC (unpredictable).
void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}
578
579
// VMOV rt, sn: copy a single-precision register into a core register.
// Same encoding as vmovsr plus the direction bit B20. rt may not be SP/PC.
void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B20 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}
593
594
// VMOV sm/sm+1, rt, rt2: copy two core registers into the adjacent pair
// sm, sm+1 (hence sm may not be S31). rt/rt2 may not be SP or PC.
void Assembler::vmovsrr(SRegister sm, Register rt, Register rt2,
                        Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
614
615
// VMOV rt, rt2, sm/sm+1: copy the adjacent pair sm, sm+1 into two core
// registers. rt and rt2 must be distinct (both written), neither SP nor PC.
void Assembler::vmovrrs(Register rt, Register rt2, SRegister sm,
                        Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
636
637
// VMOV dm, rt, rt2: copy two core registers into a double-precision
// register. dm is split: bit 4 -> M (bit 5), low 4 bits -> Vm field.
void Assembler::vmovdrr(DRegister dm, Register rt, Register rt2,
                        Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
656
657
// VMOV rt, rt2, dm: copy a double-precision register into two distinct core
// registers (direction bit B20 set relative to vmovdrr).
void Assembler::vmovrrd(Register rt, Register rt2, DRegister dm,
                              Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
677
678
// VLDR.32 sd, [..]: load a single-precision register. sd is split into the
// D bit (B22) and the Vd field; ad.vencoding() supplies the VFP base/offset.
void Assembler::vldrs(SRegister sd, Address ad, Condition cond) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}
689
690
// VSTR.32 sd, [..]: store a single-precision register. The base register
// extracted from the address encoding may not be PC.
void Assembler::vstrs(SRegister sd, Address ad, Condition cond) {
  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}
702
703
// VLDR.64 dd, [..]: load a double-precision register. dd is split into the
// D bit (B22) and the Vd field (bits 12-15).
void Assembler::vldrd(DRegister dd, Address ad, Condition cond) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}
714
715
// VSTR.64 dd, [..]: store a double-precision register. The base register
// extracted from the address encoding may not be PC.
void Assembler::vstrd(DRegister dd, Address ad, Condition cond) {
  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}
727
728
// Emit a single-precision VFP data-processing instruction (sd op sn, sm).
// Each SRegister number is split into a 4-bit field plus a 1-bit extension:
// sd -> Vd/D, sn -> Vn/N, sm -> Vm/M. 'opcode' carries the op-specific bits.
void Assembler::EmitVFPsss(Condition cond, int32_t opcode,
                           SRegister sd, SRegister sn, SRegister sm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(sn) & 1)*B7) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
745
746
// Emit a double-precision VFP data-processing instruction (dd op dn, dm).
// Each DRegister number is split into a 4-bit field (low nibble) plus a
// 1-bit extension (bit 4): dd -> Vd/D, dn -> Vn/N, dm -> Vm/M. B8 marks
// the double-precision variant.
void Assembler::EmitVFPddd(Condition cond, int32_t opcode,
                           DRegister dd, DRegister dn, DRegister dm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(dn, kNoDRegister);
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
763
764
// VMOV.F32 sd, sm: register-to-register copy (unused Sn slot encoded as S0).
void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
}


// VMOV.F64 dd, dm: register-to-register copy (unused Dn slot encoded as D0).
void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
}
773
774
775bool Assembler::vmovs(SRegister sd, float s_imm, Condition cond) {
776  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
777  if (((imm32 & ((1 << 19) - 1)) == 0) &&
778      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
779       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
780    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
781        ((imm32 >> 19) & ((1 << 6) -1));
782    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
783               sd, S0, S0);
784    return true;
785  }
786  return false;
787}
788
789
790bool Assembler::vmovd(DRegister dd, double d_imm, Condition cond) {
791  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
792  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
793      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
794       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
795    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
796        ((imm64 >> 48) & ((1 << 6) -1));
797    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
798               dd, D0, D0);
799    return true;
800  }
801  return false;
802}
803
804
// Three-operand VFP arithmetic. The "s" variants are single-precision
// (EmitVFPsss), the "d" variants double-precision (EmitVFPddd).

// VADD: sd := sn + sm.
void Assembler::vadds(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
}


// VADD: dd := dn + dm.
void Assembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
}


// VSUB: sd := sn - sm.
void Assembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
}


// VSUB: dd := dn - dm.
void Assembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
}


// VMUL: sd := sn * sm.
void Assembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B21, sd, sn, sm);
}


// VMUL: dd := dn * dm.
void Assembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B21, dd, dn, dm);
}


// VMLA (multiply-accumulate): sd := sd + sn * sm.
void Assembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, 0, sd, sn, sm);
}


// VMLA (multiply-accumulate): dd := dd + dn * dm.
void Assembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, 0, dd, dn, dm);
}


// VMLS (multiply-subtract): sd := sd - sn * sm.
void Assembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B6, sd, sn, sm);
}


// VMLS (multiply-subtract): dd := dd - dn * dm.
void Assembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B6, dd, dn, dm);
}


// VDIV: sd := sn / sm.
void Assembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
                      Condition cond) {
  EmitVFPsss(cond, B23, sd, sn, sm);
}


// VDIV: dd := dn / dm.
void Assembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
                      Condition cond) {
  EmitVFPddd(cond, B23, dd, dn, dm);
}
875
876
// Two-operand VFP instructions; the unused second source slot is encoded as
// S0/D0.

// VABS: sd := |sm|.
void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
}


// VABS: dd := |dm|.
void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
}


// VNEG: sd := -sm.
void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
}


// VNEG: dd := -dm.
void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
}


// VSQRT: sd := sqrt(sm).
void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}

// VSQRT: dd := sqrt(dm).
void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}
904

// Emit a VFP instruction with a single-precision destination and a
// double-precision source (e.g. double-to-float/int conversions).
void Assembler::EmitVFPsd(Condition cond, int32_t opcode,
                          SRegister sd, DRegister dm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
919
920
// Emit a VFP instruction with a double-precision destination and a
// single-precision source (e.g. float/int-to-double conversions).
void Assembler::EmitVFPds(Condition cond, int32_t opcode,
                          DRegister dd, SRegister sm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}
934
935
// VCVT conversions between floating-point formats and 32-bit integers
// (integer values travel in SRegisters) plus VCMP comparisons. Naming is
// vcvt<dst><src>: s=single, d=double, i=signed int, u=unsigned int.

// VCVT.F32.F64: sd := (float) dm.
void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
}


// VCVT.F64.F32: dd := (double) sm.
void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
}


// VCVT.S32.F32: sd := signed int from single sm.
void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
}


// VCVT.S32.F64: sd := signed int from double dm.
void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
}


// VCVT.F32.S32: sd := single from signed int in sm.
void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
}


// VCVT.F64.S32: dd := double from signed int in sm.
void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
}


// VCVT.U32.F32: sd := unsigned int from single sm.
void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
}


// VCVT.U32.F64: sd := unsigned int from double dm.
void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
}


// VCVT.F32.U32: sd := single from unsigned int in sm.
void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}


// VCVT.F64.U32: dd := double from unsigned int in sm.
void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}


// VCMP.F32: compare sd with sm, setting the FPSCR flags.
void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}


// VCMP.F64: compare dd with dm, setting the FPSCR flags.
void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}


// VCMP.F32 sd, #0.0.
void Assembler::vcmpsz(SRegister sd, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}


// VCMP.F64 dd, #0.0.
void Assembler::vcmpdz(DRegister dd, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}
1004
1005
// Emits VMRS APSR_nzcv, FPSCR: transfers the VFP status flags to the CPSR.
// Encoding Rt = PC (15) selects the APSR_nzcv form rather than a register.
void Assembler::vmstat(Condition cond) {  // VMRS APSR_nzcv, FPSCR
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
                     (static_cast<int32_t>(PC)*B12) |
                     B11 | B9 | B4;
  Emit(encoding);
}
1014
1015
// Emits a supervisor call (SVC/SWI) with a 24-bit immediate, always with
// condition AL.
void Assembler::svc(uint32_t imm24) {
  CHECK(IsUint(24, imm24));  // Only 24 bits of immediate are encodable.
  int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
  Emit(encoding);
}
1021
1022
// Emits a BKPT (breakpoint) instruction. The 16-bit immediate is split into
// a high 12-bit field (bits 8..19) and a low 4-bit field (bits 0..3).
void Assembler::bkpt(uint16_t imm16) {
  int32_t encoding = (AL << kConditionShift) | B24 | B21 |
                     ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
  Emit(encoding);
}
1028
1029
1030void Assembler::b(Label* label, Condition cond) {
1031  EmitBranch(cond, label, false);
1032}
1033
1034
1035void Assembler::bl(Label* label, Condition cond) {
1036  EmitBranch(cond, label, true);
1037}
1038
1039
// Emits BLX rm: branch with link and exchange to the address in rm.
// The (0xfff << 8) field is the SBO (should-be-one) bits of this encoding.
void Assembler::blx(Register rm, Condition cond) {
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B5 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
1048
1049
// Marks an exception handler location: emits a TST marker instruction,
// then an always-taken branch over an embedded branch to 'label' (the
// skipped branch records the handler address in the instruction stream).
void Assembler::MarkExceptionHandler(Label* label) {
  EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
  Label l;
  b(&l);  // Jump over the handler-address branch below.
  EmitBranch(AL, label, false);
  Bind(&l);
}
1057
1058
// Binds 'label' to the current buffer position and back-patches every
// branch that was linked to it. While unbound, each linked branch stores
// the position of the previous linked branch in its offset field, forming
// a chain that is walked and rewritten here.
void Assembler::Bind(Label* label) {
  CHECK(!label->IsBound());  // A label may only be bound once.
  int bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    int32_t position = label->Position();
    int32_t next = buffer_.Load<int32_t>(position);
    // Patch this branch with the real offset to the bind point.
    int32_t encoded = Assembler::EncodeBranchOffset(bound_pc - position, next);
    buffer_.Store<int32_t>(position, encoded);
    // Advance to the previous branch in the chain.
    label->position_ = Assembler::DecodeBranchOffset(next);
  }
  label->BindTo(bound_pc);
}
1071
1072
// Encodes an arbitrary 32-bit value into the instruction stream as a series
// of TST instructions (which have no effect other than flags they would set
// if executed; here they serve as data carriers). Each TST holds one byte;
// the final byte is marked with condition MI, earlier bytes with VS, so a
// decoder can find the end of the sequence.
void Assembler::EncodeUint32InTstInstructions(uint32_t data) {
  // TODO: Consider using movw ip, <16 bits>.
  while (!IsUint(8, data)) {
    tst(R0, ShifterOperand(data & 0xFF), VS);  // Non-final byte.
    data >>= 8;
  }
  tst(R0, ShifterOperand(data), MI);  // Final byte.
}
1081
1082
// Rewrites the 24-bit offset field of branch instruction 'inst' to encode
// 'offset' (a byte distance). Returns the patched instruction word.
int32_t Assembler::EncodeBranchOffset(int offset, int32_t inst) {
  // The offset is off by 8 due to the way the ARM CPUs read PC.
  offset -= 8;
  CHECK(IsAligned(offset, 4));  // Branch targets are word-aligned.
  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset));

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;  // The encoded field counts words, not bytes.
  offset &= kBranchOffsetMask;
  return (inst & ~kBranchOffsetMask) | offset;
}
1094
1095
// Extracts the byte offset from a branch instruction word; inverse of
// EncodeBranchOffset. The <<8 then >>6 pair sign-extends the 24-bit field
// while shifting it left by 2 (word count back to bytes).
int Assembler::DecodeBranchOffset(int32_t inst) {
  // Sign-extend, left-shift by 2, then add 8.
  return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
}
1100
1101void Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
1102  AddConstant(rd, rd, value, cond);
1103}
1104
1105
// Emits the shortest sequence that computes rd = rn + value. Tries, in
// order: plain mov (value 0), add/sub with an immediate shifter operand,
// mvn into IP followed by add/sub, and finally a movw/movt pair into IP.
// Clobbers IP in the fallback paths, so rn must not be IP there.
void Assembler::AddConstant(Register rd, Register rn, int32_t value,
                            Condition cond) {
  if (value == 0) {
    if (rd != rn) {
      mov(rd, ShifterOperand(rn), cond);
    }
    return;
  }
  // We prefer to select the shorter code sequence rather than selecting add for
  // positive values and sub for negatives ones, which would slightly improve
  // the readability of generated code for some constants.
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    add(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    sub(rd, rn, shifter_op, cond);
  } else {
    CHECK(rn != IP);  // IP is about to be clobbered as scratch.
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      mvn(IP, shifter_op, cond);
      add(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      sub(rd, rn, ShifterOperand(IP), cond);
    } else {
      // General case: materialize the full 32-bit constant in IP.
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      add(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
1140
1141
// Like AddConstant, but uses the flag-setting adds/subs forms so the CPSR
// reflects the result. No value==0 shortcut: the flags must still be set.
// Clobbers IP in the fallback paths, so rn must not be IP there.
void Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
                                    Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    adds(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    subs(rd, rn, shifter_op, cond);
  } else {
    CHECK(rn != IP);  // IP is about to be clobbered as scratch.
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      mvn(IP, shifter_op, cond);
      adds(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      subs(rd, rn, ShifterOperand(IP), cond);
    } else {
      // General case: materialize the full 32-bit constant in IP.
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      adds(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
1167
1168
// Loads a 32-bit constant into rd with the shortest sequence available:
// mov with immediate shifter operand, mvn of the complement, or a
// movw/movt pair (movt skipped when the high half is zero).
void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    mov(rd, shifter_op, cond);
  } else if (ShifterOperand::CanHold(~value, &shifter_op)) {
    mvn(rd, shifter_op, cond);
  } else {
    movw(rd, Low16Bits(value), cond);
    uint16_t value_high = High16Bits(value);
    if (value_high != 0) {
      movt(rd, value_high, cond);
    }
  }
}
1183
1184
// Returns whether 'offset' can be encoded directly in a single load of the
// given type; callers fall back to computing the address in IP otherwise.
bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}
1203
1204
// Returns whether 'offset' can be encoded directly in a single store of the
// given type; mirror of CanHoldLoadOffset.
bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}
1221
1222
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset.
// Loads reg from [base + offset], choosing the load form by 'type'. When
// the offset does not fit the encoding, the effective address is first
// computed into IP (so base must not be IP in that case).
void Assembler::LoadFromOffset(LoadOperandType type,
                               Register reg,
                               Register base,
                               int32_t offset,
                               Condition cond) {
  if (!Address::CanHoldLoadOffset(type, offset)) {
    CHECK(base != IP);  // IP is used as scratch below.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(type, offset));
  switch (type) {
    case kLoadSignedByte:
      ldrsb(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedByte:
      ldrb(reg, Address(base, offset), cond);
      break;
    case kLoadSignedHalfword:
      ldrsh(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedHalfword:
      ldrh(reg, Address(base, offset), cond);
      break;
    case kLoadWord:
      ldr(reg, Address(base, offset), cond);
      break;
    case kLoadWordPair:
      ldrd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1261
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
// Loads single-precision 'reg' from [base + offset]; falls back to
// computing the address in IP for out-of-range offsets.
void Assembler::LoadSFromOffset(SRegister reg,
                                Register base,
                                int32_t offset,
                                Condition cond) {
  if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
    CHECK_NE(base, IP);  // IP is used as scratch below.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
  vldrs(reg, Address(base, offset), cond);
}
1278
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
// Loads double-precision 'reg' from [base + offset]; falls back to
// computing the address in IP for out-of-range offsets.
void Assembler::LoadDFromOffset(DRegister reg,
                                Register base,
                                int32_t offset,
                                Condition cond) {
  if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
    CHECK_NE(base, IP);  // IP is used as scratch below.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
  vldrd(reg, Address(base, offset), cond);
}
1295
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset.
// Stores reg to [base + offset], choosing the store form by 'type'. When
// the offset does not fit the encoding, the effective address is first
// computed into IP (so neither reg nor base may be IP in that case).
void Assembler::StoreToOffset(StoreOperandType type,
                              Register reg,
                              Register base,
                              int32_t offset,
                              Condition cond) {
  if (!Address::CanHoldStoreOffset(type, offset)) {
    CHECK(reg != IP);   // IP is used as scratch below;
    CHECK(base != IP);  // the stored value must survive it.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(type, offset));
  switch (type) {
    case kStoreByte:
      strb(reg, Address(base, offset), cond);
      break;
    case kStoreHalfword:
      strh(reg, Address(base, offset), cond);
      break;
    case kStoreWord:
      str(reg, Address(base, offset), cond);
      break;
    case kStoreWordPair:
      strd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}
1329
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
// Stores single-precision 'reg' to [base + offset]; falls back to
// computing the address in IP for out-of-range offsets.
void Assembler::StoreSToOffset(SRegister reg,
                               Register base,
                               int32_t offset,
                               Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
    CHECK_NE(base, IP);  // IP is used as scratch below.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
  vstrs(reg, Address(base, offset), cond);
}
1346
// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
// Stores double-precision 'reg' to [base + offset]; falls back to
// computing the address in IP for out-of-range offsets.
void Assembler::StoreDToOffset(DRegister reg,
                               Register base,
                               int32_t offset,
                               Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
    CHECK_NE(base, IP);  // IP is used as scratch below.
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
  vstrd(reg, Address(base, offset), cond);
}
1363
1364void Assembler::Push(Register rd, Condition cond) {
1365  str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
1366}
1367
1368void Assembler::Pop(Register rd, Condition cond) {
1369  ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
1370}
1371
1372void Assembler::PushList(RegList regs, Condition cond) {
1373  stm(DB_W, SP, regs, cond);
1374}
1375
1376void Assembler::PopList(RegList regs, Condition cond) {
1377  ldm(IA_W, SP, regs, cond);
1378}
1379
1380void Assembler::Mov(Register rd, Register rm, Condition cond) {
1381  if (rd != rm) {
1382    mov(rd, ShifterOperand(rm), cond);
1383  }
1384}
1385
// Logical shift left: rd = rm << shift_imm.
void Assembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
                    Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsl if no shift is wanted.
  mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
}
1391
// Logical shift right: rd = rm >> shift_imm (unsigned, 1..32).
void Assembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
                    Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsr if no shift is wanted.
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax: 32 encodes as 0.
  mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
}
1398
// Arithmetic shift right: rd = rm >> shift_imm (sign-propagating, 1..32).
void Assembler::Asr(Register rd, Register rm, uint32_t shift_imm,
                    Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Asr if no shift is wanted.
  if (shift_imm == 32) shift_imm = 0;  // Comply to UAL syntax: 32 encodes as 0.
  mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
}
1405
// Rotate right: rd = rm rotated right by shift_imm (1..31).
void Assembler::Ror(Register rd, Register rm, uint32_t shift_imm,
                    Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Use Rrx instruction.
  mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
}
1411
// Rotate right with extend: ROR with a 0 shift amount encodes RRX.
void Assembler::Rrx(Register rd, Register rm, Condition cond) {
  mov(rd, ShifterOperand(rm, ROR, 0), cond);
}
1415
// Emits the method prologue: allocates 'frame_size' bytes of stack and
// spills the method register (R0), the callee saves in 'spill_regs', and LR
// into the bottom of the new frame.
void Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                           const std::vector<ManagedRegister>& spill_regs) {
  CHECK(IsAligned(frame_size, kStackAlignment));
  CHECK_EQ(R0, method_reg.AsCoreRegister());  // Calling convention: method in R0.
  AddConstant(SP, -frame_size);
  RegList spill_list = 1 << R0 | 1 << LR;
  for(size_t i = 0; i < spill_regs.size(); i++) {
    Register reg = spill_regs.at(i).AsCoreRegister();
    // check assumption LR is the last register that gets spilled
    CHECK_LT(reg, LR);
    spill_list |= 1 << reg;
  }
  // Store spill list from (low to high number register) starting at SP
  // incrementing after each store but not updating SP
  stm(IA, SP, spill_list, AL);
}
1432
// Emits the method epilogue: reloads LR from its spill slot (past the
// spilled registers and method slot), pops the frame, and returns by
// moving LR into PC.
void Assembler::RemoveFrame(size_t frame_size,
                            const std::vector<ManagedRegister>& spill_regs) {
  CHECK(IsAligned(frame_size, kStackAlignment));
  // Reload LR. TODO: reload any saved callee saves from spill_regs
  LoadFromOffset(kLoadWord, LR, SP, (spill_regs.size() + 1) * kPointerSize);
  AddConstant(SP, frame_size);
  mov(PC, ShifterOperand(LR));  // Return.
}
1441
// Reloads each spilled register from the spill area at SP + displacement.
// Slot 0 is skipped (i + 1): it holds the method register, matching the
// layout written by BuildFrame.
void Assembler::FillFromSpillArea(const std::vector<ManagedRegister>& spill_regs,
                                  size_t displacement) {
  for(size_t i = 0; i < spill_regs.size(); i++) {
    Register reg = spill_regs.at(i).AsCoreRegister();
    LoadFromOffset(kLoadWord, reg, SP, displacement + ((i + 1) * kPointerSize));
  }
}
1449
1450void Assembler::IncreaseFrameSize(size_t adjust) {
1451  CHECK(IsAligned(adjust, kStackAlignment));
1452  AddConstant(SP, -adjust);
1453}
1454
1455void Assembler::DecreaseFrameSize(size_t adjust) {
1456  CHECK(IsAligned(adjust, kStackAlignment));
1457  AddConstant(SP, adjust);
1458}
1459
// Stores 'src' into the frame slot at 'dest', dispatching on the register
// kind: core register (4 bytes), register pair (8 bytes, low word first),
// or VFP single/double register.
void Assembler::Store(FrameOffset dest, ManagedRegister src, size_t size) {
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);  // Nothing to store.
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                  SP, dest.Int32Value() + 4);
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
  } else {
    CHECK(src.IsDRegister());
    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
  }
}
1478
1479void Assembler::StoreRef(FrameOffset dest, ManagedRegister src) {
1480  CHECK(src.IsCoreRegister());
1481  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
1482}
1483
1484void Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister src) {
1485  CHECK(src.IsCoreRegister());
1486  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
1487}
1488
// Copies a reference between two frame slots via the scratch register.
void Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                        ManagedRegister scratch) {
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
1494
1495void Assembler::LoadRef(ManagedRegister dest, ManagedRegister base,
1496                        MemberOffset offs) {
1497  CHECK(dest.IsCoreRegister() && dest.IsCoreRegister());
1498  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
1499                 base.AsCoreRegister(), offs.Int32Value());
1500}
1501
// Materializes 'imm' in the scratch register, then stores it to the frame
// slot at 'dest'.
void Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                      ManagedRegister scratch) {
  CHECK(scratch.IsCoreRegister());
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
1508
// Materializes 'imm' in the scratch register, then stores it at the given
// offset from the thread register (TR).
void Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                                       ManagedRegister scratch) {
  CHECK(scratch.IsCoreRegister());
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
}
1515
// Loads frame slot 'src' into 'dest', dispatching on the register kind:
// core register (4 bytes), register pair (8 bytes, low word first), or
// VFP single/double register. Mirror of Store above.
void Assembler::Load(ManagedRegister dest, FrameOffset src, size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);  // Nothing to load.
  } else if (dest.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    LoadFromOffset(kLoadWord, dest.AsRegisterPairLow(), SP, src.Int32Value());
    LoadFromOffset(kLoadWord, dest.AsRegisterPairHigh(),
                   SP, src.Int32Value() + 4);
  } else if (dest.IsSRegister()) {
    LoadSFromOffset(dest.AsSRegister(), SP, src.Int32Value());
  } else {
    CHECK(dest.IsDRegister());
    LoadDFromOffset(dest.AsDRegister(), SP, src.Int32Value());
  }
}
1534
// Loads a raw pointer from the thread structure (offset from TR) into dest.
void Assembler::LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset offs) {
  CHECK(dest.IsCoreRegister());
  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
                 TR, offs.Int32Value());
}
1540
// Copies a word from the thread structure (TR + thr_offs) to the frame
// (SP + fr_offs) via the scratch register.
void Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
                                     ManagedRegister scratch) {
  CHECK(scratch.IsCoreRegister());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, thr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                SP, fr_offs.Int32Value());
}
1549
// Copies a word from the frame (SP + fr_offs) to the thread structure
// (TR + thr_offs) via the scratch register. Mirror of CopyRawPtrFromThread.
void Assembler::CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
                                   ManagedRegister scratch) {
  CHECK(scratch.IsCoreRegister());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}
1558
// Computes the address SP + fr_offs into the scratch register and stores it
// into the thread structure at TR + thr_offs.
void Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
                                         FrameOffset fr_offs,
                                         ManagedRegister scratch) {
  CHECK(scratch.IsCoreRegister());
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}
1567
1568void Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
1569  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
1570}
1571
// Moves src to dest when they differ. Only core-register moves are
// implemented; VFP register moves abort with UNIMPLEMENTED.
void Assembler::Move(ManagedRegister dest, ManagedRegister src) {
  if (!dest.Equals(src)) {
    if (dest.IsCoreRegister()) {
      CHECK(src.IsCoreRegister());
      mov(dest.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
    } else {
      // TODO: VFP
      UNIMPLEMENTED(FATAL) << ": VFP";
    }
  }
}
1583
// Copies 'size' bytes (4 or 8) between frame slots via the scratch
// register, one word at a time.
void Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
                     size_t size) {
  CHECK(scratch.IsCoreRegister());
  CHECK(size == 4 || size == 8);  // Only word and double-word copies.
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                   SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                  SP, dest.Int32Value());
  } else if (size == 8) {
    // Low word, then high word.
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                   SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                  SP, dest.Int32Value());
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                   SP, src.Int32Value() + 4);
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                  SP, dest.Int32Value() + 4);
  }
}
1604
// Computes a stack handle (the address SP + handle_offset) into out_reg.
// With null_allowed, a null reference in the handle slot produces a 0
// handle instead of the slot address; this uses conditional execution
// after the cmp rather than branches.
void Assembler::CreateStackHandle(ManagedRegister out_reg,
                                  FrameOffset handle_offset,
                                  ManagedRegister in_reg, bool null_allowed) {
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister());
  CHECK(out_reg.IsCoreRegister());
  if (null_allowed) {
    // Null values get a handle value of 0.  Otherwise, the handle value is
    // the address in the stack handle block holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      // Reference not in a register yet; load it so it can be tested.
      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                     SP, handle_offset.Int32Value());
      in_reg = out_reg;
    }
    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);  // Null case.
    }
    AddConstant(out_reg.AsCoreRegister(), SP, handle_offset.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, handle_offset.Int32Value(), AL);
  }
}
1628
// As the register variant above, but the resulting handle is written to the
// frame slot 'out_off' instead of a register; 'scratch' holds intermediates.
void Assembler::CreateStackHandle(FrameOffset out_off,
                                  FrameOffset handle_offset,
                                  ManagedRegister scratch, bool null_allowed) {
  CHECK(scratch.IsCoreRegister());
  if (null_allowed) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
                   handle_offset.Int32Value());
    // Null values get a handle value of 0.  Otherwise, the handle value is
    // the address in the stack handle block holding the reference.
    // e.g. scratch = (handle == 0) ? 0 : (SP+handle_offset)
    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
    // If null, scratch already holds 0; only overwrite for non-null.
    AddConstant(scratch.AsCoreRegister(), SP, handle_offset.Int32Value(), NE);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, handle_offset.Int32Value(), AL);
  }
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
1646
1647void Assembler::LoadReferenceFromStackHandle(ManagedRegister out_reg,
1648                                             ManagedRegister in_reg) {
1649  CHECK(out_reg.IsCoreRegister());
1650  CHECK(in_reg.IsCoreRegister());
1651  Label null_arg;
1652  if (!out_reg.Equals(in_reg)) {
1653    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
1654  }
1655  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
1656  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
1657                 in_reg.AsCoreRegister(), 0, NE);
1658}
1659
1660void Assembler::ValidateRef(ManagedRegister src, bool could_be_null) {
1661  // TODO: not validating references
1662}
1663
1664void Assembler::ValidateRef(FrameOffset src, bool could_be_null) {
1665  // TODO: not validating references
1666}
1667
// Indirect call: loads the code pointer at base + offset into scratch and
// branches to it with blx (LR receives the return address).
void Assembler::Call(ManagedRegister base, Offset offset,
                     ManagedRegister scratch) {
  CHECK(base.IsCoreRegister());
  CHECK(scratch.IsCoreRegister());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 base.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call
}
1677
// Doubly-indirect call through a frame slot: scratch = *(SP + base), then
// the code pointer at scratch + offset is loaded and called via blx.
void Assembler::Call(FrameOffset base, Offset offset,
                     ManagedRegister scratch) {
  CHECK(scratch.IsCoreRegister());
  // Call *(*(SP + base) + offset)
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 scratch.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call
}
1689
// Emits a suspend-check: loads the thread's suspend count and, when
// non-zero, branches to an out-of-line slow path (queued on the buffer and
// emitted later) that saves the return value and calls the runtime.
void Assembler::SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
                            FrameOffset return_save_location,
                            size_t return_size) {
  SuspendCountSlowPath* slow = new SuspendCountSlowPath(return_reg,
                                                        return_save_location,
                                                        return_size);
  buffer_.EnqueueSlowPath(slow);  // Buffer takes ownership; emitted later.
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, Thread::SuspendCountOffset().Int32Value());
  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
  b(slow->Entry(), NE);  // Suspend requested: take the slow path.
  Bind(slow->Continuation());
}
1703
// Out-of-line code for SuspendPoll: spills the return value, calls the
// thread's suspend-count entry point with SP in R0, restores the return
// value, and branches back to the continuation.
void SuspendCountSlowPath::Emit(Assembler* sp_asm) {
  sp_asm->Bind(&entry_);
  // Save return value
  sp_asm->Store(return_save_location_, return_register_, return_size_);
  // Pass top of stack as argument
  sp_asm->mov(R0, ShifterOperand(SP));
  sp_asm->LoadFromOffset(kLoadWord, R12, TR,
                         Thread::SuspendCountEntryPointOffset().Int32Value());
  // Note: assume that link register will be spilled/filled on method entry/exit
  sp_asm->blx(R12);
  // Reload return value
  sp_asm->Load(return_register_, return_save_location_, return_size_);
  sp_asm->b(&continuation_);
}
1718
// Emits an exception check: loads the thread's pending-exception slot and,
// when non-null, branches to an out-of-line slow path that calls into the
// runtime's exception delivery.
void Assembler::ExceptionPoll(ManagedRegister scratch) {
  ExceptionSlowPath* slow = new ExceptionSlowPath();
  buffer_.EnqueueSlowPath(slow);  // Buffer takes ownership; emitted later.
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, Thread::ExceptionOffset().Int32Value());
  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
  b(slow->Entry(), NE);  // Exception pending: take the slow path.
  Bind(slow->Continuation());
}
1728
// Out-of-line code for ExceptionPoll: calls the thread's exception entry
// point with SP in R0. The trailing branch exists only to terminate the
// path; delivery is expected not to return (see TODO).
void ExceptionSlowPath::Emit(Assembler* sp_asm) {
  sp_asm->Bind(&entry_);
  // Pass top of stack as argument
  sp_asm->mov(R0, ShifterOperand(SP));
  sp_asm->LoadFromOffset(kLoadWord, R12, TR,
                         Thread::ExceptionEntryPointOffset().Int32Value());
  // Note: assume that link register will be spilled/filled on method entry/exit
  sp_asm->blx(R12);
  // TODO: this call should never return as it should make a long jump to
  // the appropriate catch block
  sp_asm->b(&continuation_);
}
1741
1742}  // namespace art
1743