assembler_arm.cc revision eb8167a4f4d27fce0530f6724ab8032610cd146b
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm.h"

#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace arm {

// Instruction encoding bits.
enum {
  H   = 1 << 5,   // halfword (or byte)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  B0 = 1,
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask = 15 << 12,  // in str instruction
  CondMask = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask = (1 << 24) - 1,
  Off12Mask = (1 << 12) - 1,

  // ldrex/strex register field encodings.
  kLdExRnShift = 16,
  kLdExRtShift = 12,
  kStrExRnShift = 16,
  kStrExRdShift = 12,
  kStrExRtShift = 0,
};
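
// Note (added for exposition, not in the original revision): encodings are
// assembled by OR-ing the fields above together. For example,
// "ldreq r0, [r1, #4]" composes roughly as
//   (EQ << kConditionShift) | B26 | P | U | L |
//   (R1 << kRnShift) | (R0 << kRdShift) | 4
// with P/U selecting offset addressing with a positive immediate.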


static const char* kRegisterNames[] = {
  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
  "fp", "ip", "sp", "lr", "pc"
};
std::ostream& operator<<(std::ostream& os, const Register& rhs) {
  if (rhs >= R0 && rhs <= PC) {
    os << kRegisterNames[rhs];
  } else {
    os << "Register[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
  if (rhs >= S0 && rhs < kNumberOfSRegisters) {
    os << "s" << static_cast<int>(rhs);
  } else {
    os << "SRegister[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
    os << "d" << static_cast<int>(rhs);
  } else {
    os << "DRegister[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


static const char* kConditionNames[] = {
  "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
  "LE", "AL",
};
std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
  if (rhs >= EQ && rhs <= AL) {
    os << kConditionNames[rhs];
  } else {
    os << "Condition[" << static_cast<int>(rhs) << "]";
  }
  return os;
}

void ArmAssembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}


void ArmAssembler::EmitType01(Condition cond,
                              int type,
                              Opcode opcode,
                              int set_cc,
                              Register rn,
                              Register rd,
                              ShifterOperand so) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     type << kTypeShift |
                     static_cast<int32_t>(opcode) << kOpcodeShift |
                     set_cc << kSShift |
                     static_cast<int32_t>(rn) << kRnShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding();
  Emit(encoding);
}
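
// Note (added for exposition, not in the original revision): EmitType01 lays
// down the A32 data-processing format
//   cond(31:28) 00 I(25) opcode(24:21) S(20) Rn(19:16) Rd(15:12)
//   shifter_operand(11:0)
// where "type" supplies the I bit (1 = immediate operand) taken from the
// ShifterOperand. For instance, add(R0, R1, ShifterOperand(1), EQ) yields
// "addeq r0, r1, #1".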


void ArmAssembler::EmitType5(Condition cond, int offset, bool link) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     5 << kTypeShift |
                     (link ? 1 : 0) << kLinkShift;
  Emit(ArmAssembler::EncodeBranchOffset(offset, encoding));
}


void ArmAssembler::EmitMemOp(Condition cond,
                             bool load,
                             bool byte,
                             Register rd,
                             Address ad) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B26 |
                     (load ? L : 0) |
                     (byte ? B : 0) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding();
  Emit(encoding);
}


void ArmAssembler::EmitMemOpAddressMode3(Condition cond,
                                         int32_t mode,
                                         Register rd,
                                         Address ad) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B22  |
                     mode |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     ad.encoding3();
  Emit(encoding);
}


void ArmAssembler::EmitMultiMemOp(Condition cond,
                                  BlockAddressMode am,
                                  bool load,
                                  Register base,
                                  RegList regs) {
  CHECK_NE(base, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 |
                     am |
                     (load ? L : 0) |
                     (static_cast<int32_t>(base) << kRnShift) |
                     regs;
  Emit(encoding);
}


void ArmAssembler::EmitShiftImmediate(Condition cond,
                                      Shift opcode,
                                      Register rd,
                                      Register rm,
                                      ShifterOperand so) {
  CHECK_NE(cond, kNoCondition);
  CHECK_EQ(so.type(), 1U);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftImmShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}


void ArmAssembler::EmitShiftRegister(Condition cond,
                                     Shift opcode,
                                     Register rd,
                                     Register rm,
                                     ShifterOperand so) {
  CHECK_NE(cond, kNoCondition);
  CHECK_EQ(so.type(), 0U);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     static_cast<int32_t>(MOV) << kOpcodeShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     so.encoding() << kShiftRegisterShift |
                     static_cast<int32_t>(opcode) << kShiftShift |
                     B4 |
                     static_cast<int32_t>(rm);
  Emit(encoding);
}


void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) {
  if (label->IsBound()) {
    EmitType5(cond, label->Position() - buffer_.Size(), link);
  } else {
    int position = buffer_.Size();
    // Use the offset field of the branch instruction for linking the sites.
    EmitType5(cond, label->position_, link);
    label->LinkTo(position);
  }
}
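
// Note (added for exposition, not in the original revision): while a label is
// unbound, the branches that target it form a singly linked list threaded
// through their own offset fields: each new branch records the previous link
// position and becomes the list head. Bind() below walks that chain and
// patches every site with the real offset once the target is known.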

void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so,
                        Condition cond) {
  EmitType01(cond, so.type(), AND, 0, rn, rd, so);
}


void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
}


void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
}

void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
}

void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so,
                        Condition cond) {
  EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
}


void ArmAssembler::add(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
}


void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so,
                        Condition cond) {
  EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
}


void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so,
                        Condition cond) {
  EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
}


void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
}


void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
}


void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
}


void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) {
  CHECK_NE(rn, PC);  // Reserve tst pc instruction for exception handler marker.
  EmitType01(cond, so.type(), TST, 1, rn, R0, so);
}


void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) {
  CHECK_NE(rn, PC);  // Reserve teq pc instruction for exception handler marker.
  EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
}


void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
}


void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
}


void ArmAssembler::orr(Register rd, Register rn,
                       ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
}


void ArmAssembler::orrs(Register rd, Register rn,
                        ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
}


void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
}


void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
}


void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so,
                       Condition cond) {
  EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
}


void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
}


void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) {
  EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
}


void ArmAssembler::clz(Register rd, Register rm, Condition cond) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  CHECK_NE(rd, PC);
  CHECK_NE(rm, PC);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B22 | B21 | (0xf << 16) |
                     (static_cast<int32_t>(rd) << kRdShift) |
                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
  Emit(encoding);
}


void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}


void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
  Emit(encoding);
}
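
// Note (added for exposition, not in the original revision): movw/movt are
// the ARMv7 pair for materializing an arbitrary 32-bit constant, e.g.
//   movw ip, #0x5678   @ ip = 0x00005678 (movw zero-extends)
//   movt ip, #0x1234   @ ip = 0x12345678 (movt replaces the top halfword)
// LoadImmediate() below emits this pair when neither mov nor mvn can encode
// the value as a rotated 8-bit immediate.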


void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode,
                             Register rd, Register rn,
                             Register rm, Register rs) {
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(rs, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = opcode |
      (static_cast<int32_t>(cond) << kConditionShift) |
      (static_cast<int32_t>(rn) << kRnShift) |
      (static_cast<int32_t>(rd) << kRdShift) |
      (static_cast<int32_t>(rs) << kRsShift) |
      B7 | B4 |
      (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}
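
// Note (added for exposition, not in the original revision): in the A32
// multiply encodings the destination lives in the Rn field (bits 19:16),
// not Rd, so the wrappers below pass their operands to EmitMulOp() permuted;
// the per-function comments spell out the mapping.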


void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) {
  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
  EmitMulOp(cond, 0, R0, rd, rn, rm);
}


void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra,
                       Condition cond) {
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B21, ra, rd, rn, rm);
}


void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra,
                       Condition cond) {
  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
  EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
}


void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn,
                         Register rm, Condition cond) {
  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
}


void ArmAssembler::ldr(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, false, rd, ad);
}


void ArmAssembler::str(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, false, rd, ad);
}


void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, true, true, rd, ad);
}


void ArmAssembler::strb(Register rd, Address ad, Condition cond) {
  EmitMemOp(cond, false, true, rd, ad);
}


void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
}


void ArmAssembler::strh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
}


void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
}


void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) {
  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
}


void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) {
  CHECK_EQ(rd % 2, 0);
  EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
}


void ArmAssembler::strd(Register rd, Address ad, Condition cond) {
  CHECK_EQ(rd % 2, 0);
  EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
}


void ArmAssembler::ldm(BlockAddressMode am,
                       Register base,
                       RegList regs,
                       Condition cond) {
  EmitMultiMemOp(cond, am, true, base, regs);
}


void ArmAssembler::stm(BlockAddressMode am,
                       Register base,
                       RegList regs,
                       Condition cond) {
  EmitMultiMemOp(cond, am, false, base, regs);
}


void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) {
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     L   |
                     (static_cast<int32_t>(rn) << kLdExRnShift) |
                     (static_cast<int32_t>(rt) << kLdExRtShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
  Emit(encoding);
}


void ArmAssembler::strex(Register rd,
                         Register rt,
                         Register rn,
                         Condition cond) {
  CHECK_NE(rn, kNoRegister);
  CHECK_NE(rd, kNoRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 |
                     B23 |
                     (static_cast<int32_t>(rn) << kStrExRnShift) |
                     (static_cast<int32_t>(rd) << kStrExRdShift) |
                     B11 | B10 | B9 | B8 | B7 | B4 |
                     (static_cast<int32_t>(rt) << kStrExRtShift);
  Emit(encoding);
}
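
// Note (added for exposition, not in the original revision): ldrex/strex are
// the building blocks for atomic read-modify-write. A compare-and-swap on
// [r1] might look like (sketch, register choice arbitrary):
//   retry:
//     ldrex r2, [r1]          @ load exclusive
//     cmp   r2, r_expected
//     bne   fail
//     strex r3, r_new, [r1]   @ r3 = 0 on success, 1 if exclusivity was lost
//     cmp   r3, #0
//     bne   retry
// strex reports its status in rd, which is why it takes three registers.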


void ArmAssembler::clrex() {
  int32_t encoding = (kSpecialCondition << kConditionShift) |
                     B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
  Emit(encoding);
}


void ArmAssembler::nop(Condition cond) {
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B25 | B24 | B21 | (0xf << 12);
  Emit(encoding);
}


void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}


void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) {
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B20 |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
  Emit(encoding);
}


void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2,
                           Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm,
                           Condition cond) {
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(sm, S31);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2,
                           Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}


void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm,
                           Condition cond) {
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(rt, kNoRegister);
  CHECK_NE(rt, SP);
  CHECK_NE(rt, PC);
  CHECK_NE(rt2, kNoRegister);
  CHECK_NE(rt2, SP);
  CHECK_NE(rt2, PC);
  CHECK_NE(rt, rt2);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B22 | B20 |
                     (static_cast<int32_t>(rt2)*B16) |
                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}


void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}


void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) {
  CHECK_NE(static_cast<Register>((ad.encoding_ >> kRnShift) & 0xf), PC);
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     B11 | B9 | ad.vencoding();
  Emit(encoding);
}


void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 | B20 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}


void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) {
  CHECK_NE(static_cast<Register>((ad.encoding_ >> kRnShift) & 0xf), PC);
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B24 |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     B11 | B9 | B8 | ad.vencoding();
  Emit(encoding);
}


void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode,
                              SRegister sd, SRegister sn, SRegister sm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(sn, kNoSRegister);
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sn) >> 1)*B16) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(sn) & 1)*B7) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


void ArmAssembler::EmitVFPddd(Condition cond, int32_t opcode,
                              DRegister dd, DRegister dn, DRegister dm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(dn, kNoDRegister);
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(dn) >> 4)*B7) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}
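
// Note (added for exposition, not in the original revision): VFP register
// numbers do not fit the 4-bit fields of the base encoding, so they are
// split. An S register sN is encoded as (sN >> 1) in the 4-bit field plus
// (sN & 1) in an extension bit; a D register dN as (dN & 0xf) plus
// (dN >> 4). That is the origin of the ">> 1 / & 1" and "& 0xf / >> 4"
// arithmetic in the VFP emitters above.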


void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
}


void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
}


bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) {
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
        ((imm32 >> 19) & ((1 << 6) - 1));
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
               sd, S0, S0);
    return true;
  }
  return false;
}


bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) {
  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
        ((imm64 >> 48) & ((1 << 6) - 1));
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
               dd, D0, D0);
    return true;
  }
  return false;
}
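
// Note (added for exposition, not in the original revision): the bit tests
// above implement the VFP modified-immediate check. Only values of the form
//   +/- (16..31)/16 * 2^n  with  -3 <= n <= 4
// fit the 8-bit encoding (1 sign, 3 exponent, 4 fraction bits), so e.g.
// 1.0, -0.5 and 31.0 succeed while 0.1 does not; callers must fall back to
// a load from memory when these helpers return false.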


void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
}


void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
}


void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
}


void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
}


void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B21, sd, sn, sm);
}


void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B21, dd, dn, dm);
}


void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, 0, sd, sn, sm);
}


void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, 0, dd, dn, dm);
}


void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B6, sd, sn, sm);
}


void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B6, dd, dn, dm);
}


void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
                         Condition cond) {
  EmitVFPsss(cond, B23, sd, sn, sm);
}


void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
                         Condition cond) {
  EmitVFPddd(cond, B23, dd, dn, dm);
}


void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
}


void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
}


void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
}


void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
}


void ArmAssembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
}

void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
}


void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode,
                             SRegister sd, DRegister dm) {
  CHECK_NE(sd, kNoSRegister);
  CHECK_NE(dm, kNoDRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(sd) & 1)*B22) |
                     ((static_cast<int32_t>(sd) >> 1)*B12) |
                     ((static_cast<int32_t>(dm) >> 4)*B5) |
                     (static_cast<int32_t>(dm) & 0xf);
  Emit(encoding);
}


void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode,
                             DRegister dd, SRegister sm) {
  CHECK_NE(dd, kNoDRegister);
  CHECK_NE(sm, kNoSRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B11 | B9 | opcode |
                     ((static_cast<int32_t>(dd) >> 4)*B22) |
                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
                     ((static_cast<int32_t>(sm) & 1)*B5) |
                     (static_cast<int32_t>(sm) >> 1);
  Emit(encoding);
}


void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
}


void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
}


void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
}


void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
}


void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
}


void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
}


void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
}


void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
}


void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
}


void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
}


void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
}


void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
}


void ArmAssembler::vcmpsz(SRegister sd, Condition cond) {
  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
}


void ArmAssembler::vcmpdz(DRegister dd, Condition cond) {
  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
}


void ArmAssembler::vmstat(Condition cond) {  // VMRS APSR_nzcv, FPSCR
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
                     (static_cast<int32_t>(PC)*B12) |
                     B11 | B9 | B4;
  Emit(encoding);
}


void ArmAssembler::svc(uint32_t imm24) {
  CHECK(IsUint(24, imm24)) << imm24;
  int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
  Emit(encoding);
}


void ArmAssembler::bkpt(uint16_t imm16) {
  int32_t encoding = (AL << kConditionShift) | B24 | B21 |
                     ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
  Emit(encoding);
}


void ArmAssembler::b(Label* label, Condition cond) {
  EmitBranch(cond, label, false);
}


void ArmAssembler::bl(Label* label, Condition cond) {
  EmitBranch(cond, label, true);
}


void ArmAssembler::blx(Register rm, Condition cond) {
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B5 | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}

void ArmAssembler::bx(Register rm, Condition cond) {
  CHECK_NE(rm, kNoRegister);
  CHECK_NE(cond, kNoCondition);
  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
                     B24 | B21 | (0xfff << 8) | B4 |
                     (static_cast<int32_t>(rm) << kRmShift);
  Emit(encoding);
}

void ArmAssembler::MarkExceptionHandler(Label* label) {
  EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
  Label l;
  b(&l);
  EmitBranch(AL, label, false);
  Bind(&l);
}


void ArmAssembler::Bind(Label* label) {
  CHECK(!label->IsBound());
  int bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    int32_t position = label->Position();
    int32_t next = buffer_.Load<int32_t>(position);
    int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next);
    buffer_.Store<int32_t>(position, encoded);
    label->position_ = ArmAssembler::DecodeBranchOffset(next);
  }
  label->BindTo(bound_pc);
}


void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) {
  // TODO: Consider using movw ip, <16 bits>.
  while (!IsUint(8, data)) {
    tst(R0, ShifterOperand(data & 0xFF), VS);
    data >>= 8;
  }
  tst(R0, ShifterOperand(data), MI);
}


int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
  // The offset is off by 8 due to the way the ARM CPUs read PC.
  offset -= 8;
  CHECK_ALIGNED(offset, 4);
  CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;
  offset &= kBranchOffsetMask;
  return (inst & ~kBranchOffsetMask) | offset;
}


int ArmAssembler::DecodeBranchOffset(int32_t inst) {
  // Sign-extend, left-shift by 2, then add 8.
  return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
}
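
// Note (added for exposition, not in the original revision): an A32 branch at
// address A targeting T encodes imm24 = (T - A - 8) >> 2; the -8 reflects the
// PC reading two instructions ahead. A branch skipping exactly one
// instruction (T - A = 8) therefore encodes imm24 = 0. In DecodeBranchOffset,
// shifting left by 8 then arithmetically right by 6 sign-extends the 24-bit
// field and multiplies by 4 in one step, and the +8 restores the PC bias.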

void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}


void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value,
                               Condition cond) {
  if (value == 0) {
    if (rd != rn) {
      mov(rd, ShifterOperand(rn), cond);
    }
    return;
  }
  // We prefer to select the shorter code sequence rather than using add for
  // positive values and sub for negative ones, which would slightly improve
  // the readability of generated code for some constants.
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    add(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    sub(rd, rn, shifter_op, cond);
  } else {
    CHECK(rn != IP);
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      mvn(IP, shifter_op, cond);
      add(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      sub(rd, rn, ShifterOperand(IP), cond);
    } else {
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      add(rd, rn, ShifterOperand(IP), cond);
    }
  }
}
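
// Illustrative examples (added for exposition, not in the original revision)
// of AddConstant's fallback ladder:
//   AddConstant(R0, R1, 4)          -> add r0, r1, #4
//   AddConstant(R0, R1, -4)         -> sub r0, r1, #4
//   AddConstant(R0, R1, -257)       -> mvn ip, #0x100; add r0, r1, ip
//   AddConstant(R0, R1, 0x12345678) -> movw/movt ip; add r0, r1, ip
// IP is used as scratch in the long forms, hence the CHECK(rn != IP).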


void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
                                       Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    adds(rd, rn, shifter_op, cond);
  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
    subs(rd, rn, shifter_op, cond);
  } else {
    CHECK(rn != IP);
    if (ShifterOperand::CanHold(~value, &shifter_op)) {
      mvn(IP, shifter_op, cond);
      adds(rd, rn, ShifterOperand(IP), cond);
    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
      mvn(IP, shifter_op, cond);
      subs(rd, rn, ShifterOperand(IP), cond);
    } else {
      movw(IP, Low16Bits(value), cond);
      uint16_t value_high = High16Bits(value);
      if (value_high != 0) {
        movt(IP, value_high, cond);
      }
      adds(rd, rn, ShifterOperand(IP), cond);
    }
  }
}


void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
  ShifterOperand shifter_op;
  if (ShifterOperand::CanHold(value, &shifter_op)) {
    mov(rd, shifter_op, cond);
  } else if (ShifterOperand::CanHold(~value, &shifter_op)) {
    mvn(rd, shifter_op, cond);
  } else {
    movw(rd, Low16Bits(value), cond);
    uint16_t value_high = High16Bits(value);
    if (value_high != 0) {
      movt(rd, value_high, cond);
    }
  }
}
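
// Note (added for exposition, not in the original revision): LoadImmediate
// picks the cheapest of three forms, e.g.
//   LoadImmediate(R0, 0x00ff0000) -> mov  r0, #0xff0000   (rotated imm8)
//   LoadImmediate(R0, -256)       -> mvn  r0, #0xff       (complement fits)
//   LoadImmediate(R0, 0x12345678) -> movw r0, #0x5678; movt r0, #0x1234
// The movt is skipped when the high halfword is zero.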


bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}


bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}
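
// Note (added for exposition, not in the original revision): the ranges
// tested above correspond to the immediate fields of each addressing mode:
//   mode 2 (ldr/str/ldrb/strb): 12-bit immediate, |offset| < 4096
//   mode 3 (halfword/dual):      8-bit immediate, |offset| < 256
//   VFP (vldr/vstr):             8-bit immediate scaled by 4, so at most
//                                1020 and word-aligned; the 10-bit check
//                                here is an approximation of that bound.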


// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset.
void ArmAssembler::LoadFromOffset(LoadOperandType type,
                                  Register reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldLoadOffset(type, offset)) {
    CHECK(base != IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(type, offset));
  switch (type) {
    case kLoadSignedByte:
      ldrsb(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedByte:
      ldrb(reg, Address(base, offset), cond);
      break;
    case kLoadSignedHalfword:
      ldrsh(reg, Address(base, offset), cond);
      break;
    case kLoadUnsignedHalfword:
      ldrh(reg, Address(base, offset), cond);
      break;
    case kLoadWord:
      ldr(reg, Address(base, offset), cond);
      break;
    case kLoadWordPair:
      ldrd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
void ArmAssembler::LoadSFromOffset(SRegister reg,
                                   Register base,
                                   int32_t offset,
                                   Condition cond) {
  if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
  vldrs(reg, Address(base, offset), cond);
}

// Implementation note: this method must emit at most one instruction when
// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
void ArmAssembler::LoadDFromOffset(DRegister reg,
                                   Register base,
                                   int32_t offset,
                                   Condition cond) {
  if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
  vldrd(reg, Address(base, offset), cond);
}

// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset.
void ArmAssembler::StoreToOffset(StoreOperandType type,
                                 Register reg,
                                 Register base,
                                 int32_t offset,
                                 Condition cond) {
  if (!Address::CanHoldStoreOffset(type, offset)) {
    CHECK(reg != IP);
    CHECK(base != IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(type, offset));
  switch (type) {
    case kStoreByte:
      strb(reg, Address(base, offset), cond);
      break;
    case kStoreHalfword:
      strh(reg, Address(base, offset), cond);
      break;
    case kStoreWord:
      str(reg, Address(base, offset), cond);
      break;
    case kStoreWordPair:
      strd(reg, Address(base, offset), cond);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
void ArmAssembler::StoreSToOffset(SRegister reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
  vstrs(reg, Address(base, offset), cond);
}

// Implementation note: this method must emit at most one instruction when
// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
void ArmAssembler::StoreDToOffset(DRegister reg,
                                  Register base,
                                  int32_t offset,
                                  Condition cond) {
  if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
    CHECK_NE(base, IP);
    LoadImmediate(IP, offset, cond);
    add(IP, IP, ShifterOperand(base), cond);
    base = IP;
    offset = 0;
  }
  CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
  vstrd(reg, Address(base, offset), cond);
}

void ArmAssembler::Push(Register rd, Condition cond) {
  str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
}

void ArmAssembler::Pop(Register rd, Condition cond) {
  ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
}
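
// Note (added for exposition, not in the original revision): Push/Pop use the
// standard full-descending stack, i.e. Push(R0) emits "str r0, [sp, #-4]!"
// and Pop(R0) emits "ldr r0, [sp], #4". PushList and PopList below are the
// stmdb/ldmia multi-register equivalents.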

void ArmAssembler::PushList(RegList regs, Condition cond) {
  stm(DB_W, SP, regs, cond);
}

void ArmAssembler::PopList(RegList regs, Condition cond) {
  ldm(IA_W, SP, regs, cond);
}

void ArmAssembler::Mov(Register rd, Register rm, Condition cond) {
  if (rd != rm) {
    mov(rd, ShifterOperand(rm), cond);
  }
}

void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsl if no shift is wanted.
  mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
}

void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Lsr if no shift is wanted.
  if (shift_imm == 32) shift_imm = 0;  // Comply with UAL syntax.
  mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
}

void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Do not use Asr if no shift is wanted.
  if (shift_imm == 32) shift_imm = 0;  // Comply with UAL syntax.
  mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
}

void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm,
                       Condition cond) {
  CHECK_NE(shift_imm, 0u);  // Use Rrx() instead; a rotate of 0 encodes RRX.
  mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
}

void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) {
  mov(rd, ShifterOperand(rm, ROR, 0), cond);
}

constexpr size_t kFramePointerSize = 4;

void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                              const std::vector<ManagedRegister>& callee_save_regs,
                              const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());

  // Push callee saves and link register.
  RegList push_list = 1 << LR;
  size_t pushed_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    push_list |= 1 << reg;
    pushed_values++;
  }
  PushList(push_list);

  // Increase frame to required size.
  CHECK_GT(frame_size, pushed_values * kFramePointerSize);  // Must at least have space for Method*.
  size_t adjust = frame_size - (pushed_values * kFramePointerSize);
  IncreaseFrameSize(adjust);

  // Write out Method*.
  StoreToOffset(kStoreWord, R0, SP, 0);

  // Write out entry spills.
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
    StoreToOffset(kStoreWord, reg, SP, frame_size + kFramePointerSize + (i * kFramePointerSize));
  }
}
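
// Note (added for exposition, not in the original revision): for a frame of
// size F with n callee saves, BuildFrame produces (addresses grow upward):
//   [SP + F + 4 + 4i]  entry spills, written into the caller's frame
//   [SP + F - 4]       LR (return address)
//   [SP + F - 4 - 4n]  callee-save registers below it
//   [SP + 0]           Method*
// RemoveFrame below mirrors this and pops the saved LR directly into PC.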

void ArmAssembler::RemoveFrame(size_t frame_size,
                               const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  // Compute callee saves to pop and PC.
  RegList pop_list = 1 << PC;
  size_t pop_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    pop_list |= 1 << reg;
    pop_values++;
  }

  // Decrease frame to start of callee saves.
  CHECK_GT(frame_size, pop_values * kFramePointerSize);
  size_t adjust = frame_size - (pop_values * kFramePointerSize);
  DecreaseFrameSize(adjust);

  // Pop callee saves and PC.
  PopList(pop_list);
}

void ArmAssembler::IncreaseFrameSize(size_t adjust) {
  AddConstant(SP, -adjust);
}

void ArmAssembler::DecreaseFrameSize(size_t adjust) {
  AddConstant(SP, adjust);
}

void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  ArmManagedRegister src = msrc.AsArm();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                  SP, dest.Int32Value() + 4);
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
  }
}

void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                 FrameOffset in_off, ManagedRegister mscratch) {
  ArmManagedRegister src = msrc.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
}

void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
                           MemberOffset offs) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
  if (kPoisonHeapReferences) {
    rsb(dst.AsCoreRegister(), dst.AsCoreRegister(), ShifterOperand(0));
  }
}
1559
1560void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset  src) {
1561  ArmManagedRegister dst = mdest.AsArm();
1562  CHECK(dst.IsCoreRegister()) << dst;
1563  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
1564}
1565
1566void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
1567                           Offset offs) {
1568  ArmManagedRegister dst = mdest.AsArm();
1569  CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
1570  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
1571                 base.AsArm().AsCoreRegister(), offs.Int32Value());
1572}
1573
1574void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
1575                                      ManagedRegister mscratch) {
1576  ArmManagedRegister scratch = mscratch.AsArm();
1577  CHECK(scratch.IsCoreRegister()) << scratch;
1578  LoadImmediate(scratch.AsCoreRegister(), imm);
1579  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
1580}
1581
1582void ArmAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
1583                                       ManagedRegister mscratch) {
1584  ArmManagedRegister scratch = mscratch.AsArm();
1585  CHECK(scratch.IsCoreRegister()) << scratch;
1586  LoadImmediate(scratch.AsCoreRegister(), imm);
1587  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
1588}
1589
1590static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
1591                     Register src_register, int32_t src_offset, size_t size) {
1592  ArmManagedRegister dst = m_dst.AsArm();
1593  if (dst.IsNoRegister()) {
1594    CHECK_EQ(0u, size) << dst;
1595  } else if (dst.IsCoreRegister()) {
1596    CHECK_EQ(4u, size) << dst;
1597    assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
1598  } else if (dst.IsRegisterPair()) {
1599    CHECK_EQ(8u, size) << dst;
1600    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
1601    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
1602  } else if (dst.IsSRegister()) {
1603    assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
1604  } else {
1605    CHECK(dst.IsDRegister()) << dst;
1606    assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
1607  }
1608}
1609
1610void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
1611  return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
1612}
1613
1614void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
1615  return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
1616}
1617
1618void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
1619  ArmManagedRegister dst = m_dst.AsArm();
1620  CHECK(dst.IsCoreRegister()) << dst;
1621  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
1622}
1623
1624void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
1625                                          ThreadOffset<4> thr_offs,
1626                                          ManagedRegister mscratch) {
1627  ArmManagedRegister scratch = mscratch.AsArm();
1628  CHECK(scratch.IsCoreRegister()) << scratch;
1629  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1630                 TR, thr_offs.Int32Value());
1631  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
1632                SP, fr_offs.Int32Value());
1633}
1634
1635void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
1636                                        FrameOffset fr_offs,
1637                                        ManagedRegister mscratch) {
1638  ArmManagedRegister scratch = mscratch.AsArm();
1639  CHECK(scratch.IsCoreRegister()) << scratch;
1640  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1641                 SP, fr_offs.Int32Value());
1642  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
1643                TR, thr_offs.Int32Value());
1644}
1645
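    // Computes SP+fr_offs into the scratch register and stores it at thr_offs
    // in the current Thread.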
1646void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
1647                                              FrameOffset fr_offs,
1648                                              ManagedRegister mscratch) {
1649  ArmManagedRegister scratch = mscratch.AsArm();
1650  CHECK(scratch.IsCoreRegister()) << scratch;
1651  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
1652  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
1653                TR, thr_offs.Int32Value());
1654}
1655
1656void ArmAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
1657  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
1658}
1659
1660void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
1661  UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
1662}
1663
1664void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
1665  UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
1666}
1667
1668void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
1669  ArmManagedRegister dst = m_dst.AsArm();
1670  ArmManagedRegister src = m_src.AsArm();
1671  if (!dst.Equals(src)) {
1672    if (dst.IsCoreRegister()) {
1673      CHECK(src.IsCoreRegister()) << src;
1674      mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
1675    } else if (dst.IsDRegister()) {
1676      CHECK(src.IsDRegister()) << src;
1677      vmovd(dst.AsDRegister(), src.AsDRegister());
1678    } else if (dst.IsSRegister()) {
1679      CHECK(src.IsSRegister()) << src;
1680      vmovs(dst.AsSRegister(), src.AsSRegister());
1681    } else {
1682      CHECK(dst.IsRegisterPair()) << dst;
1683      CHECK(src.IsRegisterPair()) << src;
1684      // Ensure that the first move doesn't clobber the input of the second.
1685      if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
1686        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
1687        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
1688      } else {
1689        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
1690        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
1691      }
1692    }
1693  }
1694}
1695
1696void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
1697  ArmManagedRegister scratch = mscratch.AsArm();
1698  CHECK(scratch.IsCoreRegister()) << scratch;
1699  CHECK(size == 4 || size == 8) << size;
1700  if (size == 4) {
1701    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
1702    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
1703  } else if (size == 8) {
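        // Copy the eight bytes as two word-sized transfers through the scratch register.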
1704    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
1705    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
1706    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
1707    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
1708  }
1709}
1710
1711void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
1712                        ManagedRegister mscratch, size_t size) {
1713  Register scratch = mscratch.AsArm().AsCoreRegister();
1714  CHECK_EQ(size, 4u);
1715  LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
1716  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
1717}
1718
1719void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
1720                        ManagedRegister mscratch, size_t size) {
1721  Register scratch = mscratch.AsArm().AsCoreRegister();
1722  CHECK_EQ(size, 4u);
1723  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
1724  StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
1725}
1726
1727void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
1728                        ManagedRegister /*mscratch*/, size_t /*size*/) {
1729  UNIMPLEMENTED(FATAL);
1730}
1731
1732void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
1733                        ManagedRegister src, Offset src_offset,
1734                        ManagedRegister mscratch, size_t size) {
1735  CHECK_EQ(size, 4u);
1736  Register scratch = mscratch.AsArm().AsCoreRegister();
1737  LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
1738  StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
1739}
1740
1741void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
                            FrameOffset /*src*/, Offset /*src_offset*/,
1742                        ManagedRegister /*scratch*/, size_t /*size*/) {
1743  UNIMPLEMENTED(FATAL);
1744}
1745
1746
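    // Emits a full data memory barrier on SMP builds; expands to no code on
    // uniprocessor builds.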
1747void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
1748  CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
1749#if ANDROID_SMP != 0
1750  int32_t encoding = 0xf57ff05f;  // dmb SY (full-system data memory barrier).
1751  Emit(encoding);
1752#endif
1753}
1754
1755void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
1756                                          FrameOffset handle_scope_offset,
1757                                          ManagedRegister min_reg, bool null_allowed) {
1758  ArmManagedRegister out_reg = mout_reg.AsArm();
1759  ArmManagedRegister in_reg = min_reg.AsArm();
1760  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
1761  CHECK(out_reg.IsCoreRegister()) << out_reg;
1762  if (null_allowed) {
1763    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
1764    // the address in the handle scope holding the reference.
1765    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
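        // Emitted roughly as: cmp in, #0; moveq out, #0; addne out, sp, #handle_scope_offset.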
1766    if (in_reg.IsNoRegister()) {
1767      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
1768                     SP, handle_scope_offset.Int32Value());
1769      in_reg = out_reg;
1770    }
1771    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
1772    if (!out_reg.Equals(in_reg)) {
1773      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
1774    }
1775    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
1776  } else {
1777    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
1778  }
1779}
1780
1781void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
1782                                          FrameOffset handle_scope_offset,
1783                                          ManagedRegister mscratch,
1784                                          bool null_allowed) {
1785  ArmManagedRegister scratch = mscratch.AsArm();
1786  CHECK(scratch.IsCoreRegister()) << scratch;
1787  if (null_allowed) {
1788    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
1789                   handle_scope_offset.Int32Value());
1790    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
1791    // the address in the handle scope holding the reference.
1792    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
1793    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
1794    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
1795  } else {
1796    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
1797  }
1798  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
1799}
1800
1801void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
1802                                                ManagedRegister min_reg) {
1803  ArmManagedRegister out_reg = mout_reg.AsArm();
1804  ArmManagedRegister in_reg = min_reg.AsArm();
1805  CHECK(out_reg.IsCoreRegister()) << out_reg;
1806  CHECK(in_reg.IsCoreRegister()) << in_reg;
1807  // Set the flags before the conditional instructions that read them.
1808  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
1809  if (!out_reg.Equals(in_reg)) {
1810    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
1811  }
1812  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
1813                 in_reg.AsCoreRegister(), 0, NE);
1814}
1815
1816void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
1817  // TODO: not validating references
1818}
1819
1820void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
1821  // TODO: not validating references
1822}
1823
1824void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
1825                        ManagedRegister mscratch) {
1826  ArmManagedRegister base = mbase.AsArm();
1827  ArmManagedRegister scratch = mscratch.AsArm();
1828  CHECK(base.IsCoreRegister()) << base;
1829  CHECK(scratch.IsCoreRegister()) << scratch;
1830  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1831                 base.AsCoreRegister(), offset.Int32Value());
1832  blx(scratch.AsCoreRegister());
1833  // TODO: place reference map on call
1834}
1835
1836void ArmAssembler::Call(FrameOffset base, Offset offset,
1837                        ManagedRegister mscratch) {
1838  ArmManagedRegister scratch = mscratch.AsArm();
1839  CHECK(scratch.IsCoreRegister()) << scratch;
1840  // Call *(*(SP + base) + offset)
1841  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1842                 SP, base.Int32Value());
1843  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1844                 scratch.AsCoreRegister(), offset.Int32Value());
1845  blx(scratch.AsCoreRegister());
1846  // TODO: place reference map on call
1847}
1848
1849void ArmAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
1850  UNIMPLEMENTED(FATAL);
1851}
1852
1853void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
1854  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
1855}
1856
1857void ArmAssembler::GetCurrentThread(FrameOffset offset,
1858                                    ManagedRegister /*scratch*/) {
1859  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
1860}
1861
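    // Compares the thread-local exception field against null and branches to a
    // slow path that delivers the exception when one is pending.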
1862void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
1863  ArmManagedRegister scratch = mscratch.AsArm();
1864  ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
1865  buffer_.EnqueueSlowPath(slow);
1866  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
1867                 TR, Thread::ExceptionOffset<4>().Int32Value());
1868  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
1869  b(slow->Entry(), NE);
1870}
1871
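    // Exception delivery slow path: unwinds any extra frame space, moves the
    // exception to R0, and calls the pDeliverException entrypoint, which never
    // returns.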
1872void ArmExceptionSlowPath::Emit(Assembler* sasm) {
1873  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
1874#define __ sp_asm->
1875  __ Bind(&entry_);
1876  if (stack_adjust_ != 0) {  // Fix up the frame.
1877    __ DecreaseFrameSize(stack_adjust_);
1878  }
1879  // Pass exception object as argument
1880  // Don't care about preserving R0 as this call won't return
1881  __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
1882  // Set up call to Thread::Current()->pDeliverException
1883  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
1884  __ blx(R12);
1885  // Call never returns
1886  __ bkpt(0);
1887#undef __
1888}
1889
1890}  // namespace arm
1891}  // namespace art
1892