assembler_arm.cc revision 96f89a290eb67d7bf4b1636798fa28df14309cc7
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm.h"

#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace arm {

const char* kRegisterNames[] = {
  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
  "fp", "ip", "sp", "lr", "pc"
};

const char* kConditionNames[] = {
  "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
  "LE", "AL",
};

std::ostream& operator<<(std::ostream& os, const Register& rhs) {
  if (rhs >= R0 && rhs <= PC) {
    os << kRegisterNames[rhs];
  } else {
    os << "Register[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
  if (rhs >= S0 && rhs < kNumberOfSRegisters) {
    os << "s" << static_cast<int>(rhs);
  } else {
    os << "SRegister[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
    os << "d" << static_cast<int>(rhs);
  } else {
    os << "DRegister[" << static_cast<int>(rhs) << "]";
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
  if (rhs >= EQ && rhs <= AL) {
    os << kConditionNames[rhs];
  } else {
    os << "Condition[" << static_cast<int>(rhs) << "]";
  }
  return os;
}

ShifterOperand::ShifterOperand(uint32_t immed)
    : type_(kImmediate), rm_(kNoRegister), rs_(kNoRegister),
      is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(immed) {
  CHECK(immed < (1u << 12) || ArmAssembler::ModifiedImmediate(immed) != kInvalidModifiedImmediate);
}

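// The immediate form of an ARM data-processing operand is an 8-bit value
// rotated right by twice the 4-bit rotate field; e.g. 0xFF000000 is 0xFF
// rotated right by 8, so it encodes as rotate = 4, imm8 = 0xFF.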
uint32_t ShifterOperand::encodingArm() const {
  CHECK(is_valid());
  switch (type_) {
    case kImmediate:
      if (is_rotate_) {
        return (rotate_ << kRotateShift) | (immed_ << kImmed8Shift);
      } else {
        return immed_;
      }
      break;
    case kRegister:
      if (is_shift_) {
        // Shifted immediate or register.
        if (rs_ == kNoRegister) {
          // Immediate shift.
          return immed_ << kShiftImmShift |
                 static_cast<uint32_t>(shift_) << kShiftShift |
                 static_cast<uint32_t>(rm_);
        } else {
          // Register shift.
          return static_cast<uint32_t>(rs_) << kShiftRegisterShift |
              static_cast<uint32_t>(shift_) << kShiftShift | (1 << 4) |
              static_cast<uint32_t>(rm_);
        }
      } else {
        // Simple register.
        return static_cast<uint32_t>(rm_);
      }
      break;
    default:
      // Can't get here.
      LOG(FATAL) << "Invalid shifter operand for ARM";
      return 0;
  }
}

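// In Thumb-2 the immediate shift amount is split across two fields, imm3
// (bits 12-14) and imm2 (bits 6-7); e.g. a shift of 5 (0b00101) is stored as
// imm3 = 0b001, imm2 = 0b01.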
uint32_t ShifterOperand::encodingThumb() const {
  switch (type_) {
    case kImmediate:
      return immed_;
    case kRegister:
      if (is_shift_) {
        // Shifted immediate or register.
        if (rs_ == kNoRegister) {
          // Immediate shift.
          if (shift_ == RRX) {
            // RRX is encoded as ROR with a zero immediate.
            return ROR << 4 | static_cast<uint32_t>(rm_);
          } else {
            uint32_t imm3 = immed_ >> 2;
            uint32_t imm2 = immed_ & 0b11;

            return imm3 << 12 | imm2 << 6 | shift_ << 4 |
                static_cast<uint32_t>(rm_);
          }
        } else {
          LOG(FATAL) << "No register-shifted register instruction available in Thumb";
          return 0;
        }
      } else {
        // Simple register.
        return static_cast<uint32_t>(rm_);
      }
      break;
    default:
      // Can't get here.
      LOG(FATAL) << "Invalid shifter operand for Thumb";
      return 0;
  }
  return 0;
}

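// Decide whether |immediate| fits the instruction directly, filling in
// |shifter_op| on the way.  ADD/SUB involving SP have dedicated encodings
// with their own ranges; other ADD/SUB forms accept any 12-bit value
// (ADDW/SUBW); the remaining opcodes must use the 12-bit modified-immediate
// format checked by ModifiedImmediate().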
bool ShifterOperand::CanHoldThumb(Register rd, Register rn, Opcode opcode,
                                  uint32_t immediate, ShifterOperand* shifter_op) {
  shifter_op->type_ = kImmediate;
  shifter_op->immed_ = immediate;
  shifter_op->is_shift_ = false;
  shifter_op->is_rotate_ = false;
  switch (opcode) {
    case ADD:
    case SUB:
      if (rn == SP) {
        if (rd == SP) {
          return immediate < (1 << 9);    // 9 bits allowed.
        } else {
          return immediate < (1 << 12);   // 12 bits.
        }
      }
      if (immediate < (1 << 12)) {    // Anything that fits in 12 bits can always be encoded.
        return true;
      }
      return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;

    case MOV:
      // TODO: Support immediates of 12 bits or fewer.
      return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
    case MVN:
    default:
      return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
  }
}

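// Encode an ARM addressing mode 2 operand.  Negative immediate offsets are
// made positive and the U bit is flipped instead, so e.g. [rn, #-4] encodes
// with U = 0 and imm12 = 4.  Register offsets additionally encode rm and the
// shift in the low bits, with B25 set.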
uint32_t Address::encodingArm() const {
  CHECK(IsAbsoluteUint(12, offset_));
  uint32_t encoding;
  if (is_immed_offset_) {
    if (offset_ < 0) {
      encoding = (am_ ^ (1 << kUShift)) | -offset_;  // Flip U to adjust sign.
    } else {
      encoding = am_ | offset_;
    }
  } else {
    uint32_t imm5 = offset_;
    uint32_t shift = shift_;
    if (shift == RRX) {
      imm5 = 0;
      shift = ROR;
    }
    encoding = am_ | static_cast<uint32_t>(rm_) | shift << 5 | imm5 << 7 | B25;
  }
  encoding |= static_cast<uint32_t>(rn_) << kRnShift;
  return encoding;
}


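// Encode a Thumb-2 load/store address.  Immediate offsets pick between the
// T3 encoding (plain 12-bit positive offset) and the T4 encoding (8-bit
// offset plus PUW bits, needed for negative offsets and for pre/post
// indexing).  Register offsets pick between the 16-bit T1 form and the
// 32-bit T2 form.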
uint32_t Address::encodingThumb(bool is_32bit) const {
  uint32_t encoding = 0;
  if (is_immed_offset_) {
    encoding = static_cast<uint32_t>(rn_) << 16;
    // Choose between the T3 and T4 encodings.  T3 requires plain Offset
    // addressing.  The mode is kept in the ARM format:
    //   |P|U|0|W|
    // but Thumb-2 needs:
    //   |P|U|W|

    uint32_t am = am_;
    int32_t offset = offset_;
    if (offset < 0) {
      am ^= 1 << kUShift;
      offset = -offset;
    }
    if (offset_ < 0 || (offset < 256 && am_ != Mode::Offset)) {
      // T4 encoding.
      uint32_t PUW = am >> 21;   // Move down to bottom of word.
      PUW = (PUW >> 1) | (PUW & 1);   // Shift P and U down one bit; keep W at bit 0.
      // If P is 0 then W must be 1 (different from ARM).
      if ((PUW & 0b100) == 0) {
        PUW |= 0b1;
      }
      encoding |= B11 | PUW << 8 | offset;
    } else {
      // T3 encoding (also sets op1 to 0b01).
      encoding |= B23 | offset_;
    }
  } else {
    // Register offset, possibly shifted.
    // Need to choose between encoding T1 (16 bit) or T2.
    // Only Offset mode is supported.  Shift must be LSL and the count
    // is only 2 bits.
    CHECK_EQ(shift_, LSL);
    CHECK_LT(offset_, 4);
    CHECK_EQ(am_, Offset);
    bool is_t2 = is_32bit;
    if (ArmAssembler::IsHighRegister(rn_) || ArmAssembler::IsHighRegister(rm_)) {
      is_t2 = true;
    } else if (offset_ != 0) {
      is_t2 = true;
    }
    if (is_t2) {
      encoding = static_cast<uint32_t>(rn_) << 16 | static_cast<uint32_t>(rm_) |
          offset_ << 4;
    } else {
      encoding = static_cast<uint32_t>(rn_) << 3 | static_cast<uint32_t>(rm_) << 6;
    }
  }
  return encoding;
}

// Similar to the ARM encoding above, except the offset is 10 bits.
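// The byte offset must be a multiple of 4 and is stored divided by 4 in an
// 8-bit field, giving a reach of +/-1020 bytes; e.g. an offset of -8 flips
// the U bit and stores imm8 = 2.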
uint32_t Address::encodingThumbLdrdStrd() const {
  uint32_t encoding;
  uint32_t am = am_;
  // If P is 0 then W must be 1 (different from ARM).
  uint32_t PU1W = am_ >> 21;   // Move down to bottom of word.
  if ((PU1W & 0b1000) == 0) {
    am |= 1 << 21;      // Set W bit.
  }
  if (offset_ < 0) {
    int32_t off = -offset_;
    CHECK_LT(off, 1024);
    CHECK_EQ((off & 0b11), 0);    // Must be a multiple of 4.
    encoding = (am ^ (1 << kUShift)) | off >> 2;  // Flip U to adjust sign.
  } else {
    CHECK_LT(offset_, 1024);
    CHECK_EQ((offset_ & 0b11), 0);    // Must be a multiple of 4.
    encoding = am | offset_ >> 2;
  }
  encoding |= static_cast<uint32_t>(rn_) << 16;
  return encoding;
}

// Encoding for ARM addressing mode 3.
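// The 8-bit offset extracted from the mode 2 encoding is split into imm4H
// (bits 8-11) and imm4L (bits 0-3); e.g. an offset of 0x5C becomes 0x5 and
// 0xC, i.e. 0x50C in the low twelve bits.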
uint32_t Address::encoding3() const {
  const uint32_t offset_mask = (1 << 12) - 1;
  uint32_t encoding = encodingArm();
  uint32_t offset = encoding & offset_mask;
  CHECK_LT(offset, 256u);
  return (encoding & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
}

// Encoding for VFP load/store addressing.
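// The offset is stored divided by 4 in an 8-bit field, so the reachable
// range is -1020 to +1020 in steps of 4; bit 23 (the U bit) selects a
// positive offset.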
uint32_t Address::vencoding() const {
  const uint32_t offset_mask = (1 << 12) - 1;
  uint32_t encoding = encodingArm();
  uint32_t offset = encoding & offset_mask;
  CHECK(IsAbsoluteUint(10, offset));  // In the range -1020 to +1020.
  CHECK_ALIGNED(offset, 4);  // Multiple of 4.
  CHECK((am_ == Offset) || (am_ == NegOffset));
  uint32_t vencoding = (encoding & (0xf << kRnShift)) | (offset >> 2);
  if (am_ == Offset) {
    vencoding |= 1 << 23;
  }
  return vencoding;
}


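// The CanHold*Offset* predicates below mirror the encodings above: 8 bits
// for addressing mode 3, 12 bits for addressing mode 2, and 10 bits (a
// scaled 8-bit field) for VFP and Thumb-2 word-pair accesses.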
bool Address::CanHoldLoadOffsetArm(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}


bool Address::CanHoldStoreOffsetArm(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreWordPair:
      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}

bool Address::CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint(12, offset);
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    case kLoadWordPair:
      return IsAbsoluteUint(10, offset);
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}


bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint(12, offset);
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
    case kStoreWordPair:
      return IsAbsoluteUint(10, offset);
    default:
      LOG(FATAL) << "UNREACHABLE";
      return false;
  }
}

void ArmAssembler::Pad(uint32_t bytes) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  for (uint32_t i = 0; i < bytes; ++i) {
    buffer_.Emit<byte>(0);
  }
}

constexpr size_t kFramePointerSize = 4;

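// Build the spill frame for a managed method: push the callee saves and LR,
// extend the frame by |adjust| bytes, store the Method* (R0) at SP + 0, and
// finally spill the register arguments to the caller's out-going argument
// slots just above the frame (at frame_size + 4 + i * 4).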
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                              const std::vector<ManagedRegister>& callee_save_regs,
                              const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());

  // Push callee saves and link register.
  RegList push_list = 1 << LR;
  size_t pushed_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    push_list |= 1 << reg;
    pushed_values++;
  }
  PushList(push_list);

  // Increase frame to required size.
  CHECK_GT(frame_size, pushed_values * kFramePointerSize);  // Must at least have space for Method*.
  size_t adjust = frame_size - (pushed_values * kFramePointerSize);
  IncreaseFrameSize(adjust);

  // Write out Method*.
  StoreToOffset(kStoreWord, R0, SP, 0);

  // Write out entry spills.
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
    StoreToOffset(kStoreWord, reg, SP, frame_size + kFramePointerSize + (i * kFramePointerSize));
  }
}

void ArmAssembler::RemoveFrame(size_t frame_size,
                               const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  // Compute callee saves to pop and PC.
  RegList pop_list = 1 << PC;
  size_t pop_values = 1;
  for (size_t i = 0; i < callee_save_regs.size(); i++) {
    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
    pop_list |= 1 << reg;
    pop_values++;
  }

  // Decrease frame to start of callee saves.
  CHECK_GT(frame_size, pop_values * kFramePointerSize);
  size_t adjust = frame_size - (pop_values * kFramePointerSize);
  DecreaseFrameSize(adjust);

  // Pop callee saves and PC.
  PopList(pop_list);
}

void ArmAssembler::IncreaseFrameSize(size_t adjust) {
  AddConstant(SP, -adjust);
}

void ArmAssembler::DecreaseFrameSize(size_t adjust) {
  AddConstant(SP, adjust);
}

void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  ArmManagedRegister src = msrc.AsArm();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                  SP, dest.Int32Value() + 4);
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
  }
}

void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                 FrameOffset in_off, ManagedRegister mscratch) {
  ArmManagedRegister src = msrc.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
}

void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
                           MemberOffset offs) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
  if (kPoisonHeapReferences) {
    // Heap references are stored poisoned (negated); unpoison after loading.
    rsb(dst.AsCoreRegister(), dst.AsCoreRegister(), ShifterOperand(0));
  }
}

void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
}

void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                              Offset offs) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
}

void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                         ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
                                            ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
}

static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
                     Register src_register, int32_t src_offset, size_t size) {
  ArmManagedRegister dst = m_dst.AsArm();
  if (dst.IsNoRegister()) {
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsCoreRegister()) {
    CHECK_EQ(4u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
  } else if (dst.IsRegisterPair()) {
    CHECK_EQ(8u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
  } else if (dst.IsSRegister()) {
    assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
  } else {
    CHECK(dst.IsDRegister()) << dst;
    assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
  }
}

void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
}

void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
  return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
}

void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
  ArmManagedRegister dst = m_dst.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
}

void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
                                          ThreadOffset<4> thr_offs,
                                          ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, thr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                SP, fr_offs.Int32Value());
}

void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
                                        FrameOffset fr_offs,
                                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
                                              FrameOffset fr_offs,
                                              ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

void ArmAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}

void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
}

void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
}

void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
  ArmManagedRegister dst = m_dst.AsArm();
  ArmManagedRegister src = m_src.AsArm();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      CHECK(src.IsCoreRegister()) << src;
      mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
    } else if (dst.IsDRegister()) {
      CHECK(src.IsDRegister()) << src;
      vmovd(dst.AsDRegister(), src.AsDRegister());
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      vmovs(dst.AsSRegister(), src.AsSRegister());
    } else {
      CHECK(dst.IsRegisterPair()) << dst;
      CHECK(src.IsRegisterPair()) << src;
      // Ensure that the first move doesn't clobber the input of the second.
      if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
      } else {
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
      }
    }
  }
}

void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
  }
}

void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
}

void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                        ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
                        ManagedRegister src, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  CHECK_EQ(size, 4u);
  Register scratch = mscratch.AsArm().AsCoreRegister();
  LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/,
                        Offset /*src_offset*/, ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

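// A handle scope entry holds either 0 (for a null reference, when allowed)
// or the address of the stack slot containing the reference.  The
// conditional sequences below are bracketed with it()/ite() so that they
// form valid IT blocks when assembled as Thumb-2; in ARM mode each
// instruction carries its own condition.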
void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                          FrameOffset handle_scope_offset,
                                          ManagedRegister min_reg, bool null_allowed) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                     SP, handle_scope_offset.Int32Value());
      in_reg = out_reg;
    }
    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
    if (!out_reg.Equals(in_reg)) {
      it(EQ, kItElse);
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
    } else {
      it(NE);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
  }
}

void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                          FrameOffset handle_scope_offset,
                                          ManagedRegister mscratch,
                                          bool null_allowed) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  if (null_allowed) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
                   handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
    it(NE);
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
  }
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}

void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                ManagedRegister min_reg) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  CHECK(in_reg.IsCoreRegister()) << in_reg;
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);     // TODO: why EQ?
  }
  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
  it(NE);
  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                 in_reg.AsCoreRegister(), 0, NE);
}

void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
                        ManagedRegister mscratch) {
  ArmManagedRegister base = mbase.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 base.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call.
}

void ArmAssembler::Call(FrameOffset base, Offset offset,
                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Call *(*(SP + base) + offset).
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 scratch.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call.
}

void ArmAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);
}

void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
}

void ArmAssembler::GetCurrentThread(FrameOffset offset,
                                    ManagedRegister /*scratch*/) {
  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
}

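// Poll for a pending exception: load Thread::exception_ from TR and branch
// to the slow path if it is non-null.  The slow path undoes any extra stack
// adjustment and then delivers the exception.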
void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
  ArmManagedRegister scratch = mscratch.AsArm();
  ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, Thread::ExceptionOffset<4>().Int32Value());
  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
  b(slow->Entry(), NE);
}

void ArmExceptionSlowPath::Emit(Assembler* sasm) {
  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  if (stack_adjust_ != 0) {  // Fix up the frame.
    __ DecreaseFrameSize(stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving R0 as this call won't return.
  __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
  // Set up call to Thread::Current()->pDeliverException.
  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
  __ blx(R12);
  // Call never returns.
  __ bkpt(0);
#undef __
}


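// Count leading zero bits by binary search: test the top 16 bits, then 8, 4,
// 2 and 1, narrowing in on the highest set bit.  LeadingZeros(0) == 32 and
// LeadingZeros(1) == 31.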
static int LeadingZeros(uint32_t val) {
  uint32_t alt;
  int32_t n;
  int32_t count;

  count = 16;
  n = 32;
  do {
    alt = val >> count;
    if (alt != 0) {
      n = n - count;
      val = alt;
    }
    count >>= 1;
  } while (count);
  return n - val;
}


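// Compute the Thumb-2 modified-immediate encoding (i:imm3:a:bcdefgh) of
// |value|, or kInvalidModifiedImmediate if it has none.  For an 8-bit chunk
// XY, the 12-bit field can describe:
//   0x000000XY  (i:imm3 = 0:000)
//   0x00XY00XY  (i:imm3 = 0:001)
//   0xXY00XY00  (i:imm3 = 0:010)
//   0xXYXYXYXY  (i:imm3 = 0:011)
// or any 8-bit value with its top bit set, rotated right by 8 to 31 bits.
// The fields are returned already shifted into their instruction positions
// (i at bit 26, imm3 at bits 12-14, a at bit 7, bcdefgh at bits 0-6).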
uint32_t ArmAssembler::ModifiedImmediate(uint32_t value) {
  int32_t z_leading;
  int32_t z_trailing;
  uint32_t b0 = value & 0xff;

  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
  if (value <= 0xFF)
    return b0;  // 0:000:a:bcdefgh.
  if (value == ((b0 << 16) | b0))
    return (0x1 << 12) | b0; /* 0:001:a:bcdefgh */
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    return (0x3 << 12) | b0; /* 0:011:a:bcdefgh */
  b0 = (value >> 8) & 0xff;
  if (value == ((b0 << 24) | (b0 << 8)))
    return (0x2 << 12) | b0; /* 0:010:a:bcdefgh */
  /* Can we do it with rotation? */
  z_leading = LeadingZeros(value);
  z_trailing = 32 - LeadingZeros(~value & (value - 1));
  /* A run of eight or fewer active bits? */
  if ((z_leading + z_trailing) < 24)
    return kInvalidModifiedImmediate;  /* No - bail */
  /* Left-justify the constant, discarding the msb (known to be 1). */
  value <<= z_leading + 1;
  /* Create bcdefgh. */
  value >>= 25;

  /* Put it all together. */
  uint32_t v = 8 + z_leading;

  uint32_t i = (v & 0b10000) >> 4;
  uint32_t imm3 = (v >> 1) & 0b111;
  uint32_t a = v & 1;
  return value | i << 26 | imm3 << 12 | a << 7;
}

}  // namespace arm
}  // namespace art