// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


#include <cmath>
#include "a64/assembler-a64.h"

namespace vixl {

// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex() {
  VIXL_ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  VIXL_ASSERT((UINT64_C(1) << index) & list_);
  Remove(index);
  return CPURegister(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  VIXL_ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  VIXL_ASSERT((UINT64_C(1) << index) & list_);
  Remove(index);
  return CPURegister(index, size_, type_);
}


bool CPURegList::IsValid() const {
  if ((type_ == CPURegister::kRegister) ||
      (type_ == CPURegister::kFPRegister)) {
    bool is_valid = true;
    // Try to create a CPURegister for each element in the list.
    for (int i = 0; i < kRegListSizeInBits; i++) {
      if (((list_ >> i) & 1) != 0) {
        is_valid &= CPURegister(i, size_, type_).IsValid();
      }
    }
    return is_valid;
  } else if (type_ == CPURegister::kNoRegister) {
    // We can't use IsEmpty here because that asserts IsValid().
    return list_ == 0;
  } else {
    return false;
  }
}


void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kFPRegister) {
    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
  } else {
    VIXL_ASSERT(type() == CPURegister::kNoRegister);
    VIXL_ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}


CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
  return list;
}


const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedFP = CPURegList::GetCalleeSavedFP();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedFP = CPURegList::GetCallerSavedFP();


// Registers.
#define WREG(n) w##n,
const Register Register::wregisters[] = {
REGISTER_CODE_LIST(WREG)
};
#undef WREG

#define XREG(n) x##n,
const Register Register::xregisters[] = {
REGISTER_CODE_LIST(XREG)
};
#undef XREG

#define SREG(n) s##n,
const FPRegister FPRegister::sregisters[] = {
REGISTER_CODE_LIST(SREG)
};
#undef SREG

#define DREG(n) d##n,
const FPRegister FPRegister::dregisters[] = {
REGISTER_CODE_LIST(DREG)
};
#undef DREG


const Register& Register::WRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return wsp;
  } else {
    VIXL_ASSERT(code < kNumberOfRegisters);
    return wregisters[code];
  }
}


const Register& Register::XRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return sp;
  } else {
    VIXL_ASSERT(code < kNumberOfRegisters);
    return xregisters[code];
  }
}


const FPRegister& FPRegister::SRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfFPRegisters);
  return sregisters[code];
}


const FPRegister& FPRegister::DRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfFPRegisters);
  return dregisters[code];
}


const Register& CPURegister::W() const {
  VIXL_ASSERT(IsValidRegister());
  return Register::WRegFromCode(code_);
}


const Register& CPURegister::X() const {
  VIXL_ASSERT(IsValidRegister());
  return Register::XRegFromCode(code_);
}


const FPRegister& CPURegister::S() const {
  VIXL_ASSERT(IsValidFPRegister());
  return FPRegister::SRegFromCode(code_);
}


const FPRegister& CPURegister::D() const {
  VIXL_ASSERT(IsValidFPRegister());
  return FPRegister::DRegFromCode(code_);
}


// Operand.
Operand::Operand(int64_t immediate)
    : immediate_(immediate),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}


Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
  VIXL_ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.IsValid());
  VIXL_ASSERT(shift_amount <= 4);
  VIXL_ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const {
  return reg_.Is(NoReg);
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return immediate() == 0;
  } else {
    return reg().IsZero();
  }
}


Operand Operand::ToExtendedRegister() const {
  VIXL_ASSERT(IsShiftedRegister());
  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}


// MemOperand
MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
  : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(!regoffset.IsSP());
  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  VIXL_ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
  : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.immediate();
  } else if (offset.IsShiftedRegister()) {
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    VIXL_ASSERT(shift_ == LSL);
  } else {
    VIXL_ASSERT(offset.IsExtendedRegister());
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    VIXL_ASSERT(!regoffset_.IsSP());
    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}


bool MemOperand::IsPreIndex() const {
  return addrmode_ == PreIndex;
}


bool MemOperand::IsPostIndex() const {
  return addrmode_ == PostIndex;
}


// Assembler
Assembler::Assembler(byte* buffer, unsigned buffer_size)
    : buffer_size_(buffer_size), literal_pool_monitor_(0) {
  buffer_ = reinterpret_cast<Instruction*>(buffer);
  pc_ = buffer_;
  Reset();
}


Assembler::~Assembler() {
  VIXL_ASSERT(finalized_ || (pc_ == buffer_));
  VIXL_ASSERT(literals_.empty());
}


void Assembler::Reset() {
#ifdef DEBUG
  VIXL_ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  VIXL_ASSERT(literal_pool_monitor_ == 0);
  memset(buffer_, 0, pc_ - buffer_);
  finalized_ = false;
#endif
  pc_ = buffer_;
  literals_.clear();
  next_literal_pool_check_ = pc_ + kLiteralPoolCheckInterval;
}


void Assembler::FinalizeCode() {
  EmitLiteralPool();
#ifdef DEBUG
  finalized_ = true;
#endif
}


void Assembler::bind(Label* label) {
  label->is_bound_ = true;
  label->target_ = pc_;
  while (label->IsLinked()) {
    // Get the address of the following instruction in the chain.
    Instruction* next_link = label->link_->ImmPCOffsetTarget();
    // Update the instruction target.
    label->link_->SetImmPCOffsetTarget(label->target_);
    // Update the label's link.
    // If the offset of the branch we just updated was 0 (kEndOfChain) we are
    // done.
    label->link_ = (label->link_ != next_link) ? next_link : NULL;
  }
}
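
// The loop above walks an intrusive linked list threaded through the
// unresolved branch instructions themselves. Illustrative example: if two
// branches b(&l) are emitted before bind(&l), the first stores kEndOfChain
// (an offset of zero, so it points at itself) and the second points back at
// the first. bind() retargets the most recent branch first and stops when it
// reaches an instruction that points at itself.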


int Assembler::UpdateAndGetByteOffsetTo(Label* label) {
  int offset;
  VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
  if (label->IsBound()) {
    offset = label->target() - pc_;
  } else if (label->IsLinked()) {
    offset = label->link() - pc_;
  } else {
    offset = Label::kEndOfChain;
  }
  label->set_link(pc_);
  return offset;
}


// Code generation.
void Assembler::br(const Register& xn) {
  VIXL_ASSERT(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  VIXL_ASSERT(xn.Is64Bits());
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  VIXL_ASSERT(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}


void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label) {
  b(UpdateAndGetInstructionOffsetTo(label));
}


void Assembler::b(Label* label, Condition cond) {
  b(UpdateAndGetInstructionOffsetTo(label), cond);
}


void Assembler::bl(int imm26) {
  Emit(BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
  bl(UpdateAndGetInstructionOffsetTo(label));
}


void Assembler::cbz(const Register& rt,
                    int imm19) {
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt,
                    Label* label) {
  cbz(rt, UpdateAndGetInstructionOffsetTo(label));
}


void Assembler::cbnz(const Register& rt,
                     int imm19) {
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt,
                     Label* label) {
  cbnz(rt, UpdateAndGetInstructionOffsetTo(label));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    int imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    Label* label) {
  tbz(rt, bit_pos, UpdateAndGetInstructionOffsetTo(label));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
  tbnz(rt, bit_pos, UpdateAndGetInstructionOffsetTo(label));
}


void Assembler::adr(const Register& rd, int imm21) {
  VIXL_ASSERT(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, UpdateAndGetByteOffsetTo(label));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}


// Logical instructions.
void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}


void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}


void Assembler::tst(const Register& rn,
                    const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}


void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == rm.size());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == rm.size());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == rm.size());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == rm.size());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}


// Bitfield operations.
void Assembler::bfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
  VIXL_ASSERT(rd.size() == rn.size());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N |
       ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  VIXL_ASSERT(rd.size() == rn.size());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N |
       ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
}


void Assembler::extr(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     unsigned lsb) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == rm.size());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.size()) | Rn(rn) | Rd(rd));
}


void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


void Assembler::cset(const Register &rd, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, InvertCondition(cond));
}


void Assembler::csetm(const Register &rd, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, InvertCondition(cond));
}


void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  csinc(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  csinv(rd, rn, rn, InvertCondition(cond));
}


void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  csneg(rd, rn, rn, InvertCondition(cond));
}


void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == rm.size());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}


void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
}


void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
}


void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::umaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
  VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}


void Assembler::smaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
  VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}


void Assembler::umsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
  VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}


void Assembler::smsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
  VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}


void Assembler::smull(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  VIXL_ASSERT(rd.Is64Bits());
  VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}


void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == rm.size());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::smulh(const Register& xd,
                      const Register& xn,
                      const Register& xm) {
  VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
  DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
}


void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == rm.size());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd,
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd,
                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& rd,
                      const Register& rn) {
  VIXL_ASSERT(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV);
}


void Assembler::rev(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}


void Assembler::cls(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}


void Assembler::ldp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}


void Assembler::stp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}


void Assembler::ldpsw(const Register& rt,
                      const Register& rt2,
                      const MemOperand& src) {
  VIXL_ASSERT(rt.Is64Bits());
  LoadStorePair(rt, rt2, src, LDPSW_x);
}


void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,
                              const MemOperand& addr,
                              LoadStorePairOp op) {
  // 'rt' and 'rt2' can only be aliased for stores.
  VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));

  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                ImmLSPair(addr.offset(), CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    VIXL_ASSERT(addr.offset() != 0);
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      VIXL_ASSERT(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}


void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src,
                           LoadPairNonTemporalOpFor(rt, rt2));
}


void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst,
                           StorePairNonTemporalOpFor(rt, rt2));
}


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  VIXL_ASSERT(!rt.Is(rt2));
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  VIXL_ASSERT(addr.IsImmediateOffset());

  LSDataSize size = CalcLSPairDataSize(
    static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
       ImmLSPair(addr.offset(), size));
}


// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRB_w);
}


void Assembler::strb(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRB_w);
}


void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
}


void Assembler::ldrh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRH_w);
}


void Assembler::strh(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRH_w);
}


void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
}


void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, LoadOpFor(rt));
}


void Assembler::str(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, StoreOpFor(rt));
}


void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
  VIXL_ASSERT(rt.Is64Bits());
  LoadStore(rt, src, LDRSW_x);
}


void Assembler::ldr(const Register& rt, uint64_t imm) {
  LoadLiteral(rt, imm, rt.Is64Bits() ? LDR_x_lit : LDR_w_lit);
}


void Assembler::ldr(const FPRegister& ft, double imm) {
  VIXL_ASSERT(ft.Is64Bits());
  LoadLiteral(ft, double_to_rawbits(imm), LDR_d_lit);
}


void Assembler::ldr(const FPRegister& ft, float imm) {
  VIXL_ASSERT(ft.Is32Bits());
  LoadLiteral(ft, float_to_rawbits(imm), LDR_s_lit);
}


void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as add immediate with
  // second operand of zero. Otherwise, orr with first operand zr is
  // used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}
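
// Illustrative examples of the encodings chosen above:
//   mov(x0, sp)  is emitted as  add x0, sp, #0
//   mov(w1, w2)  is emitted as  orr w1, wzr, w2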


void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}


void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  VIXL_ASSERT(rt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}


void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  VIXL_ASSERT(rt.Is64Bits());
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}


void Assembler::hint(SystemHint code) {
  Emit(HINT | ImmHint(code) | Rt(xzr));
}


void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}


void Assembler::fmov(const FPRegister& fd, double imm) {
  VIXL_ASSERT(fd.Is64Bits());
  VIXL_ASSERT(IsImmFP64(imm));
  Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
}


void Assembler::fmov(const FPRegister& fd, float imm) {
  VIXL_ASSERT(fd.Is32Bits());
  VIXL_ASSERT(IsImmFP32(imm));
  Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
}


void Assembler::fmov(const Register& rd, const FPRegister& fn) {
  VIXL_ASSERT(rd.size() == fn.size());
  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
  Emit(op | Rd(rd) | Rn(fn));
}


void Assembler::fmov(const FPRegister& fd, const Register& rn) {
  VIXL_ASSERT(fd.size() == rn.size());
  FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
  Emit(op | Rd(fd) | Rn(rn));
}


void Assembler::fmov(const FPRegister& fd, const FPRegister& fn) {
  VIXL_ASSERT(fd.size() == fn.size());
  Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
}


void Assembler::fadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FADD);
}


void Assembler::fsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FSUB);
}


void Assembler::fmul(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMUL);
}


void Assembler::fmadd(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}


void Assembler::fmsub(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}


void Assembler::fnmadd(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}


void Assembler::fnmsub(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}


void Assembler::fdiv(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FDIV);
}


void Assembler::fmax(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAX);
}


void Assembler::fmaxnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
}


void Assembler::fmin(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMIN);
}


void Assembler::fminnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMINNM);
}


void Assembler::fabs(const FPRegister& fd,
                     const FPRegister& fn) {
  VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FABS);
}


void Assembler::fneg(const FPRegister& fd,
                     const FPRegister& fn) {
  VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FNEG);
}


void Assembler::fsqrt(const FPRegister& fd,
                      const FPRegister& fn) {
  VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FSQRT);
}


void Assembler::frinta(const FPRegister& fd,
                       const FPRegister& fn) {
  VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTA);
}


void Assembler::frintm(const FPRegister& fd,
                       const FPRegister& fn) {
  VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTM);
}


void Assembler::frintn(const FPRegister& fd,
                       const FPRegister& fn) {
  VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTN);
}


void Assembler::frintz(const FPRegister& fd,
                       const FPRegister& fn) {
  VIXL_ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTZ);
}


void Assembler::fcmp(const FPRegister& fn,
                     const FPRegister& fm) {
  VIXL_ASSERT(fn.size() == fm.size());
  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}


void Assembler::fcmp(const FPRegister& fn,
                     double value) {
  USE(value);
  // Although the fcmp instruction can only take an immediate value of +0.0,
  // we don't need to check for -0.0 because the sign of 0.0 doesn't affect
  // the result of the comparison.
  VIXL_ASSERT(value == 0.0);
  Emit(FPType(fn) | FCMP_zero | Rn(fn));
}
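
// Illustrative example: fcmp(d0, 0.0) emits "fcmp d0, #0.0". Passing -0.0 is
// also accepted by the assertion above, since -0.0 == +0.0 and the sign does
// not change the comparison result.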


void Assembler::fccmp(const FPRegister& fn,
                      const FPRegister& fm,
                      StatusFlags nzcv,
                      Condition cond) {
  VIXL_ASSERT(fn.size() == fm.size());
  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}


void Assembler::fcsel(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      Condition cond) {
  VIXL_ASSERT(fd.size() == fn.size());
  VIXL_ASSERT(fd.size() == fm.size());
  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}


void Assembler::FPConvertToInt(const Register& rd,
                               const FPRegister& fn,
                               FPIntegerConvertOp op) {
  Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
}


void Assembler::fcvt(const FPRegister& fd,
                     const FPRegister& fn) {
  if (fd.Is64Bits()) {
    // Convert float to double.
    VIXL_ASSERT(fn.Is32Bits());
    FPDataProcessing1Source(fd, fn, FCVT_ds);
  } else {
    // Convert double to float.
    VIXL_ASSERT(fn.Is64Bits());
    FPDataProcessing1Source(fd, fn, FCVT_sd);
  }
}


void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAU);
}


void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAS);
}


void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMU);
}


void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMS);
}


void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNU);
}


void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNS);
}


void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZU);
}


void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZS);
}


void Assembler::scvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}


void Assembler::ucvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}


// Note:
// Below, a difference in case for the same letter indicates a
// negated bit.
// If b is 1, then B is 0.
Instr Assembler::ImmFP32(float imm) {
  VIXL_ASSERT(IsImmFP32(imm));
  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}
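
// Worked example (illustrative): 1.0f has raw bits 0x3f800000, so bit 7 (a)
// is 0, bit 6 (b, taken from bit 29) is 1, and bits 5..0 (cdefgh, taken from
// bits 24..19) are 0b110000, giving the 8-bit immediate 0b01110000 (0x70).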


Instr Assembler::ImmFP64(double imm) {
  VIXL_ASSERT(IsImmFP64(imm));
  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //       0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}


// Code generation helpers.
void Assembler::MoveWide(const Register& rd,
                         uint64_t imm,
                         int shift,
                         MoveWideImmediateOp mov_op) {
  if (shift >= 0) {
    // Explicit shift specified.
    VIXL_ASSERT((shift == 0) || (shift == 16) ||
                (shift == 32) || (shift == 48));
    VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // Calculate a new immediate and shift combination to encode the immediate
    // argument.
    shift = 0;
    if ((imm & UINT64_C(0xffffffffffff0000)) == 0) {
      // Nothing to do.
    } else if ((imm & UINT64_C(0xffffffff0000ffff)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & UINT64_C(0xffff0000ffffffff)) == 0) {
      VIXL_ASSERT(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & UINT64_C(0x0000ffffffffffff)) == 0) {
      VIXL_ASSERT(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  VIXL_ASSERT(is_uint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
       Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
}
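
// Illustrative example: a caller such as movz(x0, UINT64_C(0xabcd0000))
// reaches this helper with shift == -1; the second test above matches, so it
// selects imm = 0xabcd and shift = 1 and encodes "movz x0, #0xabcd, lsl #16".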


void Assembler::AddSub(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    VIXL_ASSERT(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(immediate) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    VIXL_ASSERT(operand.reg().size() == rd.size());
    VIXL_ASSERT(operand.shift() != ROR);

    // For instructions of the form:
    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
    // or their 64-bit register equivalents, convert the operand from shifted
    // to extended register mode, and emit an add/sub extended instruction.
    if (rn.IsSP() || rd.IsSP()) {
      VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    VIXL_ASSERT(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}
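
// Illustrative example: add(x0, sp, Operand(x1, LSL, 2)) takes the
// rn.IsSP() path above, so the operand is converted to Operand(x1, UXTX, 2)
// and an add (extended register) instruction is emitted.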


void Assembler::AddSubWithCarry(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                AddSubWithCarryOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  VIXL_ASSERT(rd.size() == operand.reg().size());
  VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::hlt(int code) {
  VIXL_ASSERT(is_uint16(code));
  Emit(HLT | ImmException(code));
}


void Assembler::brk(int code) {
  VIXL_ASSERT(is_uint16(code));
  Emit(BRK | ImmException(code));
}


void Assembler::Logical(const Register& rd,
                        const Register& rn,
                        const Operand& operand,
                        LogicalOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.size();

    VIXL_ASSERT(immediate != 0);
    VIXL_ASSERT(immediate != -1);
    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      VIXL_UNREACHABLE();
    }
  } else {
    VIXL_ASSERT(operand.IsShiftedRegister());
    VIXL_ASSERT(operand.reg().size() == rd.size());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}
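
// Illustrative example: an orn(x0, x1, ...) call with the immediate
// 0xffffffffffff0000 has the NOT bit set in its LogicalOp, so it is rewritten
// above as ORR with the inverted immediate 0xffff, which IsImmLogical can
// encode directly.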


void Assembler::LogicalImmediate(const Register& rd,
                                 const Register& rn,
                                 unsigned n,
                                 unsigned imm_s,
                                 unsigned imm_r,
                                 LogicalOp op) {
  unsigned reg_size = rd.size();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
       Rn(rn));
}


void Assembler::ConditionalCompare(const Register& rn,
                                   const Operand& operand,
                                   StatusFlags nzcv,
                                   Condition cond,
                                   ConditionalCompareOp op) {
  Instr ccmpop;
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    VIXL_ASSERT(IsImmConditionalCompare(immediate));
    ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
  } else {
    VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
  }
  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}


void Assembler::DataProcessing1Source(const Register& rd,
                                      const Register& rn,
                                      DataProcessing1SourceOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}


void Assembler::FPDataProcessing1Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        FPDataProcessing1SourceOp op) {
  Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing2Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        FPDataProcessing2SourceOp op) {
  VIXL_ASSERT(fd.size() == fn.size());
  VIXL_ASSERT(fd.size() == fm.size());
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing3Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        const FPRegister& fa,
                                        FPDataProcessing3SourceOp op) {
  VIXL_ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}


void Assembler::EmitShift(const Register& rd,
                          const Register& rn,
                          Shift shift,
                          unsigned shift_amount) {
  switch (shift) {
    case LSL:
      lsl(rd, rn, shift_amount);
      break;
    case LSR:
      lsr(rd, rn, shift_amount);
      break;
    case ASR:
      asr(rd, rn, shift_amount);
      break;
    case ROR:
      ror(rd, rn, shift_amount);
      break;
    default:
      VIXL_UNREACHABLE();
  }
}


void Assembler::EmitExtendShift(const Register& rd,
                                const Register& rn,
                                Extend extend,
                                unsigned left_shift) {
  VIXL_ASSERT(rd.size() >= rn.size());
  unsigned reg_size = rd.size();
  // Use the correct size of register.
  Register rn_ = Register(rn.code(), rd.size());
  // Bits extracted are high_bit:0.
  unsigned high_bit = (8 << (extend & 0x3)) - 1;
  // Number of bits left in the result that are not introduced by the shift.
  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
    switch (extend) {
      case UXTB:
      case UXTH:
      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
      case SXTB:
      case SXTH:
      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
      case UXTX:
      case SXTX: {
        VIXL_ASSERT(rn.size() == kXRegSize);
        // Nothing to extend. Just shift.
        lsl(rd, rn_, left_shift);
        break;
      }
      default: VIXL_UNREACHABLE();
    }
  } else {
    // No need to extend as the extended bits would be shifted away.
    lsl(rd, rn_, left_shift);
  }
}
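
// Illustrative example: EmitExtendShift(x0, w1, UXTH, 0) computes
// high_bit = 15 and non_shift_bits = 0, so it emits ubfm(x0, x1, 0, 15),
// i.e. a zero-extension of the low half-word.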
1805
1806
1807void Assembler::DataProcShiftedRegister(const Register& rd,
1808                                        const Register& rn,
1809                                        const Operand& operand,
1810                                        FlagsUpdate S,
1811                                        Instr op) {
1812  VIXL_ASSERT(operand.IsShiftedRegister());
1813  VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() &&
1814              is_uint5(operand.shift_amount())));
1815  Emit(SF(rd) | op | Flags(S) |
1816       ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
1817       Rm(operand.reg()) | Rn(rn) | Rd(rd));
1818}
1819
1820
1821void Assembler::DataProcExtendedRegister(const Register& rd,
1822                                         const Register& rn,
1823                                         const Operand& operand,
1824                                         FlagsUpdate S,
1825                                         Instr op) {
1826  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
1827  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
1828       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
1829       dest_reg | RnSP(rn));
1830}
1831
1832
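// Test whether an immediate can be encoded directly in an add/sub immediate
// instruction: either an unsigned 12-bit value, or an unsigned 12-bit value
// shifted left by twelve bits. Illustrative examples: 0x123 and 0x123000 are
// encodable, but 0x123456 is not.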
bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}


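// Emit a load or store using the addressing mode described by addr.
// Illustrative example of the immediate-offset forms, assuming an X-register
// access (eight-byte size): an offset of 8 fits the scaled unsigned-offset
// encoding (a multiple of the access size whose scaled value fits in twelve
// bits), whereas an offset of -8 does not, so the unscaled nine-bit signed
// encoding is used instead. Offsets that fit neither form are rejected here
// and are handled by the macro assembler.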
void Assembler::LoadStore(const CPURegister& rt,
                          const MemOperand& addr,
                          LoadStoreOp op) {
  Instr memop = op | Rt(rt) | RnSP(addr.base());
  ptrdiff_t offset = addr.offset();

  if (addr.IsImmediateOffset()) {
    LSDataSize size = CalcLSDataSize(op);
    if (IsImmLSScaled(offset, size)) {
      // Use the scaled addressing mode.
      Emit(LoadStoreUnsignedOffsetFixed | memop |
           ImmLSUnsigned(offset >> size));
    } else if (IsImmLSUnscaled(offset)) {
      // Use the unscaled addressing mode.
      Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
    } else {
      // This case is handled in the macro assembler.
      VIXL_UNREACHABLE();
    }
  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    // LSL is encoded in the option field as UXTX.
    if (shift == LSL) {
      ext = UXTX;
    }

    // Shifts are encoded in one bit, indicating a left shift by the memory
    // access size.
    VIXL_ASSERT((shift_amount == 0) ||
                (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
    Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
         ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
  } else {
    if (IsImmLSUnscaled(offset)) {
      if (addr.IsPreIndex()) {
        Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
      } else {
        VIXL_ASSERT(addr.IsPostIndex());
        Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
      }
    } else {
      // This case is handled in the macro assembler.
      VIXL_UNREACHABLE();
    }
  }
}


bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
  return is_int9(offset);
}


bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_uint12(offset >> size);
}


void Assembler::LoadLiteral(const CPURegister& rt,
                            uint64_t imm,
                            LoadLiteralOp op) {
  VIXL_ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));

  BlockLiteralPoolScope scope(this);
  RecordLiteral(imm, rt.SizeInBytes());
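  // The load is emitted with a zero literal offset; EmitLiteralPool patches it
  // later (via Instruction::SetImmLLiteral) to point at the pool entry
  // recorded above.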
  Emit(op | ImmLLiteral(0) | Rt(rt));
}


// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
// imm_s and imm_r are updated with immediates encoded in the format required
// by the corresponding fields in the logical instruction.
// If it cannot be encoded, the function returns false, and the values pointed
// to by n, imm_s and imm_r are undefined.
bool Assembler::IsImmLogical(uint64_t value,
                             unsigned width,
                             unsigned* n,
                             unsigned* imm_s,
                             unsigned* imm_r) {
  VIXL_ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
  VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));

  // Logical immediates are encoded with the parameters n, imm_s and imm_r,
  // as described in the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32- or 64-bit value, depending on destination register width.
  //
  // To test if an arbitrary immediate can be encoded using this scheme, an
  // iterative algorithm is used.
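  //
  // For example, 0x0f0f0f0f0f0f0f0f is the 8-bit pattern 00001111 repeated
  // across a 64-bit value: the least significant four bits are set (S = 3)
  // and there is no rotation (R = 0), so it is encodable with N = 0,
  // imm_s = 0b110011 and imm_r = 0b000000.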
  //
  // TODO: This code does not consider using X/W register overlap to support
  // 64-bit immediates where the top 32 bits are zero and the bottom 32 bits
  // are an encodable logical immediate.

  // 1. If the value has all set or all clear bits, it can't be encoded.
  if ((value == 0) || (value == kXRegMask) ||
      ((width == kWRegSize) && (value == kWRegMask))) {
    return false;
  }

  unsigned lead_zero = CountLeadingZeros(value, width);
  unsigned lead_one = CountLeadingZeros(~value, width);
  unsigned trail_zero = CountTrailingZeros(value, width);
  unsigned trail_one = CountTrailingZeros(~value, width);
  unsigned set_bits = CountSetBits(value, width);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  int imm_s_fixed = (width == kXRegSize) ? -128 : -64;
  int imm_s_mask = 0x3F;

  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      *n = 0;
      *imm_s = 0x3C;
      *imm_r = (value & 3) - 1;
      return true;
    }

    *n = (width == 64) ? 1 : 0;
    *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      *imm_r = 0;
    } else {
      *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
    //    the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      return true;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      return true;
    }

    // 5. If the most-significant half of the bitwise value is equal to the
    //    least-significant half, return to step 2 using the least-significant
    //    half of the value.
    uint64_t mask = (UINT64_C(1) << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return false;
  }
}


bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return is_uint5(immediate);
}


bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form:
  // aBbb.bbbc.defg.h000.0000.0000.0000.0000
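  // Illustrative examples: 1.0f (0x3f800000) and -0.5f (0xbf000000) match this
  // form and are encodable, whereas 0.1f (0x3dcccccd) is not, because its low
  // nineteen bits are not zero.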
  uint32_t bits = float_to_rawbits(imm);
  // bits[19..0] are cleared.
  if ((bits & 0x7ffff) != 0) {
    return false;
  }

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {
    return false;
  }

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }

  return true;
}


bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  // 0000.0000.0000.0000.0000.0000.0000.0000
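  // Illustrative example: 1.0 (0x3ff0000000000000) matches this form and is
  // encodable, whereas 0.1 is not, because its low 48 bits are not zero.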
  uint64_t bits = double_to_rawbits(imm);
  // bits[47..0] are cleared.
  if ((bits & UINT64_C(0x0000ffffffffffff)) != 0) {
    return false;
  }

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
    return false;
  }

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
    return false;
  }

  return true;
}


LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  VIXL_ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d : LDR_s;
  }
}


LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
    const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDP_x : LDP_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDP_d : LDP_s;
  }
}


LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  VIXL_ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STR_d : STR_s;
  }
}


LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
    const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STP_d : STP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDNP_x : LDNP_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDNP_d : LDNP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STNP_x : STNP_w;
  } else {
    VIXL_ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STNP_d : STNP_s;
  }
}


void Assembler::RecordLiteral(int64_t imm, unsigned size) {
  literals_.push_front(new Literal(pc_, imm, size));
}


// Check if a literal pool should be emitted. Currently a literal pool is
// emitted when:
//  * the distance to the first literal load handled by this pool is greater
//    than the recommended distance and the literal pool can be emitted without
//    generating a jump over it.
//  * the distance to the first literal load handled by this pool is greater
//    than twice the recommended distance.
// TODO: refine this heuristic using real-world data.
void Assembler::CheckLiteralPool(LiteralPoolEmitOption option) {
  if (IsLiteralPoolBlocked()) {
    // Literal pool emission is forbidden, no point in doing further checks.
    return;
  }

  if (literals_.empty()) {
    // No literal pool to emit.
    next_literal_pool_check_ += kLiteralPoolCheckInterval;
    return;
  }

  intptr_t distance = pc_ - literals_.back()->pc_;
  if ((distance < kRecommendedLiteralPoolRange) ||
      ((option == JumpRequired) &&
       (distance < (2 * kRecommendedLiteralPoolRange)))) {
    // We prefer not to have to jump over the literal pool.
    next_literal_pool_check_ += kLiteralPoolCheckInterval;
    return;
  }

  EmitLiteralPool(option);
}


void Assembler::EmitLiteralPool(LiteralPoolEmitOption option) {
  // Prevent recursive calls while emitting the literal pool.
  BlockLiteralPoolScope scope(this);

  Label marker;
  Label start_of_pool;
  Label end_of_pool;

  if (option == JumpRequired) {
    b(&end_of_pool);
  }

  // Leave space for a literal pool marker. This is populated later, once the
  // size of the pool is known.
  bind(&marker);
  nop();

  // Now populate the literal pool.
  bind(&start_of_pool);
  std::list<Literal*>::iterator it;
  for (it = literals_.begin(); it != literals_.end(); it++) {
    // Update the load-literal instruction to point to this pool entry.
    Instruction* load_literal = (*it)->pc_;
    load_literal->SetImmLLiteral(pc_);
    // Copy the data into the pool.
    uint64_t value = (*it)->value_;
    unsigned size = (*it)->size_;
    VIXL_ASSERT((size == kXRegSizeInBytes) || (size == kWRegSizeInBytes));
    VIXL_ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
    memcpy(pc_, &value, size);
    pc_ += size;
    delete *it;
  }
  literals_.clear();
  bind(&end_of_pool);

  // The pool size should always be a multiple of four bytes because that is
  // the scaling applied by the LDR (literal) instruction, even for X-register
  // loads.
  VIXL_ASSERT((SizeOfCodeGeneratedSince(&start_of_pool) % 4) == 0);
  uint64_t pool_size = SizeOfCodeGeneratedSince(&start_of_pool) / 4;

  // Literal pool marker indicating the size in words of the literal pool.
  // We use a literal load to the zero register, the offset indicating the
  // size in words. This instruction can encode a large enough offset to span
  // the entire pool at its maximum size.
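  // For example, a pool holding two X-register literals is sixteen bytes, so
  // the marker would encode an offset of four words.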
  Instr marker_instruction = LDR_x_lit | ImmLLiteral(pool_size) | Rt(xzr);
  memcpy(marker.target(), &marker_instruction, kInstructionSize);

  next_literal_pool_check_ = pc_ + kLiteralPoolCheckInterval;
}


// Return the size in bytes required by the literal pool entries. This does
// not include any marker or branch over the literal pool itself.
size_t Assembler::LiteralPoolSize() {
  size_t size = 0;

  std::list<Literal*>::iterator it;
  for (it = literals_.begin(); it != literals_.end(); it++) {
    size += (*it)->size_;
  }

  return size;
}


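// Return true if any of the valid arguments alias each other, that is, if the
// same register code appears more than once among the core registers or among
// the FP registers. Illustrative examples: AreAliased(x2, w2) is true because
// both operands use register code 2, while AreAliased(x2, w3, d2) is false.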
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      VIXL_ASSERT(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
    CountSetBits(unique_regs, sizeof(unique_regs) * 8);
  int number_of_unique_fpregs =
    CountSetBits(unique_fpregs, sizeof(unique_fpregs) * 8);

  VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs);
  VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}


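// Return true if every valid argument has the same size and type as reg1;
// invalid (NoReg) arguments are ignored. Illustrative examples:
// AreSameSizeAndType(x2, x3) is true, while AreSameSizeAndType(x2, w3) and
// AreSameSizeAndType(x2, d3) are false.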
bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  VIXL_ASSERT(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}


}  // namespace vixl