// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_

#include "src/arm64/assembler-arm64.h"
#include "src/cpu.h"
#include "src/debug.h"


namespace v8 {
namespace internal {


bool CpuFeatures::SupportsCrankshaft() { return true; }


void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
  UNIMPLEMENTED();
}


void RelocInfo::set_target_address(Address target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
      IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


inline unsigned CPURegister::code() const {
  ASSERT(IsValid());
  return reg_code;
}


inline CPURegister::RegisterType CPURegister::type() const {
  ASSERT(IsValidOrNone());
  return reg_type;
}


inline RegList CPURegister::Bit() const {
  ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
  return IsValid() ? 1UL << reg_code : 0;
}


inline unsigned CPURegister::SizeInBits() const {
  ASSERT(IsValid());
  return reg_size;
}


inline int CPURegister::SizeInBytes() const {
  ASSERT(IsValid());
  ASSERT(SizeInBits() % 8 == 0);
  return reg_size / 8;
}


inline bool CPURegister::Is32Bits() const {
  ASSERT(IsValid());
  return reg_size == 32;
}


inline bool CPURegister::Is64Bits() const {
  ASSERT(IsValid());
  return reg_size == 64;
}


inline bool CPURegister::IsValid() const {
  if (IsValidRegister() || IsValidFPRegister()) {
    ASSERT(!IsNone());
    return true;
  } else {
    ASSERT(IsNone());
    return false;
  }
}


inline bool CPURegister::IsValidRegister() const {
  return IsRegister() &&
         ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
         ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}


inline bool CPURegister::IsValidFPRegister() const {
  return IsFPRegister() &&
         ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
         (reg_code < kNumberOfFPRegisters);
}


inline bool CPURegister::IsNone() const {
  // kNoRegister types should always have size 0 and code 0.
  ASSERT((reg_type != kNoRegister) || (reg_code == 0));
  ASSERT((reg_type != kNoRegister) || (reg_size == 0));

  return reg_type == kNoRegister;
}


inline bool CPURegister::Is(const CPURegister& other) const {
  ASSERT(IsValidOrNone() && other.IsValidOrNone());
  return Aliases(other) && (reg_size == other.reg_size);
}


inline bool CPURegister::Aliases(const CPURegister& other) const {
  ASSERT(IsValidOrNone() && other.IsValidOrNone());
  return (reg_code == other.reg_code) && (reg_type == other.reg_type);
}


inline bool CPURegister::IsRegister() const {
  return reg_type == kRegister;
}


inline bool CPURegister::IsFPRegister() const {
  return reg_type == kFPRegister;
}


inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
  return (reg_size == other.reg_size) && (reg_type == other.reg_type);
}


inline bool CPURegister::IsValidOrNone() const {
  return IsValid() || IsNone();
}


inline bool CPURegister::IsZero() const {
  ASSERT(IsValid());
  return IsRegister() && (reg_code == kZeroRegCode);
}


inline bool CPURegister::IsSP() const {
  ASSERT(IsValid());
  return IsRegister() && (reg_code == kSPRegInternalCode);
}
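// Note: in the ARM64 instruction encodings, register code 31 names either
// the stack pointer or the zero register, depending on the instruction. The
// assembler keeps the two apart by giving csp the dedicated internal code
// kSPRegInternalCode, so IsZero() and IsSP() can never both be true.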


inline void CPURegList::Combine(const CPURegList& other) {
  ASSERT(IsValid());
  ASSERT(other.type() == type_);
  ASSERT(other.RegisterSizeInBits() == size_);
  list_ |= other.list();
}


inline void CPURegList::Remove(const CPURegList& other) {
  ASSERT(IsValid());
  if (other.type() == type_) {
    list_ &= ~other.list();
  }
}


inline void CPURegList::Combine(const CPURegister& other) {
  ASSERT(other.type() == type_);
  ASSERT(other.SizeInBits() == size_);
  Combine(other.code());
}


inline void CPURegList::Remove(const CPURegister& other1,
                               const CPURegister& other2,
                               const CPURegister& other3,
                               const CPURegister& other4) {
  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}


inline void CPURegList::Combine(int code) {
  ASSERT(IsValid());
  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
  list_ |= (1UL << code);
}


inline void CPURegList::Remove(int code) {
  ASSERT(IsValid());
  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
  list_ &= ~(1UL << code);
}
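// A CPURegList is a simple bit-set (RegList) of registers that all share one
// type and size, which is why Combine() and Remove() assert compatibility
// before editing the underlying mask.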


inline Register Register::XRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return csp;
  } else {
    ASSERT(code < kNumberOfRegisters);
    return Register::Create(code, kXRegSizeInBits);
  }
}


inline Register Register::WRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return wcsp;
  } else {
    ASSERT(code < kNumberOfRegisters);
    return Register::Create(code, kWRegSizeInBits);
  }
}


inline FPRegister FPRegister::SRegFromCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return FPRegister::Create(code, kSRegSizeInBits);
}


inline FPRegister FPRegister::DRegFromCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return FPRegister::Create(code, kDRegSizeInBits);
}


inline Register CPURegister::W() const {
  ASSERT(IsValidRegister());
  return Register::WRegFromCode(reg_code);
}


inline Register CPURegister::X() const {
  ASSERT(IsValidRegister());
  return Register::XRegFromCode(reg_code);
}


inline FPRegister CPURegister::S() const {
  ASSERT(IsValidFPRegister());
  return FPRegister::SRegFromCode(reg_code);
}


inline FPRegister CPURegister::D() const {
  ASSERT(IsValidFPRegister());
  return FPRegister::DRegFromCode(reg_code);
}
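// W(), X(), S() and D() return a differently-sized view of the same register
// code, e.g. x2.W() is w2 and s7.D() is d7. They are useful when an
// instruction needs a specific operand width regardless of how the register
// was originally named.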


// Immediate.
// Default initializer is for int types
template<typename T>
struct ImmediateInitializer {
  static const bool kIsIntType = true;
  static inline RelocInfo::Mode rmode_for(T) {
    return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
  }
  static inline int64_t immediate_for(T t) {
    STATIC_ASSERT(sizeof(T) <= 8);
    return t;
  }
};


template<>
struct ImmediateInitializer<Smi*> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(Smi* t) {
    return RelocInfo::NONE64;
  }
  static inline int64_t immediate_for(Smi* t) {
    return reinterpret_cast<int64_t>(t);
  }
};


template<>
struct ImmediateInitializer<ExternalReference> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
    return RelocInfo::EXTERNAL_REFERENCE;
  }
  static inline int64_t immediate_for(ExternalReference t) {
    return reinterpret_cast<int64_t>(t.address());
  }
};


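// How the initializers above are used: Immediate(1234) picks NONE32 or
// NONE64 relocation based on the integer width, Immediate(Smi::FromInt(5))
// stores the tagged smi bits, and Immediate(ExternalReference(...)) records
// an EXTERNAL_REFERENCE relocation so the address can be relocated later.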
template<typename T>
Immediate::Immediate(Handle<T> value) {
  InitializeHandle(value);
}


template<typename T>
Immediate::Immediate(T t)
    : value_(ImmediateInitializer<T>::immediate_for(t)),
      rmode_(ImmediateInitializer<T>::rmode_for(t)) {}


template<typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
    : value_(ImmediateInitializer<T>::immediate_for(t)),
      rmode_(rmode) {
  STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
}


// Operand.
template<typename T>
Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}


template<typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}


template<typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
    : immediate_(t, rmode),
      reg_(NoReg) {}


Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
  ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
  ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  ASSERT(reg.IsValid());
  ASSERT(shift_amount <= 4);
  ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const {
  return reg_.Is(NoReg);
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return ImmediateValue() == 0;
  } else {
    return reg().IsZero();
  }
}


Operand Operand::ToExtendedRegister() const {
  ASSERT(IsShiftedRegister());
  ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
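// ToExtendedRegister() is used when an operand written as (reg, LSL #n) has
// to be encoded in an instruction form that only accepts extended-register
// operands, typically arithmetic involving the stack pointer; for the cases
// the assertions above allow, UXTX/UXTW #n describes the same value as
// LSL #n.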


Immediate Operand::immediate() const {
  ASSERT(IsImmediate());
  return immediate_;
}


int64_t Operand::ImmediateValue() const {
  ASSERT(IsImmediate());
  return immediate_.value();
}


Register Operand::reg() const {
  ASSERT(IsShiftedRegister() || IsExtendedRegister());
  return reg_;
}


Shift Operand::shift() const {
  ASSERT(IsShiftedRegister());
  return shift_;
}


Extend Operand::extend() const {
  ASSERT(IsExtendedRegister());
  return extend_;
}


unsigned Operand::shift_amount() const {
  ASSERT(IsShiftedRegister() || IsExtendedRegister());
  return shift_amount_;
}


Operand Operand::UntagSmi(Register smi) {
  ASSERT(smi.Is64Bits());
  return Operand(smi, ASR, kSmiShift);
}


Operand Operand::UntagSmiAndScale(Register smi, int scale) {
  ASSERT(smi.Is64Bits());
  ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
  if (scale > kSmiShift) {
    return Operand(smi, LSL, scale - kSmiShift);
  } else if (scale < kSmiShift) {
    return Operand(smi, ASR, kSmiShift - scale);
  }
  return Operand(smi);
}
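// Worked example, assuming the 64-bit smi layout where the untagged value
// lives in the upper 32 bits (kSmiShift == 32): UntagSmi(x0) is
// Operand(x0, ASR, 32), and UntagSmiAndScale(x0, 3) is Operand(x0, ASR, 29),
// which untags the smi and multiplies it by 8 in one shifted-register
// operand.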


MemOperand::MemOperand()
  : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
}


MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
  : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
  ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
  ASSERT(base.Is64Bits() && !base.IsZero());
  ASSERT(!regoffset.IsSP());
  ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
  ASSERT(base.Is64Bits() && !base.IsZero());
  ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
  : base_(base), addrmode_(addrmode) {
  ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.ImmediateValue();

    regoffset_ = NoReg;
  } else if (offset.IsShiftedRegister()) {
    ASSERT(addrmode == Offset);

    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    ASSERT(shift_ == LSL);
  } else {
    ASSERT(offset.IsExtendedRegister());
    ASSERT(addrmode == Offset);

    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    ASSERT(!regoffset_.IsSP());
    ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}
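// The constructors above cover the ARM64 addressing modes used by the
// assembler, for example (illustrative only):
//   MemOperand(x0, 16)             -> [x0, #16]
//   MemOperand(x0, x1, LSL, 3)     -> [x0, x1, lsl #3]
//   MemOperand(x0, w1, SXTW)       -> [x0, w1, sxtw]
//   MemOperand(x0, 16, PreIndex)   -> [x0, #16]!
//   MemOperand(x0, 16, PostIndex)  -> [x0], #16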

bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}


bool MemOperand::IsPreIndex() const {
  return addrmode_ == PreIndex;
}


bool MemOperand::IsPostIndex() const {
  return addrmode_ == PostIndex;
}

Operand MemOperand::OffsetAsOperand() const {
  if (IsImmediateOffset()) {
    return offset();
  } else {
    ASSERT(IsRegisterOffset());
    if (extend() == NO_EXTEND) {
      return Operand(regoffset(), shift(), shift_amount());
    } else {
      return Operand(regoffset(), extend(), shift_amount());
    }
  }
}


void Assembler::Unreachable() {
#ifdef USE_SIMULATOR
  debug("UNREACHABLE", __LINE__, BREAK);
#else
  // Crash by branching to 0. lr now points near the fault.
  Emit(BLR | Rn(xzr));
#endif
}


Address Assembler::target_pointer_address_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  ASSERT(instr->IsLdrLiteralX());
  return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}


// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return Memory::Address_at(target_pointer_address_at(pc));
}


Address Assembler::target_address_at(Address pc, Code* code) {
  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
  return target_address_at(pc, constant_pool);
}


Address Assembler::target_address_from_return_address(Address pc) {
  // Returns the address of the call target from the return address that will
  // be returned to after a call.
  // Call sequence on ARM64 is:
  //  ldr ip0, #... @ load from literal pool
  //  blr ip0
  Address candidate = pc - 2 * kInstructionSize;
  Instruction* instr = reinterpret_cast<Instruction*>(candidate);
  USE(instr);
  ASSERT(instr->IsLdrLiteralX());
  return candidate;
}


Address Assembler::return_address_from_call_start(Address pc) {
  // The call, generated by MacroAssembler::Call, is one of two possible
  // sequences:
  //
  // Without relocation:
  //  movz  temp, #(target & 0x000000000000ffff)
  //  movk  temp, #(target & 0x00000000ffff0000)
  //  movk  temp, #(target & 0x0000ffff00000000)
  //  blr   temp
  //
  // With relocation:
  //  ldr   temp, =target
  //  blr   temp
  //
  // The return address is immediately after the blr instruction in both cases,
  // so it can be found by adding the call size to the address at the start of
  // the call sequence.
  STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 4 * kInstructionSize);
  STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);

  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsMovz()) {
    // Verify the instruction sequence.
    ASSERT(instr->following(1)->IsMovk());
    ASSERT(instr->following(2)->IsMovk());
    ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithoutRelocation;
  } else {
    // Verify the instruction sequence.
    ASSERT(instr->IsLdrLiteralX());
    ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithRelocation;
  }
}


void Assembler::deserialization_set_special_target_at(
    Address constant_pool_entry, Code* code, Address target) {
  Memory::Address_at(constant_pool_entry) = target;
}


void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  Memory::Address_at(target_pointer_address_at(pc)) = target;
  // Intuitively, we would think it is necessary to always flush the
  // instruction cache after patching a target address in the code as follows:
  //   CPU::FlushICache(pc, sizeof(target));
  // However, on ARM64 no instruction is actually patched for embedded
  // constants of the form:
  //   ldr   ip0, [pc, #...]
  // Only the constant pool entry is updated; since the instruction accessing
  // this address remains unchanged, a flush is not required.
}


void Assembler::set_target_address_at(Address pc,
                                      Code* code,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
  set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}


int RelocInfo::target_address_size() {
  return kPointerSize;
}


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                              || rmode_ == EMBEDDED_OBJECT
                              || rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_pointer_address_at(pc_);
}


Address RelocInfo::constant_pool_entry_address() {
  ASSERT(IsInConstantPool());
  return Assembler::target_pointer_address_at(pc_);
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_address_at(pc_, host_)));
}


void RelocInfo::set_target_object(Object* target,
                                  WriteBarrierMode write_barrier_mode,
                                  ICacheFlushMode icache_flush_mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Assembler::set_target_address_at(pc_, host_,
                                   reinterpret_cast<Address>(target),
                                   icache_flush_mode);
  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Address RelocInfo::target_reference() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return target_address();
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode write_barrier_mode,
                                         ICacheFlushMode icache_flush_mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) {
    set_target_address(target, write_barrier_mode, icache_flush_mode);
  }
}


Handle<Cell> RelocInfo::target_cell_handle() {
  UNIMPLEMENTED();
  Cell *null_cell = NULL;
  return Handle<Cell>(null_cell);
}


Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(Cell* cell,
                                WriteBarrierMode write_barrier_mode,
                                ICacheFlushMode icache_flush_mode) {
  UNIMPLEMENTED();
}


static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;


Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  UNREACHABLE();  // This should never be reached on ARM64.
  return Handle<Object>();
}


Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  // Read the stub entry point from the code age sequence.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
}


void RelocInfo::set_code_age_stub(Code* stub,
                                  ICacheFlushMode icache_flush_mode) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
  // Overwrite the stub entry point in the code age sequence. This is loaded as
  // a literal so there is no need to call FlushICache here.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  Memory::Address_at(stub_entry_address) = stub->instruction_start();
}


Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  // For the above sequences the RelocInfo points to the literal load that
  // loads the call address.
  return Assembler::target_address_at(pc_, host_);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


void RelocInfo::WipeOut() {
  ASSERT(IsEmbeddedObject(rmode_) ||
         IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) ||
         IsExternalReference(rmode_));
  Assembler::set_target_address_at(pc_, host_, NULL);
}


bool RelocInfo::IsPatchedReturnSequence() {
  // The sequence must be:
  //   ldr ip0, [pc, #offset]
  //   blr ip0
  // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
  Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
  Instruction* i2 = i1->following();
  return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
         i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
}


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
  return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
}


void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
  } else if (((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
              IsPatchedReturnSequence()) ||
             (RelocInfo::IsDebugBreakSlot(mode) &&
              IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d : LDR_s;
  }
}


LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDP_x : LDP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDP_d : LDP_s;
  }
}


LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STR_d : STR_s;
  }
}


LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STP_d : STP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDNP_x : LDNP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDNP_d : LDNP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STNP_x : STNP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STNP_d : STNP_s;
  }
}


LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
  }
}


int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
  ASSERT(kStartOfLabelLinkChain == 0);
  int offset = LinkAndGetByteOffsetTo(label);
  ASSERT(IsAligned(offset, kInstructionSize));
  return offset >> kInstructionSizeLog2;
}


Instr Assembler::Flags(FlagsUpdate S) {
  if (S == SetFlags) {
    return 1 << FlagsUpdate_offset;
  } else if (S == LeaveFlags) {
    return 0 << FlagsUpdate_offset;
  }
  UNREACHABLE();
  return 0;
}


Instr Assembler::Cond(Condition cond) {
  return cond << Condition_offset;
}


Instr Assembler::ImmPCRelAddress(int imm21) {
  CHECK(is_int21(imm21));
  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}
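// In the pc-relative addressing encoding (adr/adrp), the 21-bit immediate is
// split across the instruction: the low two bits go into the immlo field and
// the remaining 19 bits into immhi, which is what the shifts and masks above
// implement.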


Instr Assembler::ImmUncondBranch(int imm26) {
  CHECK(is_int26(imm26));
  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}


Instr Assembler::ImmCondBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCondBranch_offset;
}


Instr Assembler::ImmCmpBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}


Instr Assembler::ImmTestBranch(int imm14) {
  CHECK(is_int14(imm14));
  return truncate_to_int14(imm14) << ImmTestBranch_offset;
}


Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  ASSERT(is_uint6(bit_pos));
  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}
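// For tbz/tbnz the tested bit position is encoded in two pieces: bit 5 of
// bit_pos lands in the b5 field and bits 4:0 in the b40 field, so a 6-bit
// position (0-63) fits even though neither field is 6 bits wide.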


Instr Assembler::SF(Register rd) {
  return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}


Instr Assembler::ImmAddSub(int64_t imm) {
  ASSERT(IsImmAddSub(imm));
  if (is_uint12(imm)) {  // No shift required.
    return imm << ImmAddSub_offset;
  } else {
    return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
}
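// Add/sub immediates are 12-bit values, optionally shifted left by 12 bits.
// For example, 0x123 is encoded directly, while 0x123000 is encoded as 0x123
// with the shift bit set; IsImmAddSub() accepts exactly these two shapes.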


Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
  USE(reg_size);
  return imms << ImmS_offset;
}


Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  ASSERT(is_uint6(immr));
  return immr << ImmR_offset;
}


Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT(is_uint6(imms));
  ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
  USE(reg_size);
  return imms << ImmSetBits_offset;
}


Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmRotate_offset;
}


Instr Assembler::ImmLLiteral(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmLLiteral_offset;
}


Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
  USE(reg_size);
  return bitn << BitN_offset;
}


Instr Assembler::ShiftDP(Shift shift) {
  ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
  return shift << ShiftDP_offset;
}


Instr Assembler::ImmDPShift(unsigned amount) {
  ASSERT(is_uint6(amount));
  return amount << ImmDPShift_offset;
}


Instr Assembler::ExtendMode(Extend extend) {
  return extend << ExtendMode_offset;
}


Instr Assembler::ImmExtendShift(unsigned left_shift) {
  ASSERT(left_shift <= 4);
  return left_shift << ImmExtendShift_offset;
}


Instr Assembler::ImmCondCmp(unsigned imm) {
  ASSERT(is_uint5(imm));
  return imm << ImmCondCmp_offset;
}


Instr Assembler::Nzcv(StatusFlags nzcv) {
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}


Instr Assembler::ImmLSUnsigned(int imm12) {
  ASSERT(is_uint12(imm12));
  return imm12 << ImmLSUnsigned_offset;
}


Instr Assembler::ImmLS(int imm9) {
  ASSERT(is_int9(imm9));
  return truncate_to_int9(imm9) << ImmLS_offset;
}


Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
  ASSERT(((imm7 >> size) << size) == imm7);
  int scaled_imm7 = imm7 >> size;
  ASSERT(is_int7(scaled_imm7));
  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
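// The load/store pair offset is stored scaled by the access size; for
// example, with an 8-byte access size a byte offset of 16 is encoded as the
// 7-bit value 2.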


Instr Assembler::ImmShiftLS(unsigned shift_amount) {
  ASSERT(is_uint1(shift_amount));
  return shift_amount << ImmShiftLS_offset;
}


Instr Assembler::ImmException(int imm16) {
  ASSERT(is_uint16(imm16));
  return imm16 << ImmException_offset;
}


Instr Assembler::ImmSystemRegister(int imm15) {
  ASSERT(is_uint15(imm15));
  return imm15 << ImmSystemRegister_offset;
}


Instr Assembler::ImmHint(int imm7) {
  ASSERT(is_uint7(imm7));
  return imm7 << ImmHint_offset;
}


Instr Assembler::ImmBarrierDomain(int imm2) {
  ASSERT(is_uint2(imm2));
  return imm2 << ImmBarrierDomain_offset;
}


Instr Assembler::ImmBarrierType(int imm2) {
  ASSERT(is_uint2(imm2));
  return imm2 << ImmBarrierType_offset;
}


LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
  ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
  return static_cast<LSDataSize>(op >> SizeLS_offset);
}


Instr Assembler::ImmMoveWide(uint64_t imm) {
  ASSERT(is_uint16(imm));
  return imm << ImmMoveWide_offset;
}


Instr Assembler::ShiftMoveWide(int64_t shift) {
  ASSERT(is_uint2(shift));
  return shift << ShiftMoveWide_offset;
}


Instr Assembler::FPType(FPRegister fd) {
  return fd.Is64Bits() ? FP64 : FP32;
}


Instr Assembler::FPScale(unsigned scale) {
  ASSERT(is_uint6(scale));
  return scale << FPScale_offset;
}


const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
  return reg.Is64Bits() ? xzr : wzr;
}


inline void Assembler::CheckBufferSpace() {
  ASSERT(pc_ < (buffer_ + buffer_size_));
  if (buffer_space() < kGap) {
    GrowBuffer();
  }
}


inline void Assembler::CheckBuffer() {
  CheckBufferSpace();
  if (pc_offset() >= next_veneer_pool_check_) {
    CheckVeneerPool(false, true);
  }
  if (pc_offset() >= next_constant_pool_check_) {
    CheckConstPool(false, true);
  }
}
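// CheckBuffer() is intended to run before each instruction is emitted: it
// grows the buffer when less than kGap bytes remain, and it gives the veneer
// and constant pools a chance to be emitted once their next check offsets
// are reached.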


TypeFeedbackId Assembler::RecordedAstId() {
  ASSERT(!recorded_ast_id_.IsNone());
  return recorded_ast_id_;
}


void Assembler::ClearRecordedAstId() {
  recorded_ast_id_ = TypeFeedbackId::None();
}


} }  // namespace v8::internal

#endif  // V8_ARM64_ASSEMBLER_ARM64_INL_H_
