// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
#define V8_MIPS_CODE_STUBS_MIPS_H_

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);


class StringHelper : public AllStatic {
 public:
  // Generate code for copying a large number of characters. This function
  // is allowed to spend extra time setting up conditions to make copying
  // faster. Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  static void GenerateCopyCharacters(MacroAssembler* masm,
                                     Register dest,
                                     Register src,
                                     Register count,
                                     Register scratch,
                                     String::Encoding encoding);
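
  // A minimal illustrative call (the registers and encoding chosen here are
  // assumptions for the sketch, not taken from a real call site):
  //
  //   StringHelper::GenerateCopyCharacters(masm, a0 /* dest */, a1 /* src */,
  //                                        a2 /* count */, t0 /* scratch */,
  //                                        String::ONE_BYTE_ENCODING);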

  // Compares two flat one-byte strings and returns result in v0.
  static void GenerateCompareFlatOneByteStrings(
      MacroAssembler* masm, Register left, Register right, Register scratch1,
      Register scratch2, Register scratch3, Register scratch4);

  // Compares two flat one-byte strings for equality and returns result in v0.
  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3);

 private:
  static void GenerateOneByteCharsCompareLoop(
      MacroAssembler* masm, Register left, Register right, Register length,
      Register scratch1, Register scratch2, Register scratch3,
      Label* chars_not_equal);

  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};


class StoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit StoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};


class RestoreRegistersStateStub: public PlatformCodeStub {
 public:
  explicit RestoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};

// This stub can convert a signed int32 to a heap number (double).  It does
// not work for int32s that are in Smi range!  No GC occurs during this stub,
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
 public:
  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
                             Register the_heap_number, Register scratch,
                             Register scratch2)
      : PlatformCodeStub(isolate) {
    minor_key_ = IntRegisterBits::encode(the_int.code()) |
                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
                 ScratchRegisterBits::encode(scratch.code()) |
                 SignRegisterBits::encode(scratch2.code());
    DCHECK(IntRegisterBits::is_valid(the_int.code()));
    DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
    DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
    DCHECK(SignRegisterBits::is_valid(scratch2.code()));
  }

  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);

 private:
  Register the_int() const {
    return Register::from_code(IntRegisterBits::decode(minor_key_));
  }

  Register the_heap_number() const {
    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
  }

  Register scratch() const {
    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
  }

  Register sign() const {
    return Register::from_code(SignRegisterBits::decode(minor_key_));
  }

  // Minor key encoding in 16 bits.
  class IntRegisterBits: public BitField<int, 0, 4> {};
  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
  class ScratchRegisterBits: public BitField<int, 8, 4> {};
  class SignRegisterBits: public BitField<int, 12, 4> {};
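
  // Layout of the 16-bit minor key, as implied by the BitFields above:
  //   [3:0]   the_int register code
  //   [7:4]   the_heap_number register code
  //   [11:8]  scratch register code
  //   [15:12] sign (scratch2) register code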

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
};
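
// A rough usage sketch (the register choices below are assumptions made for
// illustration; real call sites pick registers that fit their context):
//
//   WriteInt32ToHeapNumberStub stub(isolate, t0 /* the_int */,
//                                   v0 /* the_heap_number */,
//                                   t1 /* scratch */, t2 /* scratch2 */);
//   masm->CallStub(&stub);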


class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Isolate* isolate,
                  Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : PlatformCodeStub(isolate),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
    minor_key_ = ObjectBits::encode(object.code()) |
                 ValueBits::encode(value.code()) |
                 AddressBits::encode(address.code()) |
                 RememberedSetActionBits::encode(remembered_set_action) |
                 SaveFPRegsModeBits::encode(fp_mode);
  }

  RecordWriteStub(uint32_t key, Isolate* isolate)
      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
    masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
    DCHECK(Assembler::IsBne(masm->instr_at(pos)));
  }

  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
    masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
    DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
  }
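
  // GetMode() below reads the current mode back out of the generated code:
  // the instructions at offsets 0 and 2 * Assembler::kInstrSize act as flags.
  // A "nop" flag is the never-taken bne zero_reg, zero_reg written by
  // PatchBranchIntoNop(), and a "branch" flag is the always-taken
  // beq zero_reg, zero_reg written by PatchNopIntoBranch():
  //   beq, *   -> INCREMENTAL
  //   bne, beq -> INCREMENTAL_COMPACTION
  //   bne, bne -> STORE_BUFFER_ONLY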

  static Mode GetMode(Code* stub) {
    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
                                                   2 * Assembler::kInstrSize);

    if (Assembler::IsBeq(first_instruction)) {
      return INCREMENTAL;
    }

    DCHECK(Assembler::IsBne(first_instruction));

    if (Assembler::IsBeq(second_instruction)) {
      return INCREMENTAL_COMPACTION;
    }

    DCHECK(Assembler::IsBne(second_instruction));

    return STORE_BUFFER_ONLY;
  }

  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(NULL,
                        stub->instruction_start(),
                        stub->instruction_size());
    switch (mode) {
      case STORE_BUFFER_ONLY:
        DCHECK(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        PatchBranchIntoNop(&masm, 0);
        PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
        break;
      case INCREMENTAL:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 0);
        break;
      case INCREMENTAL_COMPACTION:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
        break;
    }
    DCHECK(GetMode(stub) == mode);
    CpuFeatures::FlushICache(stub->instruction_start(),
                             4 * Assembler::kInstrSize);
  }
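
  // An illustrative call (hypothetical call site, shown only as a sketch):
  //
  //   RecordWriteStub::Patch(code, RecordWriteStub::INCREMENTAL);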

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();

 private:
  // This is a helper class for freeing up 3 scratch registers.  The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_(object),
          address_(address),
          scratch0_(scratch0) {
      DCHECK(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) {
      masm->pop(scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved.  The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        masm->MultiPushFPU(kCallerSavedFPU);
      }
    }

    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        masm->MultiPopFPU(kCallerSavedFPU);
      }
      masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    friend class RecordWriteStub;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }

  virtual void Generate(MacroAssembler* masm) OVERRIDE;
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  Register object() const {
    return Register::from_code(ObjectBits::decode(minor_key_));
  }

  Register value() const {
    return Register::from_code(ValueBits::decode(minor_key_));
  }

  Register address() const {
    return Register::from_code(AddressBits::decode(minor_key_));
  }

  RememberedSetAction remembered_set_action() const {
    return RememberedSetActionBits::decode(minor_key_);
  }

  SaveFPRegsMode save_fp_regs_mode() const {
    return SaveFPRegsModeBits::decode(minor_key_);
  }

  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
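
  // Layout of the minor key, as implied by the BitFields above:
  //   [4:0]   object register code
  //   [9:5]   value register code
  //   [14:10] address register code
  //   [15]    RememberedSetAction
  //   [16]    SaveFPRegsMode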

  Label slow_;
  RegisterAllocation regs_;

  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};

// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects), we need to
// keep the code that called into native code pinned in memory. Currently the
// simplest approach is to generate such a stub early enough that it can never
// be moved by the GC.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  bool NeedsImmovableCode() { return true; }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
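
// An illustrative use of the stub (the target register is an assumption; by
// MIPS convention, calls through a register typically go via t9):
//
//   DirectCEntryStub stub(isolate);
//   stub.GenerateCall(masm, t9);  // t9 holds the address of the C function.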


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
      : PlatformCodeStub(isolate) {
    minor_key_ = LookupModeBits::encode(mode);
  }

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register r0,
                                     Register r1);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
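
  // Both offsets above are byte offsets into the dictionary's backing store:
  // the header size plus the pointer-sized slot index of the field in
  // question.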

  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};

} }  // namespace v8::internal

#endif  // V8_MIPS_CODE_STUBS_MIPS_H_