// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5#ifndef V8_MIPS_CODE_STUBS_ARM_H_
6#define V8_MIPS_CODE_STUBS_ARM_H_
7
8namespace v8 {
9namespace internal {
10
11
// Generates native code for the Array constructor built-ins.
// |call_generic_code| is presumably the fallback target taken when the
// specialized code cannot handle the call -- confirm against the platform
// .cc implementation.
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
13
14
15class StringHelper : public AllStatic {
16 public:
17  // Generate code for copying a large number of characters. This function
18  // is allowed to spend extra time setting up conditions to make copying
19  // faster. Copying of overlapping regions is not supported.
20  // Dest register ends at the position after the last character written.
21  static void GenerateCopyCharacters(MacroAssembler* masm,
22                                     Register dest,
23                                     Register src,
24                                     Register count,
25                                     Register scratch,
26                                     String::Encoding encoding);
27
28  // Compares two flat one-byte strings and returns result in v0.
29  static void GenerateCompareFlatOneByteStrings(
30      MacroAssembler* masm, Register left, Register right, Register scratch1,
31      Register scratch2, Register scratch3, Register scratch4);
32
33  // Compares two flat one-byte strings for equality and returns result in v0.
34  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
35                                              Register left, Register right,
36                                              Register scratch1,
37                                              Register scratch2,
38                                              Register scratch3);
39
40 private:
41  static void GenerateOneByteCharsCompareLoop(
42      MacroAssembler* masm, Register left, Register right, Register length,
43      Register scratch1, Register scratch2, Register scratch3,
44      Label* chars_not_equal);
45
46 private:
47  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
48};
49
50
51class StoreRegistersStateStub: public PlatformCodeStub {
52 public:
53  explicit StoreRegistersStateStub(Isolate* isolate)
54      : PlatformCodeStub(isolate) {}
55
56  static void GenerateAheadOfTime(Isolate* isolate);
57
58 private:
59  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
60  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
61};
62
63
64class RestoreRegistersStateStub: public PlatformCodeStub {
65 public:
66  explicit RestoreRegistersStateStub(Isolate* isolate)
67      : PlatformCodeStub(isolate) {}
68
69  static void GenerateAheadOfTime(Isolate* isolate);
70
71 private:
72  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
73  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
74};
75
76// This stub can convert a signed int32 to a heap number (double).  It does
77// not work for int32s that are in Smi range!  No GC occurs during this stub
78// so you don't have to set up the frame.
79class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
80 public:
81  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
82                             Register the_heap_number, Register scratch,
83                             Register scratch2)
84      : PlatformCodeStub(isolate) {
85    minor_key_ = IntRegisterBits::encode(the_int.code()) |
86                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
87                 ScratchRegisterBits::encode(scratch.code()) |
88                 SignRegisterBits::encode(scratch2.code());
89    DCHECK(IntRegisterBits::is_valid(the_int.code()));
90    DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
91    DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
92    DCHECK(SignRegisterBits::is_valid(scratch2.code()));
93  }
94
95  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
96
97 private:
98  void Generate(MacroAssembler* masm);
99
100  Register the_int() const {
101    return Register::from_code(IntRegisterBits::decode(minor_key_));
102  }
103
104  Register the_heap_number() const {
105    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
106  }
107
108  Register scratch() const {
109    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
110  }
111
112  Register sign() const {
113    return Register::from_code(SignRegisterBits::decode(minor_key_));
114  }
115
116  // Minor key encoding in 16 bits.
117  class IntRegisterBits: public BitField<int, 0, 4> {};
118  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
119  class ScratchRegisterBits: public BitField<int, 8, 4> {};
120  class SignRegisterBits: public BitField<int, 12, 4> {};
121
122  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
123  DEFINE_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
124};
125
126
// Stub that records a write of |value| into a field (at |address|) of
// |object| for the GC's write-barrier bookkeeping (remembered set and
// incremental marking).  The generated code begins with a pair of patchable
// branch instructions that act as a mode switch: GetMode() decodes the
// current Mode from them and Patch() rewrites them in place, so the stub
// can change behavior without being regenerated.
class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Isolate* isolate,
                  Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : PlatformCodeStub(isolate),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
    // Pack all parameters into the minor key so an equivalent stub can be
    // reconstructed from the key alone (see the second constructor).
    minor_key_ = ObjectBits::encode(object.code()) |
                 ValueBits::encode(value.code()) |
                 AddressBits::encode(address.code()) |
                 RememberedSetActionBits::encode(remembered_set_action) |
                 SaveFPRegsModeBits::encode(fp_mode);
  }

  // Reconstructs a stub (including its register allocation) from a
  // previously encoded stub key.
  RecordWriteStub(uint32_t key, Isolate* isolate)
      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}

  // The three states the patchable branch pair can encode.
  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool SometimesSetsUpAFrame() { return false; }

  // Rewrites the instruction at |pos| into "bne zero_reg, zero_reg, off":
  // zero_reg always equals itself, so the branch is never taken and the
  // instruction acts as a nop.  The original 16-bit branch offset is kept
  // so the instruction can later be turned back into a real branch.
  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
    masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
    DCHECK(Assembler::IsBne(masm->instr_at(pos)));
  }

  // Inverse of PatchBranchIntoNop: rewrites the instruction at |pos| into
  // "beq zero_reg, zero_reg, off", an always-taken branch, preserving the
  // original 16-bit offset.
  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
    masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
    DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
  }

  // Decodes the current Mode from the two patchable branches.  The second
  // branch sits 2 * kInstrSize after the first -- the intervening slot is
  // the first branch's delay slot.
  static Mode GetMode(Code* stub) {
    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
                                                   2 * Assembler::kInstrSize);

    if (Assembler::IsBeq(first_instruction)) {
      return INCREMENTAL;
    }

    DCHECK(Assembler::IsBne(first_instruction));

    if (Assembler::IsBeq(second_instruction)) {
      return INCREMENTAL_COMPACTION;
    }

    DCHECK(Assembler::IsBne(second_instruction));

    return STORE_BUFFER_ONLY;
  }

  // Patches the stub in place to the requested |mode|.  Only the mode
  // transitions asserted by the DCHECKs are expected; afterwards the icache
  // is flushed for the four affected instructions (the two branches and
  // their delay slots).
  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(NULL,
                        stub->instruction_start(),
                        stub->instruction_size());
    switch (mode) {
      case STORE_BUFFER_ONLY:
        DCHECK(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        PatchBranchIntoNop(&masm, 0);
        PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
        break;
      case INCREMENTAL:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 0);
        break;
      case INCREMENTAL_COMPACTION:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
        break;
    }
    DCHECK(GetMode(stub) == mode);
    CpuFeatures::FlushICache(stub->instruction_start(),
                             4 * Assembler::kInstrSize);
  }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();

 private:
  // This is a helper class for freeing up 3 scratch registers.  The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_(object),
          address_(address),
          scratch0_(scratch0) {
      DCHECK(!AreAliased(scratch0, object, address, no_reg));
      // Pick a fourth register distinct from the three we were given.
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) {
      masm->pop(scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved.  The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        masm->MultiPushFPU(kCallerSavedFPU);
      }
    }

    // Pops in the reverse order of SaveCallerSaveRegisters.
    inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        masm->MultiPopFPU(kCallerSavedFPU);
      }
      masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    friend class RecordWriteStub;
  };

  // Tells CheckNeedsToInformIncrementalMarker what to do when the marker
  // does not need to be informed.
  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }

  // Code generation; implemented in the platform code-stubs .cc file.
  virtual void Generate(MacroAssembler* masm) OVERRIDE;
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  // CodeStub hook: registers the generated code with the incremental marker
  // (which performs the mode patching above).
  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  // Accessors decoding the registers and flags packed into the minor key.
  Register object() const {
    return Register::from_code(ObjectBits::decode(minor_key_));
  }

  Register value() const {
    return Register::from_code(ValueBits::decode(minor_key_));
  }

  Register address() const {
    return Register::from_code(AddressBits::decode(minor_key_));
  }

  RememberedSetAction remembered_set_action() const {
    return RememberedSetActionBits::decode(minor_key_);
  }

  SaveFPRegsMode save_fp_regs_mode() const {
    return SaveFPRegsModeBits::decode(minor_key_);
  }

  // Minor key layout: three 5-bit register codes plus two 1-bit flags.
  class ObjectBits: public BitField<int, 0, 5> {};
  class ValueBits: public BitField<int, 5, 5> {};
  class AddressBits: public BitField<int, 10, 5> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};

  Label slow_;  // Presumably bound during code generation; see the .cc file.
  RegisterAllocation regs_;

  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
327
328
329// Trampoline stub to call into native code. To call safely into native code
330// in the presence of compacting GC (which can move code objects) we need to
331// keep the code which called into native pinned in the memory. Currently the
332// simplest approach is to generate such stub early enough so it can never be
333// moved by GC
334class DirectCEntryStub: public PlatformCodeStub {
335 public:
336  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
337  void GenerateCall(MacroAssembler* masm, Register target);
338
339 private:
340  bool NeedsImmovableCode() { return true; }
341
342  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
343  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
344};
345
346
347class NameDictionaryLookupStub: public PlatformCodeStub {
348 public:
349  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
350
351  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
352      : PlatformCodeStub(isolate) {
353    minor_key_ = LookupModeBits::encode(mode);
354  }
355
356  static void GenerateNegativeLookup(MacroAssembler* masm,
357                                     Label* miss,
358                                     Label* done,
359                                     Register receiver,
360                                     Register properties,
361                                     Handle<Name> name,
362                                     Register scratch0);
363
364  static void GeneratePositiveLookup(MacroAssembler* masm,
365                                     Label* miss,
366                                     Label* done,
367                                     Register elements,
368                                     Register name,
369                                     Register r0,
370                                     Register r1);
371
372  virtual bool SometimesSetsUpAFrame() { return false; }
373
374 private:
375  static const int kInlinedProbes = 4;
376  static const int kTotalProbes = 20;
377
378  static const int kCapacityOffset =
379      NameDictionary::kHeaderSize +
380      NameDictionary::kCapacityIndex * kPointerSize;
381
382  static const int kElementsStartOffset =
383      NameDictionary::kHeaderSize +
384      NameDictionary::kElementsStartIndex * kPointerSize;
385
386  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
387
388  class LookupModeBits: public BitField<LookupMode, 0, 1> {};
389
390  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
391  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
392};
393
394
395} }  // namespace v8::internal
396
397#endif  // V8_MIPS_CODE_STUBS_ARM_H_
398