1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_S390_CODE_STUBS_S390_H_
6#define V8_S390_CODE_STUBS_S390_H_
7
8#include "src/s390/frames-s390.h"
9
10namespace v8 {
11namespace internal {
12
// Generates the fast-path native code for Array construction; jumps to
// |call_generic_code| when the fast path cannot handle the arguments.
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
14
// Static helpers for generating flat one-byte string comparison code.
// All generators emit code into |masm|; they do not execute anything
// themselves.
class StringHelper : public AllStatic {
 public:
  // Compares two flat one-byte strings and returns result in r0.
  // |left| and |right| hold the string objects; the scratch registers
  // are clobbered by the generated code.
  static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
                                                Register left, Register right,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3);

  // Compares two flat one-byte strings for equality and returns result in r0.
  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register scratch1,
                                              Register scratch2);

 private:
  // Emits the character-by-character comparison loop shared by the public
  // generators; branches to |chars_not_equal| on the first mismatch.
  static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register length,
                                              Register scratch1,
                                              Label* chars_not_equal);

  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
39
// Platform stub that saves register state; generated ahead of time so it
// is available before it could be requested during code generation.
class StoreRegistersStateStub : public PlatformCodeStub {
 public:
  explicit StoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  // Pre-generates this stub for |isolate| (see RestoreRegistersStateStub
  // for the matching restore stub).
  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};
51
// Platform stub that restores register state previously saved by
// StoreRegistersStateStub; also generated ahead of time.
class RestoreRegistersStateStub : public PlatformCodeStub {
 public:
  explicit RestoreRegistersStateStub(Isolate* isolate)
      : PlatformCodeStub(isolate) {}

  // Pre-generates this stub for |isolate|.
  static void GenerateAheadOfTime(Isolate* isolate);

 private:
  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
63
// Write-barrier stub: records a pointer write into |object| at |address|
// of |value| so the GC's remembered set / incremental marker stay
// consistent.  The stub's generated code begins with two branch
// instructions whose condition masks are patched at runtime (GetMode /
// Patch below) to switch the stub between the three Modes without
// regenerating code.
class RecordWriteStub : public PlatformCodeStub {
 public:
  RecordWriteStub(Isolate* isolate, Register object, Register value,
                  Register address, RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : PlatformCodeStub(isolate),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
    // Pack all stub parameters into minor_key_ so an equivalent stub can
    // be located in the stub cache and reconstructed from the key alone.
    minor_key_ = ObjectBits::encode(object.code()) |
                 ValueBits::encode(value.code()) |
                 AddressBits::encode(address.code()) |
                 RememberedSetActionBits::encode(remembered_set_action) |
                 SaveFPRegsModeBits::encode(fp_mode);
  }

  // Reconstructs a stub (including its register allocation) from a
  // previously encoded minor key.
  RecordWriteStub(uint32_t key, Isolate* isolate)
      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}

  // STORE_BUFFER_ONLY: both leading branches disabled (NOP masks).
  // INCREMENTAL: first branch live.  INCREMENTAL_COMPACTION: second live.
  enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

  bool SometimesSetsUpAFrame() override { return false; }

  // Patch an always taken branch into a NOP branch
  // (or vice versa): rewrites the condition mask of the branch instruction
  // at byte offset |pos| in |masm|'s buffer to |c|.
  static void PatchBranchCondMask(MacroAssembler* masm, int pos, Condition c) {
    int32_t instrLen = masm->instr_length_at(pos);
    // Only 4-byte (BRC) and 6-byte (BRCL) relative branches are expected.
    DCHECK(instrLen == 4 || instrLen == 6);

    if (instrLen == 4) {
      // BRC - Branch Mask @ Bits 23-20
      FourByteInstr updatedMask = static_cast<FourByteInstr>(c) << 20;
      masm->instr_at_put<FourByteInstr>(
          pos, (masm->instr_at(pos) & ~kFourByteBrCondMask) | updatedMask);
    } else {
      // BRCL - Branch Mask @ Bits 39-36
      SixByteInstr updatedMask = static_cast<SixByteInstr>(c) << 36;
      masm->instr_at_put<SixByteInstr>(
          pos, (masm->instr_at(pos) & ~kSixByteBrCondMask) | updatedMask);
    }
  }

  // Returns true if |instr| is a branch whose condition mask is all zeros,
  // i.e. a never-taken (NOP) branch.
  static bool isBranchNop(SixByteInstr instr, int instrLength) {
    if ((4 == instrLength && 0 == (instr & kFourByteBrCondMask)) ||
        // BRC - Check for 0x0 mask condition.
        (6 == instrLength && 0 == (instr & kSixByteBrCondMask))) {
      // BRCL - Check for 0x0 mask condition
      return true;
    }
    return false;
  }

  // Derives the stub's current Mode by inspecting which of its first two
  // branch instructions has been patched to a NOP mask.
  static Mode GetMode(Code* stub) {
    int32_t first_instr_length =
        Instruction::InstructionLength(stub->instruction_start());
    int32_t second_instr_length = Instruction::InstructionLength(
        stub->instruction_start() + first_instr_length);

    uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
    uint64_t second_instr =
        Assembler::instr_at(stub->instruction_start() + first_instr_length);

    DCHECK(first_instr_length == 4 || first_instr_length == 6);
    DCHECK(second_instr_length == 4 || second_instr_length == 6);

    bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
    bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);

    // STORE_BUFFER_ONLY has NOP on both branches
    if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
    // INCREMENTAL_COMPACTION has NOP on second branch.
    else if (isFirstInstrNOP && !isSecondInstrNOP)
      return INCREMENTAL_COMPACTION;
    // INCREMENTAL has NOP on first branch.
    else if (!isFirstInstrNOP && isSecondInstrNOP)
      return INCREMENTAL;

    // Both branches live is an invalid state; unreachable in valid stubs.
    DCHECK(false);
    return STORE_BUFFER_ONLY;
  }

  // Switches |stub| to |mode| by patching the condition masks of its two
  // leading branches in place, then flushes the instruction cache over the
  // patched range so the CPU observes the new encoding.
  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
                        stub->instruction_size(), CodeObjectRequired::kNo);

    // Get instruction lengths of two branches
    int32_t first_instr_length = masm.instr_length_at(0);
    int32_t second_instr_length = masm.instr_length_at(first_instr_length);

    switch (mode) {
      case STORE_BUFFER_ONLY:
        DCHECK(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);

        // Disable both branches.
        PatchBranchCondMask(&masm, 0, CC_NOP);
        PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
        break;
      case INCREMENTAL:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        // Enable the first branch only.
        PatchBranchCondMask(&masm, 0, CC_ALWAYS);
        break;
      case INCREMENTAL_COMPACTION:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        // Enable the second branch only.
        PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
        break;
    }
    DCHECK(GetMode(stub) == mode);
    Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
                           first_instr_length + second_instr_length);
  }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();

 private:
  // This is a helper class for freeing up 3 scratch registers.  The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object, Register address, Register scratch0)
        : object_(object), address_(address), scratch0_(scratch0) {
      DCHECK(!AreAliased(scratch0, object, address, no_reg));
      // Pick an additional scratch register distinct from all inputs.
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    // Emits code to preserve the extra scratch register on the stack.
    void Save(MacroAssembler* masm) {
      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved.  The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->push(r14);
      masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        // Save all volatile FP registers except d0.
        masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
      }
    }

    // Mirror of SaveCallerSaveRegisters: pops in exact reverse order.
    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        // Restore all volatile FP registers except d0.
        masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
      }
      masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
      masm->pop(r14);
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    friend class RecordWriteStub;
  };

  // Controls whether generated code returns or falls through to the
  // remembered-set update when the marker need not be informed.
  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  inline Major MajorKey() const final { return RecordWrite; }

  void Generate(MacroAssembler* masm) override;
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  // Registers this stub with incremental marking once its code object
  // exists, so the GC can patch it when marking starts/stops.
  void Activate(Code* code) override {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  Register object() const {
    return Register::from_code(ObjectBits::decode(minor_key_));
  }

  Register value() const {
    return Register::from_code(ValueBits::decode(minor_key_));
  }

  Register address() const {
    return Register::from_code(AddressBits::decode(minor_key_));
  }

  RememberedSetAction remembered_set_action() const {
    return RememberedSetActionBits::decode(minor_key_);
  }

  SaveFPRegsMode save_fp_regs_mode() const {
    return SaveFPRegsModeBits::decode(minor_key_);
  }

  // Minor-key layout.  NOTE(review): bits 12-14 appear unused (AddressBits
  // ends at bit 11, RememberedSetActionBits starts at bit 15) -- confirm
  // this gap is intentional.
  class ObjectBits : public BitField<int, 0, 4> {};
  class ValueBits : public BitField<int, 4, 4> {};
  class AddressBits : public BitField<int, 8, 4> {};
  class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
  };
  class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};

  Label slow_;
  RegisterAllocation regs_;

  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
283
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such a stub early enough so it can never
// be moved by GC.
class DirectCEntryStub : public PlatformCodeStub {
 public:
  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
  // Emits a call through this trampoline to the native code in |target|.
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  // Pins the stub's code object so the compacting GC never relocates it.
  bool NeedsImmovableCode() override { return true; }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
300
// Stub that probes a NameDictionary for a key, in either positive
// (key expected present) or negative (key expected absent) mode.
class NameDictionaryLookupStub : public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
      : PlatformCodeStub(isolate) {
    // The lookup mode is the only parameter; encode it in the minor key.
    minor_key_ = LookupModeBits::encode(mode);
  }

  // Emits inline code proving |name| is NOT in |properties|; jumps to
  // |done| on proof of absence and to |miss| when inconclusive.
  static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
                                     Label* done, Register receiver,
                                     Register properties, Handle<Name> name,
                                     Register scratch0);

  bool SometimesSetsUpAFrame() override { return false; }

 private:
  // Number of probes emitted inline before falling back to the stub, and
  // the total probes the stub performs before giving up.
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  // Byte offset of the dictionary's capacity field within the backing store.
  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  // Byte offset of the first element within the backing store.
  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }

  class LookupModeBits : public BitField<LookupMode, 0, 1> {};

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
336
// Static helpers for generating floating-point conversion and loading code.
// NOTE(review): several comments below reference ARM registers (r0/r1,
// d6/d7) and VFP3 -- they look carried over from the ARM port of this
// header; verify against the s390 implementations in the .cc file.
class FloatingPointHelper : public AllStatic {
 public:
  enum Destination { kFPRegisters, kCoreRegisters };

  // Loads smis from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
  // destination is floating point registers VFP3 must be supported. If core
  // registers are requested when VFP3 is supported d6 and d7 will be
  // scratched.
  static void LoadSmis(MacroAssembler* masm, Register scratch1,
                       Register scratch2);

  // Loads objects from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
  // destination is floating point registers VFP3 must be supported. If core
  // registers are requested when VFP3 is supported d6 and d7 will still be
  // scratched. If either r0 or r1 is not a number (not smi and not heap
  // number object) the not_number label is jumped to with r0 and r1 intact.
  static void LoadOperands(MacroAssembler* masm, Register heap_number_map,
                           Register scratch1, Register scratch2,
                           Label* not_number);

  // Convert the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1.
  static void ConvertNumberToInt32(MacroAssembler* masm, Register object,
                                   Register dst, Register heap_number_map,
                                   Register scratch1, Register scratch2,
                                   Register scratch3,
                                   DoubleRegister double_scratch,
                                   Label* not_int32);

  // Converts the integer (untagged smi) in |src| to a double, storing
  // the result to |double_dst|.
  static void ConvertIntToDouble(MacroAssembler* masm, Register src,
                                 DoubleRegister double_dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a double, storing the result to |double_dst|.
  static void ConvertUnsignedIntToDouble(MacroAssembler* masm, Register src,
                                         DoubleRegister double_dst);

  // Converts the integer (untagged smi) in |src| to
  // a float, storing the result in |dst|.
  static void ConvertIntToFloat(MacroAssembler* masm, const DoubleRegister dst,
                                const Register src);

  // Load the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
  static void LoadNumberAsInt32Double(MacroAssembler* masm, Register object,
                                      DoubleRegister double_dst,
                                      DoubleRegister double_scratch,
                                      Register heap_number_map,
                                      Register scratch1, Register scratch2,
                                      Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  // scratch3 is not used when VFP3 is supported.
  static void LoadNumberAsInt32(MacroAssembler* masm, Register object,
                                Register dst, Register heap_number_map,
                                Register scratch1, Register scratch2,
                                Register scratch3,
                                DoubleRegister double_scratch0,
                                DoubleRegister double_scratch1,
                                Label* not_int32);

  // Generate non VFP3 code to check if a double can be exactly represented by
  // a 32-bit integer. This does not check for 0 or -0, which need
  // to be checked for separately.
  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
  // through otherwise.
  // src1 and src2 will be clobbered.
  //
  // Expected input:
  // - src1: higher (exponent) part of the double value.
  // - src2: lower (mantissa) part of the double value.
  // Output status:
  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
  // - src2: contains 1.
  // - other registers are clobbered.
  static void DoubleIs32BitInteger(MacroAssembler* masm, Register src1,
                                   Register src2, Register dst,
                                   Register scratch, Label* not_int32);

  // Generates code to call a C function to do a double operation using core
  // registers. (Used when VFP3 is not supported.)
  // This code never falls through, but returns with a heap number containing
  // the result in r0.
  // Register heapnumber_result must be a heap number in which the
  // result of the operation will be stored.
  // Requires the following layout on entry:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).
  static void CallCCodeForDoubleOperation(MacroAssembler* masm, Token::Value op,
                                          Register heap_number_result,
                                          Register scratch);

 private:
  // Shared implementation used by the public Load* helpers: loads a smi or
  // heap number from |object| into |dst|, jumping to |not_number| otherwise.
  static void LoadNumber(MacroAssembler* masm, Register object,
                         DoubleRegister dst, Register heap_number_map,
                         Register scratch1, Register scratch2,
                         Label* not_number);
};
450
451}  // namespace internal
452}  // namespace v8
453
454#endif  // V8_S390_CODE_STUBS_S390_H_
455