// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_

#include "src/arm/frames-arm.h"

namespace v8 {
namespace internal {


void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);


class StringHelper : public AllStatic {
 public:
  // Generate code for copying a large number of characters. This function
  // is allowed to spend extra time setting up conditions to make copying
  // faster. Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  static void GenerateCopyCharacters(MacroAssembler* masm,
                                     Register dest,
                                     Register src,
                                     Register count,
                                     Register scratch,
                                     String::Encoding encoding);

  // Compares two flat one-byte strings and returns result in r0.
  static void GenerateCompareFlatOneByteStrings(
      MacroAssembler* masm, Register left, Register right, Register scratch1,
      Register scratch2, Register scratch3, Register scratch4);

  // Compares two flat one-byte strings for equality and returns result in r0.
  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                              Register left, Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3);

 private:
  static void GenerateOneByteCharsCompareLoop(
      MacroAssembler* masm, Register left, Register right, Register length,
      Register scratch1, Register scratch2, Label* chars_not_equal);

  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};

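// Usage sketch (illustration only, not part of the original header; the
// register assignments below are hypothetical): a caller hands the helper
// the two flat one-byte strings plus free scratch registers and, per the
// comments above, reads the comparison result from r0:
//
//   // left string in r1, right string in r0, r2..r5 free as scratch.
//   StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0,
//                                                   r2, r3, r4, r5);
//   // r0 now holds the result of the comparison.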


class RecordWriteStub: public PlatformCodeStub {
 public:
  RecordWriteStub(Isolate* isolate,
                  Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : PlatformCodeStub(isolate),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
    minor_key_ = ObjectBits::encode(object.code()) |
                 ValueBits::encode(value.code()) |
                 AddressBits::encode(address.code()) |
                 RememberedSetActionBits::encode(remembered_set_action) |
                 SaveFPRegsModeBits::encode(fp_mode);
  }

  RecordWriteStub(uint32_t key, Isolate* isolate)
      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  bool SometimesSetsUpAFrame() override { return false; }

  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
    DCHECK(Assembler::IsTstImmediate(masm->instr_at(pos)));
  }

  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
    DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
  }

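  // The mode of a deployed stub is encoded in its first two instructions
  // (see GetMode and Patch below):
  //   TST, TST -> STORE_BUFFER_ONLY
  //   B,   any -> INCREMENTAL
  //   TST, B   -> INCREMENTAL_COMPACTION
  // The TST-immediate form serves as a nop in these two slots, so
  // PatchNopIntoBranch/PatchBranchIntoNop can switch modes in place.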
  static Mode GetMode(Code* stub) {
    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
                                                   Assembler::kInstrSize);

    if (Assembler::IsBranch(first_instruction)) {
      return INCREMENTAL;
    }

    DCHECK(Assembler::IsTstImmediate(first_instruction));

    if (Assembler::IsBranch(second_instruction)) {
      return INCREMENTAL_COMPACTION;
    }

    DCHECK(Assembler::IsTstImmediate(second_instruction));

    return STORE_BUFFER_ONLY;
  }

  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
                        stub->instruction_size(), CodeObjectRequired::kNo);
    switch (mode) {
      case STORE_BUFFER_ONLY:
        DCHECK(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        PatchBranchIntoNop(&masm, 0);
        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
        break;
      case INCREMENTAL:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 0);
        break;
      case INCREMENTAL_COMPACTION:
        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
        break;
    }
    DCHECK(GetMode(stub) == mode);
    Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
                           2 * Assembler::kInstrSize);
  }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();

 private:
  // This is a helper class for freeing up 3 scratch registers. The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_(object),
          address_(address),
          scratch0_(scratch0) {
      DCHECK(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) {
      masm->pop(scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved. The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        masm->SaveFPRegs(sp, scratch0_);
      }
    }

    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        masm->RestoreFPRegs(sp, scratch0_);
      }
      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    friend class RecordWriteStub;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  inline Major MajorKey() const final { return RecordWrite; }

  void Generate(MacroAssembler* masm) override;
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm);

  void Activate(Code* code) override {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  Register object() const {
    return Register::from_code(ObjectBits::decode(minor_key_));
  }

  Register value() const {
    return Register::from_code(ValueBits::decode(minor_key_));
  }

  Register address() const {
    return Register::from_code(AddressBits::decode(minor_key_));
  }

  RememberedSetAction remembered_set_action() const {
    return RememberedSetActionBits::decode(minor_key_);
  }

  SaveFPRegsMode save_fp_regs_mode() const {
    return SaveFPRegsModeBits::decode(minor_key_);
  }

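  // Layout of minor_key_, mirroring the encode() calls in the constructor:
  // bits 0-3 object register code, bits 4-7 value register code, bits 8-11
  // address register code, bit 12 remembered-set action, bit 13 FP-save mode.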
  class ObjectBits: public BitField<int, 0, 4> {};
  class ValueBits: public BitField<int, 4, 4> {};
  class AddressBits: public BitField<int, 8, 4> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};

  Label slow_;
  RegisterAllocation regs_;

  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};


// Trampoline stub to call into native code. To call safely into native code
// in the presence of a compacting GC (which can move code objects), we need
// to keep the code that called into native code pinned in memory. Currently
// the simplest approach is to generate such a stub early enough that it can
// never be moved by the GC.
class DirectCEntryStub: public PlatformCodeStub {
 public:
  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  bool NeedsImmovableCode() override { return true; }

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};


class NameDictionaryLookupStub: public PlatformCodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
      : PlatformCodeStub(isolate) {
    minor_key_ = LookupModeBits::encode(mode);
  }

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<Name> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register r0,
                                     Register r1);

  bool SometimesSetsUpAFrame() override { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;

  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_ARM_CODE_STUBS_ARM_H_