// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_

#include "ic-inl.h"

namespace v8 {
namespace internal {


// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
 public:
  enum ArgumentType {
    TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
    UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
  };

  TranscendentalCacheStub(TranscendentalCache::Type type,
                          ArgumentType argument_type)
      : type_(type), argument_type_(argument_type) { }
  void Generate(MacroAssembler* masm);
 private:
  TranscendentalCache::Type type_;
  ArgumentType argument_type_;
  void GenerateCallCFunction(MacroAssembler* masm, Register scratch);

  Major MajorKey() { return TranscendentalCache; }
  int MinorKey() { return type_ | argument_type_; }
  Runtime::FunctionId RuntimeFunction();
};


class StoreBufferOverflowStub: public CodeStub {
 public:
  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
      : save_doubles_(save_fp) { }

  void Generate(MacroAssembler* masm);

  virtual bool IsPregenerated();
  static void GenerateFixedRegStubsAheadOfTime();
  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  SaveFPRegsMode save_doubles_;

  Major MajorKey() { return StoreBufferOverflow; }
  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};


class UnaryOpStub: public CodeStub {
 public:
  UnaryOpStub(Token::Value op,
              UnaryOverwriteMode mode,
              UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
      : op_(op),
        mode_(mode),
        operand_type_(operand_type) {
  }

 private:
  Token::Value op_;
  UnaryOverwriteMode mode_;

  // Operand type information determined at runtime.
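  // (FinishCode() below records this type on the generated Code object, and
  // GetICState() maps it to an InlineCacheState for the IC system.)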
  UnaryOpIC::TypeInfo operand_type_;

  virtual void PrintName(StringStream* stream);

  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
  class OpBits: public BitField<Token::Value, 1, 7> {};
  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};

  Major MajorKey() { return UnaryOp; }
  int MinorKey() {
    return ModeBits::encode(mode_)
           | OpBits::encode(op_)
           | OperandTypeInfoBits::encode(operand_type_);
  }

  // Note: A lot of the helper functions below will vanish when we use virtual
  // functions instead of switch statements more often.
  void Generate(MacroAssembler* masm);

  void GenerateTypeTransition(MacroAssembler* masm);

  void GenerateSmiStub(MacroAssembler* masm);
  void GenerateSmiStubSub(MacroAssembler* masm);
  void GenerateSmiStubBitNot(MacroAssembler* masm);
  void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
  void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);

  void GenerateHeapNumberStub(MacroAssembler* masm);
  void GenerateHeapNumberStubSub(MacroAssembler* masm);
  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);

  void GenerateGenericStub(MacroAssembler* masm);
  void GenerateGenericStubSub(MacroAssembler* masm);
  void GenerateGenericStubBitNot(MacroAssembler* masm);
  void GenerateGenericCodeFallback(MacroAssembler* masm);

  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }

  virtual InlineCacheState GetICState() {
    return UnaryOpIC::ToState(operand_type_);
  }

  virtual void FinishCode(Handle<Code> code) {
    code->set_unary_op_type(operand_type_);
  }
};


class BinaryOpStub: public CodeStub {
 public:
  BinaryOpStub(Token::Value op, OverwriteMode mode)
      : op_(op),
        mode_(mode),
        operands_type_(BinaryOpIC::UNINITIALIZED),
        result_type_(BinaryOpIC::UNINITIALIZED) {
    use_vfp3_ = CpuFeatures::IsSupported(VFP3);
    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
  }

  BinaryOpStub(
      int key,
      BinaryOpIC::TypeInfo operands_type,
      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
      : op_(OpBits::decode(key)),
        mode_(ModeBits::decode(key)),
        use_vfp3_(VFP3Bits::decode(key)),
        operands_type_(operands_type),
        result_type_(result_type) { }

 private:
  enum SmiCodeGenerateHeapNumberResults {
    ALLOW_HEAPNUMBER_RESULTS,
    NO_HEAPNUMBER_RESULTS
  };

  Token::Value op_;
  OverwriteMode mode_;
  bool use_vfp3_;

  // Operand type information determined at runtime.
  BinaryOpIC::TypeInfo operands_type_;
  BinaryOpIC::TypeInfo result_type_;

  virtual void PrintName(StringStream* stream);

  // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
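  // Reading from the most significant bits down: RRR = result type info,
  // TTT = operand type info, V = use_vfp3_, OOOOOOO = op and MM = overwrite
  // mode, matching the BitField positions declared below.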
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 7> {};
  class VFP3Bits: public BitField<bool, 9, 1> {};
  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};

  Major MajorKey() { return BinaryOp; }
  int MinorKey() {
    return OpBits::encode(op_)
           | ModeBits::encode(mode_)
           | VFP3Bits::encode(use_vfp3_)
           | OperandTypeInfoBits::encode(operands_type_)
           | ResultTypeInfoBits::encode(result_type_);
  }

  void Generate(MacroAssembler* masm);
  void GenerateGeneric(MacroAssembler* masm);
  void GenerateSmiSmiOperation(MacroAssembler* masm);
  void GenerateFPOperation(MacroAssembler* masm,
                           bool smi_operands,
                           Label* not_numbers,
                           Label* gc_required);
  void GenerateSmiCode(MacroAssembler* masm,
                       Label* use_runtime,
                       Label* gc_required,
                       SmiCodeGenerateHeapNumberResults heapnumber_results);
  void GenerateLoadArguments(MacroAssembler* masm);
  void GenerateReturn(MacroAssembler* masm);
  void GenerateUninitializedStub(MacroAssembler* masm);
  void GenerateSmiStub(MacroAssembler* masm);
  void GenerateInt32Stub(MacroAssembler* masm);
  void GenerateHeapNumberStub(MacroAssembler* masm);
  void GenerateOddballStub(MacroAssembler* masm);
  void GenerateStringStub(MacroAssembler* masm);
  void GenerateBothStringStub(MacroAssembler* masm);
  void GenerateGenericStub(MacroAssembler* masm);
  void GenerateAddStrings(MacroAssembler* masm);
  void GenerateCallRuntime(MacroAssembler* masm);

  void GenerateHeapResultAllocation(MacroAssembler* masm,
                                    Register result,
                                    Register heap_number_map,
                                    Register scratch1,
                                    Register scratch2,
                                    Label* gc_required);
  void GenerateRegisterArgsPush(MacroAssembler* masm);
  void GenerateTypeTransition(MacroAssembler* masm);
  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);

  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }

  virtual InlineCacheState GetICState() {
    return BinaryOpIC::ToState(operands_type_);
  }

  virtual void FinishCode(Handle<Code> code) {
    code->set_binary_op_type(operands_type_);
    code->set_binary_op_result_type(result_type_);
  }

  friend class CodeGenerator;
};


class StringHelper : public AllStatic {
 public:
  // Generate code for copying characters using a simple loop. This should only
  // be used in places where the number of characters is small and the
  // additional setup and checking in GenerateCopyCharactersLong adds too much
  // overhead. Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  static void GenerateCopyCharacters(MacroAssembler* masm,
                                     Register dest,
                                     Register src,
                                     Register count,
                                     Register scratch,
                                     bool ascii);

  // Generate code for copying a large number of characters. This function
  // is allowed to spend extra time setting up conditions to make copying
  // faster. Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
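  // (Illustrative example: after copying n ASCII characters dest has advanced
  // by n bytes; for two-byte characters it advances by 2 * n bytes.)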
  static void GenerateCopyCharactersLong(MacroAssembler* masm,
                                         Register dest,
                                         Register src,
                                         Register count,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Register scratch4,
                                         Register scratch5,
                                         int flags);


  // Probe the symbol table for a two character string. If the string is
  // not found by probing, a jump to the label not_found is performed. This
  // jump does not guarantee that the string is not in the symbol table. If
  // the string is found the code falls through with the string in register
  // r0. Contents of both c1 and c2 registers are modified. At the exit c1 is
  // guaranteed to contain a halfword with its low and high bytes equal to the
  // initial contents of c1 and c2 respectively.
  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
                                                   Register c1,
                                                   Register c2,
                                                   Register scratch1,
                                                   Register scratch2,
                                                   Register scratch3,
                                                   Register scratch4,
                                                   Register scratch5,
                                                   Label* not_found);

  // Generate string hash.
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character);

  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character);

  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};


// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
  NO_STRING_ADD_FLAGS = 0,
  // Omit left string check in stub (left is definitely a string).
  NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
  // Omit right string check in stub (right is definitely a string).
  NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
  // Omit both string checks in stub.
  NO_STRING_CHECK_IN_STUB =
      NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};


class StringAddStub: public CodeStub {
 public:
  explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}

 private:
  Major MajorKey() { return StringAdd; }
  int MinorKey() { return flags_; }

  void Generate(MacroAssembler* masm);

  void GenerateConvertArgument(MacroAssembler* masm,
                               int stack_offset,
                               Register arg,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Register scratch4,
                               Label* slow);

  const StringAddFlags flags_;
};


class SubStringStub: public CodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};



class StringCompareStub: public CodeStub {
 public:
  StringCompareStub() { }

  // Compares two flat ASCII strings and returns result in r0.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

  // Compares two flat ASCII strings for equality and returns result
  // in r0.
  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3);

 private:
  virtual Major MajorKey() { return StringCompare; }
  virtual int MinorKey() { return 0; }
  virtual void Generate(MacroAssembler* masm);

  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
                                            Register left,
                                            Register right,
                                            Register length,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* chars_not_equal);
};


// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
 public:
  WriteInt32ToHeapNumberStub(Register the_int,
                             Register the_heap_number,
                             Register scratch)
      : the_int_(the_int),
        the_heap_number_(the_heap_number),
        scratch_(scratch) { }

  bool IsPregenerated();
  static void GenerateFixedRegStubsAheadOfTime();

 private:
  Register the_int_;
  Register the_heap_number_;
  Register scratch_;

  // Minor key encoding in 16 bits.
  class IntRegisterBits: public BitField<int, 0, 4> {};
  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
  class ScratchRegisterBits: public BitField<int, 8, 4> {};

  Major MajorKey() { return WriteInt32ToHeapNumber; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return IntRegisterBits::encode(the_int_.code())
           | HeapNumberRegisterBits::encode(the_heap_number_.code())
           | ScratchRegisterBits::encode(scratch_.code());
  }

  void Generate(MacroAssembler* masm);
};


class NumberToStringStub: public CodeStub {
 public:
  NumberToStringStub() { }

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls
  // through with the result in the result register. The object and the result
  // register can be the same. If the number is not found in the cache the
  // code jumps to the label not_found with only the content of the register
  // object guaranteed to be unchanged.
  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
                                              Register object,
                                              Register result,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              bool object_is_smi,
                                              Label* not_found);

 private:
  Major MajorKey() { return NumberToString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


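// Stub for the write barrier's record-write operation. A brief summary of the
// mode protocol, inferred from GetMode() and Patch() below: the first two
// instructions of the generated stub encode the current Mode. Two tst
// instructions mean STORE_BUFFER_ONLY, a branch as the first instruction
// means INCREMENTAL, and a tst followed by a branch means
// INCREMENTAL_COMPACTION. Patch() rewrites those instructions in place (and
// flushes the icache) to switch a pregenerated stub between modes.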
class RecordWriteStub: public CodeStub {
 public:
  RecordWriteStub(Register object,
                  Register value,
                  Register address,
                  RememberedSetAction remembered_set_action,
                  SaveFPRegsMode fp_mode)
      : object_(object),
        value_(value),
        address_(address),
        remembered_set_action_(remembered_set_action),
        save_fp_regs_mode_(fp_mode),
        regs_(object,   // An input reg.
              address,  // An input reg.
              value) {  // One scratch reg.
  }

  enum Mode {
    STORE_BUFFER_ONLY,
    INCREMENTAL,
    INCREMENTAL_COMPACTION
  };

  virtual bool IsPregenerated();
  static void GenerateFixedRegStubsAheadOfTime();
  virtual bool SometimesSetsUpAFrame() { return false; }

  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
  }

  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
  }

  static Mode GetMode(Code* stub) {
    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
                                                   Assembler::kInstrSize);

    if (Assembler::IsBranch(first_instruction)) {
      return INCREMENTAL;
    }

    ASSERT(Assembler::IsTstImmediate(first_instruction));

    if (Assembler::IsBranch(second_instruction)) {
      return INCREMENTAL_COMPACTION;
    }

    ASSERT(Assembler::IsTstImmediate(second_instruction));

    return STORE_BUFFER_ONLY;
  }

  static void Patch(Code* stub, Mode mode) {
    MacroAssembler masm(NULL,
                        stub->instruction_start(),
                        stub->instruction_size());
    switch (mode) {
      case STORE_BUFFER_ONLY:
        ASSERT(GetMode(stub) == INCREMENTAL ||
               GetMode(stub) == INCREMENTAL_COMPACTION);
        PatchBranchIntoNop(&masm, 0);
        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
        break;
      case INCREMENTAL:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, 0);
        break;
      case INCREMENTAL_COMPACTION:
        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
        break;
    }
    ASSERT(GetMode(stub) == mode);
    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
  }

 private:
  // This is a helper class for freeing up 3 scratch registers. The input is
  // two registers that must be preserved and one scratch register provided by
  // the caller.
  class RegisterAllocation {
   public:
    RegisterAllocation(Register object,
                       Register address,
                       Register scratch0)
        : object_(object),
          address_(address),
          scratch0_(scratch0) {
      ASSERT(!AreAliased(scratch0, object, address, no_reg));
      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
    }

    void Save(MacroAssembler* masm) {
      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
      // We don't have to save scratch0_ because it was given to us as
      // a scratch register.
      masm->push(scratch1_);
    }

    void Restore(MacroAssembler* masm) {
      masm->pop(scratch1_);
    }

    // If we have to call into C then we need to save and restore all caller-
    // saved registers that were not already preserved. The scratch registers
    // will be restored by other means so we don't bother pushing them here.
    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
      if (mode == kSaveFPRegs) {
        CpuFeatures::Scope scope(VFP3);
        masm->sub(sp,
                  sp,
                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
        // Save all VFP registers except d0.
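        // (Layout sketch: the loop below stores register d<i> at
        // sp + (i - 1) * kDoubleSize, so d1 ends up at the lowest address of
        // the block reserved above.)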
        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
          DwVfpRegister reg = DwVfpRegister::from_code(i);
          masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
        }
      }
    }

    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                           SaveFPRegsMode mode) {
      if (mode == kSaveFPRegs) {
        CpuFeatures::Scope scope(VFP3);
        // Restore all VFP registers except d0.
        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
          DwVfpRegister reg = DwVfpRegister::from_code(i);
          masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
        }
        masm->add(sp,
                  sp,
                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
      }
      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
    }

    inline Register object() { return object_; }
    inline Register address() { return address_; }
    inline Register scratch0() { return scratch0_; }
    inline Register scratch1() { return scratch1_; }

   private:
    Register object_;
    Register address_;
    Register scratch0_;
    Register scratch1_;

    Register GetRegThatIsNotOneOf(Register r1,
                                  Register r2,
                                  Register r3) {
      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
        Register candidate = Register::FromAllocationIndex(i);
        if (candidate.is(r1)) continue;
        if (candidate.is(r2)) continue;
        if (candidate.is(r3)) continue;
        return candidate;
      }
      UNREACHABLE();
      return no_reg;
    }
    friend class RecordWriteStub;
  };

  enum OnNoNeedToInformIncrementalMarker {
    kReturnOnNoNeedToInformIncrementalMarker,
    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
  };

  void Generate(MacroAssembler* masm);
  void GenerateIncremental(MacroAssembler* masm, Mode mode);
  void CheckNeedsToInformIncrementalMarker(
      MacroAssembler* masm,
      OnNoNeedToInformIncrementalMarker on_no_need,
      Mode mode);
  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    return ObjectBits::encode(object_.code()) |
           ValueBits::encode(value_.code()) |
           AddressBits::encode(address_.code()) |
           RememberedSetActionBits::encode(remembered_set_action_) |
           SaveFPRegsModeBits::encode(save_fp_regs_mode_);
  }

  void Activate(Code* code) {
    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
  }

  class ObjectBits: public BitField<int, 0, 4> {};
  class ValueBits: public BitField<int, 4, 4> {};
  class AddressBits: public BitField<int, 8, 4> {};
  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};

  Register object_;
  Register value_;
  Register address_;
  RememberedSetAction remembered_set_action_;
  SaveFPRegsMode save_fp_regs_mode_;
  Label slow_;
  RegisterAllocation regs_;
};


// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
class RegExpCEntryStub: public CodeStub {
 public:
  RegExpCEntryStub() {}
  virtual ~RegExpCEntryStub() {}
  void Generate(MacroAssembler* masm);

 private:
  Major MajorKey() { return RegExpCEntry; }
  int MinorKey() { return 0; }

  bool NeedsImmovableCode() { return true; }
};


// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects), we need to
// keep the code that called into native code pinned in memory. Currently the
// simplest approach is to generate such a stub early enough that it can never
// be moved by the GC.
class DirectCEntryStub: public CodeStub {
 public:
  DirectCEntryStub() {}
  void Generate(MacroAssembler* masm);
  void GenerateCall(MacroAssembler* masm, ExternalReference function);
  void GenerateCall(MacroAssembler* masm, Register target);

 private:
  Major MajorKey() { return DirectCEntry; }
  int MinorKey() { return 0; }

  bool NeedsImmovableCode() { return true; }
};


class FloatingPointHelper : public AllStatic {
 public:
  enum Destination {
    kVFPRegisters,
    kCoreRegisters
  };


  // Loads smis from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
  // destination is floating point registers VFP3 must be supported. If core
  // registers are requested when VFP3 is supported d6 and d7 will be
  // scratched.
  static void LoadSmis(MacroAssembler* masm,
                       Destination destination,
                       Register scratch1,
                       Register scratch2);

  // Loads objects from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
  // destination is floating point registers VFP3 must be supported. If core
  // registers are requested when VFP3 is supported d6 and d7 will still be
  // scratched. If either r0 or r1 is not a number (not smi and not heap
  // number object) the not_number label is jumped to with r0 and r1 intact.
  static void LoadOperands(MacroAssembler* masm,
                           FloatingPointHelper::Destination destination,
                           Register heap_number_map,
                           Register scratch1,
                           Register scratch2,
                           Label* not_number);

  // Convert the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1.
  static void ConvertNumberToInt32(MacroAssembler* masm,
                                   Register object,
                                   Register dst,
                                   Register heap_number_map,
                                   Register scratch1,
                                   Register scratch2,
                                   Register scratch3,
                                   DwVfpRegister double_scratch,
                                   Label* not_int32);

  // Converts the integer (untagged smi) in |int_scratch| to a double, storing
  // the result either in |double_dst| or |dst2:dst1|, depending on
  // |destination|.
  // Warning: The value in |int_scratch| will be changed in the process!
  static void ConvertIntToDouble(MacroAssembler* masm,
                                 Register int_scratch,
                                 Destination destination,
                                 DwVfpRegister double_dst,
                                 Register dst1,
                                 Register dst2,
                                 Register scratch2,
                                 SwVfpRegister single_scratch);

  // Load the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
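  // (Illustrative example: 2.0 would be loaded as the integer 2, whereas 2.5
  // or 2^31 would jump to not_int32.)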
  static void LoadNumberAsInt32Double(MacroAssembler* masm,
                                      Register object,
                                      Destination destination,
                                      DwVfpRegister double_dst,
                                      Register dst1,
                                      Register dst2,
                                      Register heap_number_map,
                                      Register scratch1,
                                      Register scratch2,
                                      SwVfpRegister single_scratch,
                                      Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  // scratch3 is not used when VFP3 is supported.
  static void LoadNumberAsInt32(MacroAssembler* masm,
                                Register object,
                                Register dst,
                                Register heap_number_map,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3,
                                DwVfpRegister double_scratch,
                                Label* not_int32);

  // Generate non-VFP3 code to check if a double can be exactly represented by
  // a 32-bit integer. This does not check for 0 or -0, which need
  // to be checked for separately.
  // Control jumps to not_int32 if the value is not a 32-bit integer, and
  // falls through otherwise.
  // src1 and src2 will be clobbered.
  //
  // Expected input:
  // - src1: higher (exponent) part of the double value.
  // - src2: lower (mantissa) part of the double value.
  // Output status:
  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
  // - src2: contains 1.
  // - other registers are clobbered.
  static void DoubleIs32BitInteger(MacroAssembler* masm,
                                   Register src1,
                                   Register src2,
                                   Register dst,
                                   Register scratch,
                                   Label* not_int32);

  // Generates code to call a C function to do a double operation using core
  // registers. (Used when VFP3 is not supported.)
  // This code never falls through, but returns with a heap number containing
  // the result in r0.
  // Register heap_number_result must be a heap number in which the
  // result of the operation will be stored.
  // Requires the following layout on entry:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).
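  // (That is, each double operand is passed split across a core register
  // pair with the low word in the even register, e.g. as LoadOperands
  // produces for the kCoreRegisters destination.)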
  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
                                          Token::Value op,
                                          Register heap_number_result,
                                          Register scratch);

 private:
  static void LoadNumber(MacroAssembler* masm,
                         FloatingPointHelper::Destination destination,
                         Register object,
                         DwVfpRegister dst,
                         Register dst1,
                         Register dst2,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         Label* not_number);
};


class StringDictionaryLookupStub: public CodeStub {
 public:
  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };

  explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }

  void Generate(MacroAssembler* masm);

  static void GenerateNegativeLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register receiver,
                                     Register properties,
                                     Handle<String> name,
                                     Register scratch0);

  static void GeneratePositiveLookup(MacroAssembler* masm,
                                     Label* miss,
                                     Label* done,
                                     Register elements,
                                     Register name,
                                     Register r0,
                                     Register r1);

  virtual bool SometimesSetsUpAFrame() { return false; }

 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;

  static const int kCapacityOffset =
      StringDictionary::kHeaderSize +
      StringDictionary::kCapacityIndex * kPointerSize;

  static const int kElementsStartOffset =
      StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;

  Major MajorKey() { return StringDictionaryLookup; }

  int MinorKey() {
    return LookupModeBits::encode(mode_);
  }

  class LookupModeBits: public BitField<LookupMode, 0, 1> {};

  LookupMode mode_;
};


} }  // namespace v8::internal

#endif  // V8_ARM_CODE_STUBS_ARM_H_