// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object. The offset is
// relative to the start of the object; subtracting kHeapObjectTag compensates
// for the tag bit carried in the (tagged) object pointer.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


// Give alias names to registers
const Register cp = { kRegister_r7_Code };  // JavaScript context pointer.
const Register pp = { kRegister_r8_Code };  // Constant pool pointer.
const Register kRootRegister = { kRegister_r10_Code };  // Roots array pointer.

// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};


// Whether write-barrier style operations should update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether an inline smi check may be skipped (value known not to be a smi).
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Records whether lr has already been saved, so write barriers know whether
// they must preserve it around calls.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


// Returns a register that is none of the (up to six) given registers.
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);


#ifdef DEBUG
// Returns true if any two of the given valid registers alias each other.
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
#endif


enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);


  // Returns the size of a call in instructions. Note, the value returned is
  // only valid as long as no entries are added to the constant pool between
  // checking the call size and emitting the actual call.
  static int CallSize(Register target, Condition cond = al);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
  int CallStubSize(CodeStub* stub,
                   TypeFeedbackId ast_id = TypeFeedbackId::None(),
                   Condition cond = al);
  static int CallSizeNotPredictableCodeSize(Isolate* isolate,
                                            Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Register target, Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode,
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               Condition cond = al);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  void Ret(Condition cond = al);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);

  // Drop |drop| stack elements and then return.
  void Ret(int drop, Condition cond = al);

  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1,
            Register reg2,
            Register scratch = no_reg,
            Condition cond = al);

  // Multiply-and-subtract (ARM mls semantics: dst = srcA - src1 * src2).
  void Mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  // Unsigned / signed bit-field extract of |width| bits starting at |lsb|.
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  // Bit-field insert.
  // The scratch register is not used for ARMv7.
  // scratch can be the same register as src (in which case it is trashed), but
  // not the same as dst.
  void Bfi(Register dst,
           Register src,
           Register scratch,
           int lsb,
           int width,
           Condition cond = al);
  // Bit-field clear.
  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
  // Unsigned saturate of |src| to |satpos| bits.
  void Usat(Register dst, int satpos, const Operand& src,
            Condition cond = al);

  void Call(Label* target);
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
            Condition cond = al) {
    // Elide the mov only when it would be a no-op: a plain register-to-itself
    // move that does not update the condition flags.
    if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
      mov(dst, src, sbit, cond);
    }
  }
  void Move(DwVfpRegister dst, DwVfpRegister src);

  // Load/store a value of the given representation, choosing the suitably
  // sized memory access.
  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  // What to emit after a RememberedSetHelper sequence.
  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  // Test the page flags of |object|'s page against |mask| and branch to
  // |condition_met| if condition |cc| holds on the result.
  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  void CheckMapDeprecated(Handle<Map> map,
                          Register scratch,
                          Label* if_deprecated);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    // Re-add the heap object tag so RecordWriteField sees a field offset.
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     lr_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers. Pushes leftmost register first (to highest address).
  // stm stores the highest-numbered register at the highest address, so the
  // single-instruction form is only usable when the register codes are
  // already in push order; otherwise fall back to individual stores.
  void Push(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!src1.is(src2));
    DCHECK(!src2.is(src3));
    DCHECK(!src1.is(src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1,
            Register src2,
            Register src3,
            Register src4,
            Condition cond = al) {
    DCHECK(!src1.is(src2));
    DCHECK(!src2.is(src3));
    DCHECK(!src1.is(src3));
    DCHECK(!src1.is(src4));
    DCHECK(!src2.is(src4));
    DCHECK(!src3.is(src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Pop two registers. Pops rightmost register first (from lower address).
  // Mirrors Push: ldm loads the lowest-numbered register from the lowest
  // address, so it is only used when codes are in the matching order.
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!src1.is(src2));
    DCHECK(!src2.is(src3));
    DCHECK(!src1.is(src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1,
           Register src2,
           Register src3,
           Register src4,
           Condition cond = al) {
    DCHECK(!src1.is(src2));
    DCHECK(!src2.is(src3));
    DCHECK(!src1.is(src3));
    DCHECK(!src1.is(src4));
    DCHECK(!src2.is(src4));
    DCHECK(!src3.is(src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          ldm(ia_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
        }
      } else {
        Pop(src3, src4, cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Push a fixed frame, consisting of lr, fp, constant pool (if
  // FLAG_enable_ool_constant_pool), context and JS function / marker id if
  // marker_reg is a valid register.
  void PushFixedFrame(Register marker_reg = no_reg);
  void PopFixedFrame(Register marker_reg = no_reg);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Load two consecutive registers with two consecutive memory locations.
  void Ldrd(Register dst1,
            Register dst2,
            const MemOperand& src,
            Condition cond = al);

  // Store two consecutive registers to two consecutive memory locations.
  void Strd(Register src1,
            Register src2,
            const MemOperand& dst,
            Condition cond = al);

  // Ensure that FPSCR contains values needed by JavaScript.
  // We need the NaNModeControlBit to be sure that operations like
  // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
  // In VFP3 it will be always the Canonical NaN.
  // In VFP2 it will be either the Canonical NaN or the negative version
  // of the Canonical NaN. It doesn't matter if we have two values. The aim
  // is to be sure to never generate the hole NaN.
  void VFPEnsureFPSCRState(Register scratch);

  // If the value is a NaN, canonicalize the value else, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst,
                          const DwVfpRegister src,
                          const Condition cond = al);
  // In-place variant: canonicalizes |value| if it is a NaN.
  void VFPCanonicalizeNaN(const DwVfpRegister value,
                          const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const double src2,
                             const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Load the double immediate |imm| into dst.
  void Vmov(const DwVfpRegister dst,
            const double imm,
            const Register scratch = no_reg);

  // Move the high/low 32-bit half between a core register and a D register.
  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  LowDwVfpRegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point value in the 32-bit integer range that are not exact integer
  // won't be loaded.
  void LoadNumberAsInt32Double(Register object,
                               DwVfpRegister double_dst,
                               Register heap_number_map,
                               Register scratch,
                               LowDwVfpRegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point value in the 32-bit integer range that are not exact integer
  // won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch,
                         DwVfpRegister double_scratch0,
                         LowDwVfpRegister double_scratch1,
                         Label* not_int32);

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0);

  // Leave the current exit frame. Expects the return value in r0.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles,
                      Register argument_count,
                      bool restore_context);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  // Point kRootRegister at the start of the isolate's roots array.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Type checks; each jumps to |fail| when the check does not hold.
  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  void IsInstanceJSObjectType(Register map,
                              Register scratch,
                              Label* fail);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  // Must preserve the result register.
  void PopTryHandler();

  // Passes thrown value to the handler of top of the try handler chain.
  void Throw(Register value);

  // Propagates an uncatchable exception to the top of the current JS stack's
  // handler chain.
662 void ThrowUncatchable(Register value); 663 664 // --------------------------------------------------------------------------- 665 // Inline caching support 666 667 // Generate code for checking access rights - used for security checks 668 // on access to global objects across environments. The holder register 669 // is left untouched, whereas both scratch registers are clobbered. 670 void CheckAccessGlobalProxy(Register holder_reg, 671 Register scratch, 672 Label* miss); 673 674 void GetNumberHash(Register t0, Register scratch); 675 676 void LoadFromNumberDictionary(Label* miss, 677 Register elements, 678 Register key, 679 Register result, 680 Register t0, 681 Register t1, 682 Register t2); 683 684 685 inline void MarkCode(NopMarkerTypes type) { 686 nop(type); 687 } 688 689 // Check if the given instruction is a 'type' marker. 690 // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type)) 691 // These instructions are generated to mark special location in the code, 692 // like some special IC code. 693 static inline bool IsMarkedCode(Instr instr, int type) { 694 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)); 695 return IsNop(instr, type); 696 } 697 698 699 static inline int GetCodeMarker(Instr instr) { 700 int dst_reg_offset = 12; 701 int dst_mask = 0xf << dst_reg_offset; 702 int src_mask = 0xf; 703 int dst_reg = (instr & dst_mask) >> dst_reg_offset; 704 int src_reg = instr & src_mask; 705 uint32_t non_register_mask = ~(dst_mask | src_mask); 706 uint32_t mov_mask = al | 13 << 21; 707 708 // Return <n> if we have a mov rn rn, else return -1. 709 int type = ((instr & non_register_mask) == mov_mask) && 710 (dst_reg == src_reg) && 711 (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER) 712 ? 
src_reg 713 : -1; 714 DCHECK((type == -1) || 715 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER))); 716 return type; 717 } 718 719 720 // --------------------------------------------------------------------------- 721 // Allocation support 722 723 // Allocate an object in new space or old pointer space. The object_size is 724 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS 725 // is passed. If the space is exhausted control continues at the gc_required 726 // label. The allocated object is returned in result. If the flag 727 // tag_allocated_object is true the result is tagged as as a heap object. 728 // All registers are clobbered also when control continues at the gc_required 729 // label. 730 void Allocate(int object_size, 731 Register result, 732 Register scratch1, 733 Register scratch2, 734 Label* gc_required, 735 AllocationFlags flags); 736 737 void Allocate(Register object_size, 738 Register result, 739 Register scratch1, 740 Register scratch2, 741 Label* gc_required, 742 AllocationFlags flags); 743 744 // Undo allocation in new space. The object passed and objects allocated after 745 // it will no longer be allocated. The caller must make sure that no pointers 746 // are left to the object(s) no longer allocated as they would be invalid when 747 // allocation is undone. 
  void UndoAllocationInNewSpace(Register object, Register scratch);


  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required,
                          TaggingMode tagging_mode = TAG_RESULT,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result,
                                   DwVfpRegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Register heap_number_map,
                                   Label* gc_required);

  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst,
                  Register src,
                  LowDwVfpRegister double_scratch,
                  int field_count);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values. Fields starting at |start_offset|
  // not including end_offset are overwritten with the value in |filler|. At
  // the end of the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               bool miss_on_bound_function = false);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare object type for heap object. Branch to false_label if type
  // is lower than min_type or greater than max_type.
  // Load map into the register map.
  void CheckObjectTypeRange(Register heap_object,
                            Register map,
                            InstanceType min_type,
                            InstanceType max_type,
                            Label* false_label);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);


  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   LowDwVfpRegister double_scratch,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the
  // compare sequences branches to early_success.
  void CompareMap(Register obj,
                  Register scratch,
                  Handle<Map> map,
                  Label* early_success);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map,
                  Handle<Map> map,
                  Label* early_success);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);


  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);


  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object).
  void DispatchMap(Register obj,
                   Register scratch,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);


  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, Heap::RootListIndex index);


  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string
  // and the passed-in condition passed. If the passed-in condition failed
  // then flags remain unchanged.
941 Condition IsObjectStringType(Register obj, 942 Register type, 943 Condition cond = al) { 944 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); 945 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); 946 tst(type, Operand(kIsNotStringMask), cond); 947 DCHECK_EQ(0, kStringTag); 948 return eq; 949 } 950 951 952 // Picks out an array index from the hash field. 953 // Register use: 954 // hash - holds the index's hash. Clobbered. 955 // index - holds the overwritten index on exit. 956 void IndexFromHash(Register hash, Register index); 957 958 // Get the number of least significant bits from a register 959 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); 960 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); 961 962 // Load the value of a smi object into a double register. 963 // The register value must be between d0 and d15. 964 void SmiToDouble(LowDwVfpRegister value, Register smi); 965 966 // Check if a double can be exactly represented as a signed 32-bit integer. 967 // Z flag set to one if true. 968 void TestDoubleIsInt32(DwVfpRegister double_input, 969 LowDwVfpRegister double_scratch); 970 971 // Try to convert a double to a signed 32-bit integer. 972 // Z flag set to one and result assigned if the conversion is exact. 973 void TryDoubleToInt32Exact(Register result, 974 DwVfpRegister double_input, 975 LowDwVfpRegister double_scratch); 976 977 // Floor a double and writes the value to the result register. 978 // Go to exact if the conversion is exact (to be able to test -0), 979 // fall through calling code if an overflow occurred, else go to done. 980 // In return, input_high is loaded with high bits of input. 
  void TryInt32Floor(Register result,
                     DwVfpRegister double_input,
                     Register input_high,
                     LowDwVfpRegister double_scratch,
                     Label* done,
                     Label* exact);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DwVfpRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DwVfpRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch1,
                         Label* not_int32);

  // Check whether d16-d31 are available on the CPU. The result is given by the
  // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
  void CheckFor32DRegs(Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
  // values to location, saving [d0..(d15|d31)].
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
  // values to location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  // Convenience wrapper: call the runtime function identified by id with its
  // declared argument count, saving double registers across the call.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  // Compute how many words must be passed on the stack for a C call with the
  // given register and double-register argument counts.
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DwVfpRegister src);
  void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
  void MovToFloatResult(DwVfpRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  // Counterparts of MovToFloatParameter/MovToFloatResult for reading values
  // back out after a C call (soft- vs hard-float ABI is handled internally).
  void MovFromFloatParameter(DwVfpRegister dst);
  void MovFromFloatResult(DwVfpRegister dst);

  // Calls an API function. Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions. Restores context. stack_space
  // - space to be unwound on exit (includes the call JS arguments space and
  // the additional space allocated for the fast call).
  void CallApiFunctionAndReturn(Register function_address,
                                ExternalReference thunk_ref,
                                int stack_space,
                                MemOperand return_value_operand,
                                MemOperand* context_restore_operand);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // setup the function in r1.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Returns the handle that is patched with the code object on installation.
  // Must not be called before that handle has been set (checked in debug mode).
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }


  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and ip gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // ---------------------------------------------------------------------------
  // Number utilities

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);
  // Check whether the value of reg is a power of two and not zero.
  // Control falls through if it is, with scratch containing the mask
  // value (reg - 1).
  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
  // strictly positive but not a power of two.
  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
                                       Register scratch,
                                       Label* zero_and_neg,
                                       Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Tag the value as a smi by adding the register to itself (doubling it).
  // add is used rather than a shift so that SetCC makes the V flag report
  // overflow, which TrySmiTag below relies on.
  void SmiTag(Register reg, SBit s = LeaveCC) {
    add(reg, reg, Operand(reg), s);
  }
  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
    add(dst, src, Operand(src), s);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys ip and
  // sets flags.
  void TrySmiTag(Register reg, Label* not_a_smi) {
    TrySmiTag(reg, reg, not_a_smi);
  }
  void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
    SmiTag(ip, src, SetCC);
    b(vs, not_a_smi);  // Branch out on signed overflow of the tagging add.
    mov(reg, ip);
  }


  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Test if the register contains a smi (Z == 0 (eq) if true).
  inline void SmiTst(Register value) {
    tst(value, Operand(kSmiTagMask));
  }
  inline void NonNegativeSmiTst(Register value) {
    tst(value, Operand(kSmiTagMask | kSmiSignMask));
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(eq, smi_label);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(ne, not_smi_label);
  }
  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // ---------------------------------------------------------------------------
  // String utilities

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Register scratch3,
                               Label* not_found);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
                                                    Register object2,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
1335 void JumpIfBothInstanceTypesAreNotSequentialOneByte( 1336 Register first_object_instance_type, Register second_object_instance_type, 1337 Register scratch1, Register scratch2, Label* failure); 1338 1339 // Check if instance type is sequential one-byte string and jump to label if 1340 // it is not. 1341 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, 1342 Label* failure); 1343 1344 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name); 1345 1346 void EmitSeqStringSetCharCheck(Register string, 1347 Register index, 1348 Register value, 1349 uint32_t encoding_mask); 1350 1351 // --------------------------------------------------------------------------- 1352 // Patching helpers. 1353 1354 // Get the location of a relocated constant (its address in the constant pool) 1355 // from its load site. 1356 void GetRelocatedValueLocation(Register ldr_location, Register result, 1357 Register scratch); 1358 1359 1360 void ClampUint8(Register output_reg, Register input_reg); 1361 1362 void ClampDoubleToUint8(Register result_reg, 1363 DwVfpRegister input_reg, 1364 LowDwVfpRegister double_scratch); 1365 1366 1367 void LoadInstanceDescriptors(Register map, Register descriptors); 1368 void EnumLength(Register dst, Register map); 1369 void NumberOfOwnDescriptors(Register dst, Register map); 1370 1371 template<typename Field> 1372 void DecodeField(Register dst, Register src) { 1373 Ubfx(dst, src, Field::kShift, Field::kSize); 1374 } 1375 1376 template<typename Field> 1377 void DecodeField(Register reg) { 1378 DecodeField<Field>(reg, reg); 1379 } 1380 1381 template<typename Field> 1382 void DecodeFieldToSmi(Register dst, Register src) { 1383 static const int shift = Field::kShift; 1384 static const int mask = Field::kMask >> shift << kSmiTagSize; 1385 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); 1386 STATIC_ASSERT(kSmiTag == 0); 1387 if (shift < kSmiTagSize) { 1388 mov(dst, Operand(src, LSL, kSmiTagSize - 
shift)); 1389 and_(dst, dst, Operand(mask)); 1390 } else if (shift > kSmiTagSize) { 1391 mov(dst, Operand(src, LSR, shift - kSmiTagSize)); 1392 and_(dst, dst, Operand(mask)); 1393 } else { 1394 and_(dst, src, Operand(mask)); 1395 } 1396 } 1397 1398 template<typename Field> 1399 void DecodeFieldToSmi(Register reg) { 1400 DecodeField<Field>(reg, reg); 1401 } 1402 1403 // Activation support. 1404 void EnterFrame(StackFrame::Type type, bool load_constant_pool = false); 1405 // Returns the pc offset at which the frame ends. 1406 int LeaveFrame(StackFrame::Type type); 1407 1408 // Expects object in r0 and returns map with validated enum cache 1409 // in r0. Assumes that any other register can be used as a scratch. 1410 void CheckEnumCache(Register null_value, Label* call_runtime); 1411 1412 // AllocationMemento support. Arrays may have an associated 1413 // AllocationMemento object that can be checked for in order to pretransition 1414 // to another type. 1415 // On entry, receiver_reg should point to the array object. 1416 // scratch_reg gets clobbered. 1417 // If allocation info is present, condition flags are set to eq. 1418 void TestJSArrayForAllocationMemento(Register receiver_reg, 1419 Register scratch_reg, 1420 Label* no_memento_found); 1421 1422 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, 1423 Register scratch_reg, 1424 Label* memento_found) { 1425 Label no_memento_found; 1426 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, 1427 &no_memento_found); 1428 b(eq, memento_found); 1429 bind(&no_memento_found); 1430 } 1431 1432 // Jumps to found label if a prototype map has dictionary elements. 
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Helper for throwing exceptions. Compute a handler address and jump to
  // it. See the implementation for register usage.
  void JumpToHandlerEntry();

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  // Loads the constant pool pointer (pp) register.
  void LoadConstantPoolPointerRegister();

  bool generating_stub_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  CodePatcher(byte* address,
              int instructions,
              FlushICache flush_cache = FLUSH);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};


// RAII scope that enters a stack frame on construction and leaves it on
// destruction, enabling constant pool access for the duration of the scope.
class FrameAndConstantPoolScope {
 public:
  FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
      : masm_(masm),
        type_(type),
        old_has_frame_(masm->has_frame()),
        old_constant_pool_available_(masm->is_constant_pool_available()) {
    // We only want to enable constant pool access for non-manual frame scopes
    // to ensure the constant pool pointer is valid throughout the scope.
    DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
    masm->set_has_frame(true);
    masm->set_constant_pool_available(true);
    masm->EnterFrame(type, !old_constant_pool_available_);
  }

  ~FrameAndConstantPoolScope() {
    masm_->LeaveFrame(type_);
    masm_->set_has_frame(old_has_frame_);
    masm_->set_constant_pool_available(old_constant_pool_available_);
  }

  // Normally we generate the leave-frame code when this object goes
  // out of scope. Sometimes we may need to generate the code somewhere else
  // in addition. Calling this will achieve that, but the object stays in
  // scope, the MacroAssembler is still marked as being in a frame scope, and
  // the code will be generated again when it goes out of scope.
  void GenerateLeaveFrame() {
    DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
    masm_->LeaveFrame(type_);
  }

 private:
  MacroAssembler* masm_;
  StackFrame::Type type_;
  bool old_has_frame_;
  bool old_constant_pool_available_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
};


// Class for scoping the unavailability of constant pool access.
1575class ConstantPoolUnavailableScope { 1576 public: 1577 explicit ConstantPoolUnavailableScope(MacroAssembler* masm) 1578 : masm_(masm), 1579 old_constant_pool_available_(masm->is_constant_pool_available()) { 1580 if (FLAG_enable_ool_constant_pool) { 1581 masm_->set_constant_pool_available(false); 1582 } 1583 } 1584 ~ConstantPoolUnavailableScope() { 1585 if (FLAG_enable_ool_constant_pool) { 1586 masm_->set_constant_pool_available(old_constant_pool_available_); 1587 } 1588 } 1589 1590 private: 1591 MacroAssembler* masm_; 1592 int old_constant_pool_available_; 1593 1594 DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope); 1595}; 1596 1597 1598// ----------------------------------------------------------------------------- 1599// Static helper functions. 1600 1601inline MemOperand ContextOperand(Register context, int index) { 1602 return MemOperand(context, Context::SlotOffset(index)); 1603} 1604 1605 1606inline MemOperand GlobalObjectOperand() { 1607 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); 1608} 1609 1610 1611#ifdef GENERATED_CODE_COVERAGE 1612#define CODE_COVERAGE_STRINGIFY(x) #x 1613#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) 1614#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) 1615#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> 1616#else 1617#define ACCESS_MASM(masm) masm-> 1618#endif 1619 1620 1621} } // namespace v8::internal 1622 1623#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_ 1624