// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_r0};
const Register kReturnRegister1 = {Register::kCode_r1};
const Register kReturnRegister2 = {Register::kCode_r2};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
const Register kAllocateSizeRegister = {Register::kCode_r1};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r0};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r3};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r1};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r0};

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.  |offset| is the
// untagged field offset; the heap-object tag is subtracted so the operand
// addresses the raw memory location behind the tagged pointer.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


// Give alias names to registers.  Note cp shares r7 with kContextRegister
// above, as both name the JavaScript context pointer.
const Register cp = {Register::kCode_r7};  // JavaScript context pointer.
const Register pp = {Register::kCode_r8};  // Constant pool pointer.
const Register kRootRegister = {Register::kCode_r10};  // Roots array pointer.

// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag
  DONT_TAG_RESULT
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


// Returns a register that is distinct from every register passed in;
// trailing arguments left as no_reg are ignored.
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);


#ifdef DEBUG
// Returns true if any two of the given (non-no_reg) registers are the same.
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
#endif


enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);


  // Returns the size of a call in instructions. Note, the value returned is
  // only valid as long as no entries are added to the constant pool between
  // checking the call size and emitting the actual call.
  static int CallSize(Register target, Condition cond = al);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
  int CallStubSize(CodeStub* stub,
                   TypeFeedbackId ast_id = TypeFeedbackId::None(),
                   Condition cond = al);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Register target, Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode,
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               Condition cond = al);
  void Ret(Condition cond = al);

  // Used for patching in calls to the deoptimizer.
  void CallDeoptimizer(Address target);
  static int CallDeoptimizerSize();

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);
  void Drop(Register count, Condition cond = al);

  // As Ret above, but first discards |drop| pointer-sized stack elements
  // (see Drop).
  void Ret(int drop, Condition cond = al);

  // Swap two registers.  If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1,
            Register reg2,
            Register scratch = no_reg,
            Condition cond = al);

  // Multiply and subtract (ARM MLS): dst = srcA - src1 * src2.
  void Mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  // Unsigned / signed bit-field extract of |width| bits starting at |lsb|.
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  // The scratch register is not used for ARMv7.
  // scratch can be the same register as src (in which case it is trashed), but
  // not the same as dst.
  void Bfi(Register dst,
           Register src,
           Register scratch,
           int lsb,
           int width,
           Condition cond = al);
  // Bit-field clear: zeroes |width| bits of dst starting at |lsb|.
  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);

  void Call(Label* target);
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
            Condition cond = al) {
    // Emit the mov unless it would be a pure no-op: a register-to-itself
    // move that also leaves the condition flags untouched.
    if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
      mov(dst, src, sbit, cond);
    }
  }
  void Move(SwVfpRegister dst, SwVfpRegister src);
  void Move(DwVfpRegister dst, DwVfpRegister src);

  void Load(Register dst, const MemOperand& src, Representation r);
  void Store(Register src, const MemOperand& dst, Representation r);

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object,
                                           Register value,
                                           Register address);

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met);

  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  // NOTE(review): the eq/ne conditions below are interpreted by InNewSpace
  // (defined elsewhere); eq encodes the "not in new space" outcome.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // Checks the color of an object.  If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Label* value_is_white);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted.  For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    // Re-add the heap-object tag so that RecordWriteField receives the
    // untagged field offset it expects.
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     lr_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  // Notify the garbage collector that we wrote a code entry into a
  // JSFunction.  Only scratch is clobbered by the operation.
  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
                                 Register scratch);

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written.  |value| is the object being stored.  The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers.  Pushes leftmost register first (to highest address).
  //
  // The multi-register forms below use a single stm when the register codes
  // happen to be in descending order (stm stores the lowest-numbered register
  // at the lowest address, which then matches the required stack layout);
  // otherwise they fall back to individual pre-decrement stores.
  void Push(Register src1, Register src2, Condition cond = al) {
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1,
            Register src2,
            Register src3,
            Register src4,
            Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Push five registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          if (src4.code() > src5.code()) {
            stm(db_w, sp,
                src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
                cond);
          } else {
            stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
                cond);
            str(src5, MemOperand(sp, 4, NegPreIndex), cond);
          }
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          Push(src4, src5, cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, src5, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, src5, cond);
    }
  }

  // Pop two registers.  Pops rightmost register first (from lower address).
  // The multi-register Pop forms mirror the Push forms above: a single ldm is
  // used when the register codes are in descending order, otherwise individual
  // post-increment loads (or a recursive Pop) handle the out-of-order case.
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(!src1.is(src2));
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop four registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1,
           Register src2,
           Register src3,
           Register src4,
           Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          ldm(ia_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
        }
      } else {
        Pop(src3, src4, cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Push a fixed frame, consisting of lr, fp, constant pool (if
  // FLAG_enable_embedded_constant_pool)
  void PushCommonFrame(Register marker_reg = no_reg);

  // Push a standard frame, consisting of lr, fp, constant pool (if
  // FLAG_enable_embedded_constant_pool), context and JS function
  void PushStandardFrame(Register function_reg);

  void PopCommonFrame(Register marker_reg = no_reg);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Load two consecutive registers with two consecutive memory locations.
  void Ldrd(Register dst1,
            Register dst2,
            const MemOperand& src,
            Condition cond = al);

  // Store two consecutive registers to two consecutive memory locations.
  void Strd(Register src1,
            Register src2,
            const MemOperand& dst,
            Condition cond = al);

  // If the value is a NaN, canonicalize the value else, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst,
                          const DwVfpRegister src,
                          const Condition cond = al);
  // In-place variant: canonicalizes |value| into itself.
  void VFPCanonicalizeNaN(const DwVfpRegister value,
                          const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  // Compare single values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
                             const Condition cond = al);

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const double src2,
                             const Condition cond = al);

  // Compare single values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
                              const SwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Load the immediate double |imm| into dst; scratch may be used to
  // materialize the bit pattern when needed.
  void Vmov(const DwVfpRegister dst,
            const double imm,
            const Register scratch = no_reg);

  // Move between a core register and the high/low 32-bit half of a VFP
  // double register.
  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  // 64-bit shifts on a (low, high) register pair, with either a register or
  // an immediate shift amount.
  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register scratch, Register shift);
  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register scratch, Register shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register scratch, Register shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);

  // Loads the number from object into dst register.
  // If |object| is neither smi nor heap number, |not_number| is jumped to
  // with |object| still intact.
  void LoadNumber(Register object,
                  LowDwVfpRegister dst,
                  Register heap_number_map,
                  Register scratch,
                  Label* not_number);

  // Loads the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
  void LoadNumberAsInt32Double(Register object,
                               DwVfpRegister double_dst,
                               Register heap_number_map,
                               Register scratch,
                               LowDwVfpRegister double_scratch,
                               Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  void LoadNumberAsInt32(Register object,
                         Register dst,
                         Register heap_number_map,
                         Register scratch,
                         DwVfpRegister double_scratch0,
                         LowDwVfpRegister double_scratch1,
                         Label* not_int32);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue(bool code_pre_aging);

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0);

  // Leave the current exit frame.  Expects the return value in r0.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool restore_context,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function.  The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  // Point kRootRegister at the start of the isolate's roots array.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver.  |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);

  void FloodFunctionIfStepping(Register fun, Register new_target,
                               const ParameterCount& expected,
                               const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      Register new_target,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

  void IsObjectNameType(Register object,
                        Register scratch,
                        Label* fail);

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register t0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register t0,
                                Register t1,
                                Register t2);


  // Emit a marker nop of the given type (encoded as mov r<type>, r<type>).
  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
  // These instructions are generated to mark special location in the code,
  // like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  // Decode a marker nop: returns the marker type <n> encoded as
  // mov rn, rn, or -1 if the instruction is not a marker.
  static inline int GetCodeMarker(Instr instr) {
    // Destination register lives in bits 15:12, source in bits 3:0.
    int dst_reg_offset = 12;
    int dst_mask = 0xf << dst_reg_offset;
    int src_mask = 0xf;
    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
    int src_reg = instr & src_mask;
    uint32_t non_register_mask = ~(dst_mask | src_mask);
    // Unconditional mov: MOV opcode (0b1101 == 13) in bits 24:21.
    uint32_t mov_mask = al | 13 << 21;

    // Return <n> if we have a mov rn rn, else return -1.
    int type = ((instr & non_register_mask) == mov_mask) &&
               (dst_reg == src_reg) &&
               (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
                   ? src_reg
                   : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size,
                Register result,
                Register scratch1,
                Register scratch2,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);

  // FastAllocate is right now only used for folded allocations. It just
  // increments the top pointer without checking against limit. This can only
  // be done if it was proved earlier that the allocation will succeed.
  void FastAllocate(int object_size, Register result, Register scratch1,
                    Register scratch2, AllocationFlags flags);

  void FastAllocate(Register object_size, Register result, Register result_end,
                    Register scratch, AllocationFlags flags);

  // Allocate string objects of the various representations; each jumps to
  // gc_required if the allocation cannot be satisfied.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result,
                                   DwVfpRegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Register heap_number_map,
                                   Label* gc_required);

  // Allocate and initialize a JSValue wrapper with the specified {constructor}
  // and {value}.
  void AllocateJSValue(Register result, Register constructor, Register value,
                       Register scratch1, Register scratch2,
                       Label* gc_required);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values.  Fields starting at |current_address|
  // not including |end_address| are overwritten with the value in |filler|.  At
  // the end of the loop, |current_address| takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);


  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Register scratch,
                         Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements.  Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Register scratch,
                               Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements.  Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Register scratch,
                            Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   LowDwVfpRegister double_scratch,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the
  // compare sequences branches to early_success.
  void CompareMap(Register obj,
                  Register scratch,
                  Handle<Map> map,
                  Label* early_success);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map,
                  Handle<Map> map,
                  Label* early_success);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);


  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);


  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object)
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Compare the given value and the value of weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
971 void CompareRoot(Register obj, Heap::RootListIndex index); 972 void PushRoot(Heap::RootListIndex index) { 973 LoadRoot(ip, index); 974 Push(ip); 975 } 976 977 // Compare the object in a register to a value and jump if they are equal. 978 void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) { 979 CompareRoot(with, index); 980 b(eq, if_equal); 981 } 982 983 // Compare the object in a register to a value and jump if they are not equal. 984 void JumpIfNotRoot(Register with, Heap::RootListIndex index, 985 Label* if_not_equal) { 986 CompareRoot(with, index); 987 b(ne, if_not_equal); 988 } 989 990 // Load and check the instance type of an object for being a string. 991 // Loads the type into the second argument register. 992 // Returns a condition that will be enabled if the object was a string 993 // and the passed-in condition passed. If the passed-in condition failed 994 // then flags remain unchanged. 995 Condition IsObjectStringType(Register obj, 996 Register type, 997 Condition cond = al) { 998 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); 999 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); 1000 tst(type, Operand(kIsNotStringMask), cond); 1001 DCHECK_EQ(0u, kStringTag); 1002 return eq; 1003 } 1004 1005 1006 // Picks out an array index from the hash field. 1007 // Register use: 1008 // hash - holds the index's hash. Clobbered. 1009 // index - holds the overwritten index on exit. 1010 void IndexFromHash(Register hash, Register index); 1011 1012 // Get the number of least significant bits from a register 1013 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); 1014 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); 1015 1016 // Load the value of a smi object into a double register. 1017 // The register value must be between d0 and d15. 
  void SmiToDouble(LowDwVfpRegister value, Register smi);

  // Check if a double can be exactly represented as a signed 32-bit integer.
  // Z flag set to one if true.
  void TestDoubleIsInt32(DwVfpRegister double_input,
                         LowDwVfpRegister double_scratch);

  // Try to convert a double to a signed 32-bit integer.
  // Z flag set to one and result assigned if the conversion is exact.
  void TryDoubleToInt32Exact(Register result,
                             DwVfpRegister double_input,
                             LowDwVfpRegister double_scratch);

  // Floors a double and writes the value to the result register.
  // Go to exact if the conversion is exact (to be able to test -0),
  // fall through calling code if an overflow occurred, else go to done.
  // In return, input_high is loaded with high bits of input.
  void TryInt32Floor(Register result,
                     DwVfpRegister double_input,
                     Register input_high,
                     LowDwVfpRegister double_scratch,
                     Label* done,
                     Label* exact);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result,
                                  DwVfpRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DwVfpRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'object' and 'result' must
  // be different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Register scratch1,
                         Label* not_int32);

  // Check whether d16-d31 are available on the CPU. The result is given by the
  // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
  void CheckFor32DRegs(Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
  // values to location, saving [d0..(d15|d31)].
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
  // values to location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  // Call a runtime routine, saving the FP (VFP) registers across the call.
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid and an explicit
  // argument count instead of the function's declared nargs.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Returns the number of argument words that must be passed on the stack for
  // a C call with the given register/double argument counts.
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DwVfpRegister src);
  void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
  void MovToFloatResult(DwVfpRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  // ABI-abstracted counterparts of MovToFloatParameter/MovToFloatResult for
  // reading a double parameter or a C call's double result.
  void MovFromFloatParameter(DwVfpRegister dst);
  void MovFromFloatResult(DwVfpRegister dst);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Handle for the code object of this assembler; patched on installation.
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }


  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and ip gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug-code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    // Running on real ARM hardware: detect at runtime.
    return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    // Cross-compile/simulator build with hard-float explicitly selected.
    return true;
#else
    return false;
#endif
  }

  // ---------------------------------------------------------------------------
  // Number utilities

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);
  // Check whether the value of reg is a power of two and not zero.
  // Control falls through if it is, with scratch containing the mask
  // value (reg - 1).
  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
  // strictly positive but not a power of two.
  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
                                       Register scratch,
                                       Label* zero_and_neg,
                                       Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Tag an int32 as a smi by adding the register to itself, i.e. shifting the
  // value left by one bit. Pass SetCC to have the V flag signal overflow.
  void SmiTag(Register reg, SBit s = LeaveCC) {
    add(reg, reg, Operand(reg), s);
  }
  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
    add(dst, src, Operand(src), s);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Label* not_a_smi) {
    TrySmiTag(reg, reg, not_a_smi);
  }
  void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
    // Tag into ip first so |src| survives when the overflow branch is taken;
    // the flag-setting add makes V indicate signed overflow (vs).
    SmiTag(ip, src, SetCC);
    b(vs, not_a_smi);
    mov(reg, ip);
  }


  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Test if the register contains a smi (Z == 1 (eq) if true, since a smi has
  // its tag bits clear and TST sets Z when the masked result is zero).
  inline void SmiTst(Register value) {
    tst(value, Operand(kSmiTagMask));
  }
  // As above, but also requires the sign bit to be clear (non-negative smi).
  inline void NonNegativeSmiTst(Register value) {
    tst(value, Operand(kSmiTagMask | kSmiSignMask));
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(eq, smi_label);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(ne, not_smi_label);
  }
  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a number, enabled via --debug-code.
  void AssertNotNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // ---------------------------------------------------------------------------
  // String utilities

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
                                                    Register object2,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
1368 void JumpIfBothInstanceTypesAreNotSequentialOneByte( 1369 Register first_object_instance_type, Register second_object_instance_type, 1370 Register scratch1, Register scratch2, Label* failure); 1371 1372 // Check if instance type is sequential one-byte string and jump to label if 1373 // it is not. 1374 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, 1375 Label* failure); 1376 1377 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name); 1378 1379 void EmitSeqStringSetCharCheck(Register string, 1380 Register index, 1381 Register value, 1382 uint32_t encoding_mask); 1383 1384 1385 void ClampUint8(Register output_reg, Register input_reg); 1386 1387 void ClampDoubleToUint8(Register result_reg, 1388 DwVfpRegister input_reg, 1389 LowDwVfpRegister double_scratch); 1390 1391 1392 void LoadInstanceDescriptors(Register map, Register descriptors); 1393 void EnumLength(Register dst, Register map); 1394 void NumberOfOwnDescriptors(Register dst, Register map); 1395 void LoadAccessor(Register dst, Register holder, int accessor_index, 1396 AccessorComponent accessor); 1397 1398 template<typename Field> 1399 void DecodeField(Register dst, Register src) { 1400 Ubfx(dst, src, Field::kShift, Field::kSize); 1401 } 1402 1403 template<typename Field> 1404 void DecodeField(Register reg) { 1405 DecodeField<Field>(reg, reg); 1406 } 1407 1408 template<typename Field> 1409 void DecodeFieldToSmi(Register dst, Register src) { 1410 static const int shift = Field::kShift; 1411 static const int mask = Field::kMask >> shift << kSmiTagSize; 1412 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); 1413 STATIC_ASSERT(kSmiTag == 0); 1414 if (shift < kSmiTagSize) { 1415 mov(dst, Operand(src, LSL, kSmiTagSize - shift)); 1416 and_(dst, dst, Operand(mask)); 1417 } else if (shift > kSmiTagSize) { 1418 mov(dst, Operand(src, LSR, shift - kSmiTagSize)); 1419 and_(dst, dst, Operand(mask)); 1420 } else { 1421 and_(dst, src, Operand(mask)); 1422 
} 1423 } 1424 1425 template<typename Field> 1426 void DecodeFieldToSmi(Register reg) { 1427 DecodeField<Field>(reg, reg); 1428 } 1429 1430 // Load the type feedback vector from a JavaScript frame. 1431 void EmitLoadTypeFeedbackVector(Register vector); 1432 1433 // Activation support. 1434 void EnterFrame(StackFrame::Type type, 1435 bool load_constant_pool_pointer_reg = false); 1436 // Returns the pc offset at which the frame ends. 1437 int LeaveFrame(StackFrame::Type type); 1438 1439 // Expects object in r0 and returns map with validated enum cache 1440 // in r0. Assumes that any other register can be used as a scratch. 1441 void CheckEnumCache(Label* call_runtime); 1442 1443 // AllocationMemento support. Arrays may have an associated 1444 // AllocationMemento object that can be checked for in order to pretransition 1445 // to another type. 1446 // On entry, receiver_reg should point to the array object. 1447 // scratch_reg gets clobbered. 1448 // If allocation info is present, condition flags are set to eq. 1449 void TestJSArrayForAllocationMemento(Register receiver_reg, 1450 Register scratch_reg, 1451 Label* no_memento_found); 1452 1453 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, 1454 Register scratch_reg, 1455 Label* memento_found) { 1456 Label no_memento_found; 1457 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, 1458 &no_memento_found); 1459 b(eq, memento_found); 1460 bind(&no_memento_found); 1461 } 1462 1463 // Jumps to found label if a prototype map has dictionary elements. 1464 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, 1465 Register scratch1, Label* found); 1466 1467 // Loads the constant pool pointer (pp) register. 
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void LoadConstantPoolPointerRegister();

 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,
                           int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Shared tail of the string allocators: stores the length and map of a
  // freshly allocated string object.
  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address. Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit. Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  enum FlushICache {
    FLUSH,
    DONT_FLUSH
  };

  // |instructions| is the patch size in instructions; |size_| below is kept in
  // bytes — presumably instructions * kInstrSize, confirm in the .cc file.
  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate a MemOperand addressing slot |index| of a Context object.
inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}


// Generate a MemOperand addressing the native-context slot of the current
// context (held in cp).
inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}


#ifdef GENERATED_CODE_COVERAGE
// In coverage builds, every macro-assembler call first emits a stop marker
// tagged with the file and line of the call site.
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


}  // namespace internal
}  // namespace v8

#endif  // V8_ARM_MACRO_ASSEMBLER_ARM_H_