// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


// We always generate arm code, never thumb code, even if V8 is compiled to
// thumb, so we require inter-working support
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif


// We do not support thumb inter-working with an arm architecture not supporting
// the blx instruction (below v5t). If you know what CPU you are compiling for
// you can use -march=armv7 or similar.
#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
# error "For thumb inter-working we require an architecture which supports blx"
#endif


// Using bx does not yield better code, so use it only when required
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif


void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
  bx(target, cond);
#else
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if USE_BX
  mov(ip, Operand(target, rmode));
  bx(ip, cond);
#else
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


int MacroAssembler::CallSize(Register target, Condition cond) {
#if USE_BLX
  return kInstrSize;
#else
  return 2 * kInstrSize;
#endif
}


void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
#if USE_BLX
  blx(target, cond);
#else
  // set lr for return at current pc + 8
  mov(lr, Operand(pc), LeaveCC, cond);
  mov(pc, Operand(target), LeaveCC, cond);
#endif
  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(
    Address target, RelocInfo::Mode rmode, Condition cond) {
  int size = 2 * kInstrSize;
  Instr mov_instr = cond | MOV | LeaveCC;
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
  if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
    size += kInstrSize;
  }
  return size;
}


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  Label start;
  bind(&start);
#if USE_BLX
  // On ARMv5 and after the recommended call sequence is:
  //   ldr ip, [pc, #...]
  //   blx ip

  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method will automatically record
  // positions when pc is the target, since this is not the case here
  // we have to do it explicitly.
  positions_recorder()->WriteRecordedPositions();

  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  blx(ip, cond);

  ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
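  // (Added note: in ARM state, reading pc yields the address of the current
  // instruction plus 8, so lr ends up pointing just past the following mov.)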
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
  ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             unsigned ast_id,
                             Condition cond) {
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          unsigned ast_id,
                          Condition cond) {
  Label start;
  bind(&start);
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  // 'code' is always generated ARM code, never THUMB code
  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
  ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
            SizeOfCodeGeneratedSince(&start));
}


void MacroAssembler::Ret(Condition cond) {
#if USE_BX
  bx(lr, cond);
#else
  mov(pc, Operand(lr), LeaveCC, cond);
#endif
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}


void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}


void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}


void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));
  push(ip);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}


void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    mov(dst, src, LeaveCC, cond);
  }
}


void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  ASSERT(CpuFeatures::IsSupported(VFP3));
  CpuFeatures::Scope scope(VFP3);
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}


void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_use_constant_pool() &&
      src2.immediate() == 0) {
    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);

  } else if (!src2.is_single_instruction() &&
             !src2.must_use_constant_pool() &&
             CpuFeatures::IsSupported(ARMv7) &&
             IsPowerOf2(src2.immediate() + 1)) {
    ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);

  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}


void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}

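// Added worked example (not in the original file): for lsb == 4 and
// width == 8 the pre-ARMv7 fallback mask above and below is
// ((1 << 12) - 1) - ((1 << 4) - 1) == 0xFF0, i.e. exactly bits 4..11.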
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  ASSERT(0 <= lsb && lsb < 32);
  ASSERT(0 <= width && width < 32);
  ASSERT(lsb + width < 32);
  ASSERT(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}


void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
  } else {
    bfc(dst, lsb, width, cond);
  }
}


void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7)) {
    ASSERT(!dst.is(pc) && !src.rm().is(pc));
    ASSERT((satpos >= 0) && (satpos <= 31));

    // These asserts are required to ensure compatibility with the ARMv7
    // implementation.
    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
    ASSERT(src.rs().is(no_reg));

    Label done;
    int satval = (1 << satpos) - 1;

    if (cond != al) {
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    }
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src);
    }
    tst(dst, Operand(~satval));
    b(eq, &done);
    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi);  // 0 if negative.
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
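    // (Added) dst now holds the operand clamped to [0, (1 << satpos) - 1],
    // matching the ARMv7 usat semantics emulated here.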
    bind(&done);
  } else {
    usat(dst, satpos, src, cond);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(object);
    mov(result, Operand(cell));
    ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
  } else {
    mov(result, Operand(object));
  }
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  ASSERT(cond == eq || cond == ne);
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
  b(cond, branch);
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
  }
}

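// Illustrative usage sketch (not from the original file): after a store such
// as
//   str(value, FieldMemOperand(object, JSObject::kPropertiesOffset));
// the barrier would be applied with
//   RecordWriteField(object, JSObject::kPropertiesOffset, value, scratch,
//                    kLRHasNotBeenSaved, kDontSaveFPRegs);
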
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!address.is(cp) && !value.is(cp));

  if (emit_debug_code()) {
    ldr(ip, MemOperand(address));
    cmp(ip, value);
    Check(eq, "Wrong address or value passed to RecordWrite");
  }

  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    tst(value, Operand(kSmiTagMask));
    b(eq, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
  }
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  ldr(scratch, MemOperand(ip));
  // Store pointer to buffer and increment buffer top.
  str(address, MemOperand(scratch, kPointerSize, PostIndex));
  // Write back new top of buffer.
  str(scratch, MemOperand(ip));
  // Call stub on end of buffer.
  // Check for end of buffer.
  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    b(eq, &done);
  } else {
    ASSERT(and_then == kReturnAtEnd);
    Ret(eq);
  }
  push(lr);
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(fp_mode);
  CallStub(&store_buffer_overflow);
  pop(lr);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}

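// (Added) With kFallThroughAtEnd the helper above always falls through to
// the caller's code; with kReturnAtEnd it returns instead, either
// immediately when the store buffer still has room or after calling the
// overflow stub.
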
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0:
  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                      kDoubleSize));
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                      kDoubleSize));
  PopSafepointRegisters();
}

void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  str(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));

  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatures::Scope scope(ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
      MemOperand src2(src);
      src2.set_offset(src2.offset() + 4);
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
      } else {
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      }
    } else {  // PostIndex or NegPostIndex.
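      // (Added) The loads below are ordered so that the base register is not
      // clobbered before both halves have been read when it aliases dst1.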
      ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
      if (dst1.is(src.rn())) {
        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
        ldr(dst1, src, cond);
      } else {
        MemOperand src2(src);
        src2.set_offset(src2.offset() - 4);
        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
        ldr(dst2, src2, cond);
      }
    }
  }
}


void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());

  // V8 does not use this addressing mode, so the fallback code
  // below doesn't support it yet.
  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));

  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatures::Scope scope(ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
      dst2.set_offset(dst2.offset() + 4);
      str(src1, dst, cond);
      str(src2, dst2, cond);
    } else {  // PostIndex or NegPostIndex.
      ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
      dst2.set_offset(dst2.offset() - 4);
      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
      str(src2, dst2, cond);
    }
  }
}


void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
                                    const Register scratch,
                                    const Condition cond) {
  vmrs(scratch, cond);
  bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
  vmsr(scratch, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const double imm,
                          const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value(imm);
  // Handle special values first.
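  // (Added) Compare bit patterns rather than using ==, which cannot
  // distinguish -0.0 from +0.0.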
  if (value.bits == zero.bits) {
    vmov(dst, kDoubleRegZero, cond);
  } else if (value.bits == minus_zero.bits) {
    vneg(dst, kDoubleRegZero, cond);
  } else {
    vmov(dst, imm, cond);
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  // r0-r3: preserved
  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  ldm(ia_w, sp, fp.bit() | lr.bit());
}


void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  Push(lr, fp);
  mov(fp, Operand(sp));  // Set up new frame pointer.
  // Reserve room for saved entry sp and code object.
  sub(sp, sp, Operand(2 * kPointerSize));
  if (emit_debug_code()) {
    mov(ip, Operand(0));
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  mov(ip, Operand(CodeObject()));
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    DwVfpRegister first = d0;
    DwVfpRegister last =
        DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
    vstm(db_w, sp, first, last);
    // Note that d0 will be accessible at
    //   fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  // Reserve place for the return address and stack space and align the frame
  // preparing for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    ASSERT(IsPowerOf2(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(ip, sp, Operand(kPointerSize));
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  mov(scratch1, Operand(length, LSL, kSmiTagSize));
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_ARM)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_ARM)
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // defined(V8_HOST_ARCH_ARM)
}


void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = 2 * kPointerSize;
    sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
    DwVfpRegister first = d0;
    DwVfpRegister last =
        DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
    vldm(ia, r3, first, last);
  }

  // Clear top frame.
  mov(r3, Operand(0, RelocInfo::NONE));
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  ldr(cp, MemOperand(ip));
#ifdef DEBUG
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
  }
}

void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}


void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // This macro takes the dst register to make the code more readable
  // at the call sites. However, the dst register has to be r5 to
  // follow the calling convention which requires the call type to be
  // in r5.
  ASSERT(dst.is(r5));
  if (call_kind == CALL_AS_FUNCTION) {
    mov(dst, Operand(Smi::FromInt(1)));
  } else {
    mov(dst, Operand(Smi::FromInt(0)));
  }
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count
  //  r3: callee code entry
  //
  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
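  // (Added example) For a function declared with two parameters but invoked
  // with three arguments, the code below ends up with r0 == 3 and r2 == 2
  // and dispatches to the ArgumentsAdaptorTrampoline.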
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      SetCallKind(r5, call_kind);
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      SetCallKind(r5, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
                 call_wrapper, call_kind);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      SetCallKind(r5, call_kind);
      Call(code);
      call_wrapper.AfterCall();
    } else {
      ASSERT(flag == JUMP_FUNCTION);
      SetCallKind(r5, call_kind);
      Jump(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}

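// (Added) The register overload above is used when the code entry has been
// loaded from a JSFunction at run time; the Handle<Code> overload below is
// used for direct calls to known code objects.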
void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, code, no_reg,
                 &done, &definitely_mismatches, flag,
                 NullCallWrapper(), call_kind);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      SetCallKind(r5, call_kind);
      Call(code, rmode);
    } else {
      SetCallKind(r5, call_kind);
      Jump(code, rmode);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  ASSERT(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
  ldr(code_reg,
      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // You can't call a function without a valid frame.
  ASSERT(flag == JUMP_FUNCTION || has_frame());

  // Get the function and setup the context.
  LoadHeapObject(r1, function);
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  ParameterCount expected(function->shared()->formal_parameter_count());
  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
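  // (Added) JSFunction::kCodeEntryOffset holds the entry address of the
  // function's current code object, so recompilation only has to update
  // that one field.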
  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(lt, fail);
  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  b(gt, fail);
}


void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  ASSERT(kNotStringTag != 0);

  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsNotStringMask));
  b(ne, fail);
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  mov(r0, Operand(0, RelocInfo::NONE));
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  ASSERT(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif


void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
  // We will build up the handler from the bottom by pushing on the stack.
  // Set up the code object (r5) and the state (r6) for pushing.
  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  mov(r5, Operand(CodeObject()));
  mov(r6, Operand(state));

  // Push the frame pointer, context, state, and code object.
  if (kind == StackHandler::JS_ENTRY) {
    mov(r7, Operand(Smi::FromInt(0)));  // Indicates no context.
    mov(ip, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
  } else {
    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
  }

  // Link the current handler as the next handler.
  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(r5, MemOperand(r6));
  push(r5);
  // Set this new handler as the current one.
  str(sp, MemOperand(r6));
}


void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r1);
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  str(r1, MemOperand(ip));
}

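// (Added sketch) A stack handler as pushed by PushTryHandler, from lowest to
// highest address: next handler, code object, state, context, fp.
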
void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // r0 = exception, r1 = code object, r2 = state.
  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
  add(pc, r1, Operand(r2, ASR, kSmiTagSize));  // Jump.
}


void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in r0.
  if (!value.is(r0)) {
    mov(r0, value);
  }
  // Drop the stack pointer to the top of the top handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));
  // Restore the next handler.
  pop(r2);
  str(r2, MemOperand(r3));

  // Get the code object (r1) and state (r2). Restore the context and frame
  // pointer.
  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  tst(cp, cp);
  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);

  JumpToHandlerEntry();
}

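// (Added) Throw above unwinds to the nearest handler of any kind;
// ThrowUncatchable below skips JS handlers and unwinds straight to the
// top-most JS_ENTRY handler.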
void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in r0.
  if (!value.is(r0)) {
    mov(r0, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind);
  bind(&fetch_next);
  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  tst(r2, Operand(StackHandler::KindField::kMask));
  b(ne, &fetch_next);

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(r2);
  str(r2, MemOperand(r3));
  // Get the code object (r1) and state (r2). Clear the context and frame
  // pointer (0 was saved in the handler).
  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());

  JumpToHandlerEntry();
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(ip));
  ASSERT(!scratch.is(ip));

  // Load current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand(0, RelocInfo::NONE));
  Check(ne, "we should not have an empty lexical context");
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code. Due to the fact
    // that ip is clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code. Due to the fact
    // that ip is clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, "JSGlobalProxy::context() should not be null.");

    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    // Restore ip is not needed. ip is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to holder's context.
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ldr(scratch, FieldMemOperand(scratch, token_offset));
  ldr(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}

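// Added reference sketch (not in the original file); GetNumberHash below
// mirrors ComputeIntegerHash in utils.h:
//   hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;
//   hash ^= hash >> 4;
//   hash *= 2057;  // == (hash << 11) + (hash << 3) + hash
//   hash ^= hash >> 16;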
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  eor(t0, t0, Operand(scratch));

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  mvn(scratch, Operand(t0));
  add(t0, scratch, Operand(t0, LSL, 15));
  // hash = hash ^ (hash >> 12);
  eor(t0, t0, Operand(t0, LSR, 12));
  // hash = hash + (hash << 2);
  add(t0, t0, Operand(t0, LSL, 2));
  // hash = hash ^ (hash >> 4);
  eor(t0, t0, Operand(t0, LSR, 4));
  // hash = hash * 2057;
  mov(scratch, Operand(t0, LSL, 11));
  add(t0, t0, Operand(t0, LSL, 3));
  add(t0, t0, scratch);
  // hash = hash ^ (hash >> 16);
  eor(t0, t0, Operand(t0, LSR, 16));
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register t0,
                                              Register t1,
                                              Register t2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'result'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // t0 - holds the untagged key on entry and holds the hash once computed.
  //
  // t1 - used to hold the capacity mask of the dictionary
  //
  // t2 - used for the index into the dictionary.
  Label done;

  GetNumberHash(t0, t1);

  // Compute the capacity mask.
  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
  sub(t1, t1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use t2 for index calculations and keep the hash intact in t0.
    mov(t2, t0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(t2, t2, Operand(t1));

    // Scale the index by multiplying by the element size.
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3

    // Check if the key is identical to the name.
    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
    ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
    cmp(key, Operand(ip));
    if (i != kProbes - 1) {
      b(eq, &done);
    } else {
      b(ne, miss);
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // t2: elements + (index * kPointerSize)
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  b(ne, miss);

  // Get the value at the masked, scaled index and return.
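  // (Added) Each dictionary entry is three pointers wide: key, value,
  // details; the value sits one pointer past the key.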
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  ldr(result, FieldMemOperand(t2, kValueOffset));
}


void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(ip));
  ASSERT(!scratch2.is(ip));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  // Also, assert that the registers are numbered such that the values
  // are loaded in the correct order.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  ASSERT(result.code() < ip.code());

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  Register obj_size_reg = scratch2;
  mov(topaddr, Operand(new_space_allocation_top));
  mov(obj_size_reg, Operand(object_size));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      ldr(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, "Unexpected allocation top");
    }
    // Load allocation limit into ip. Result already contains allocation top.
    ldr(ip, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  add(scratch2, result, Operand(obj_size_reg), SetCC);
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);
  str(scratch2, MemOperand(topaddr));

  // Tag object if requested.
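  // (Added) kHeapObjectTag is 1, so adding it yields the standard odd-valued
  // tagged heap pointer.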
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}


void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  // Assert that the register arguments are different and that none of
  // them are ip. ip is used explicitly in the code generated below.
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!object_size.is(ip));
  ASSERT(!result.is(ip));
  ASSERT(!scratch1.is(ip));
  ASSERT(!scratch2.is(ip));

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  // Also, assert that the registers are numbered such that the values
  // are loaded in the correct order.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  ASSERT(result.code() < ip.code());

  // Set up allocation top address.
  Register topaddr = scratch1;
  mov(topaddr, Operand(new_space_allocation_top));

  // This code stores a temporary value in ip. This is OK, as the code below
  // does not need ip for implicit literal generation.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. ip is used
      // immediately below so this use of ip does not cause difference with
      // respect to register content between debug and release mode.
      ldr(ip, MemOperand(topaddr));
      cmp(result, ip);
      Check(eq, "Unexpected allocation top");
    }
    // Load allocation limit into ip. Result already contains allocation top.
    ldr(ip, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
  } else {
    add(scratch2, result, Operand(object_size), SetCC);
  }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);

  // Update allocation top. result temporarily holds the new top.
  if (emit_debug_code()) {
    tst(scratch2, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space");
  }
  str(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  and_(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  mov(scratch, Operand(new_space_allocation_top));
  ldr(scratch, MemOperand(scratch));
  cmp(object, scratch);
  Check(lt, "Undo allocation of non allocated memory");
#endif
  // Write the address of the object to un-allocate as the current top.
  mov(scratch, Operand(new_space_allocation_top));
  str(object, MemOperand(scratch));
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
  add(scratch1, scratch1,
      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate two-byte string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kCharSize == 1);
  add(scratch1, length,
      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
1755 InitializeNewString(result, 1756 length, 1757 Heap::kAsciiStringMapRootIndex, 1758 scratch1, 1759 scratch2); 1760} 1761 1762 1763void MacroAssembler::AllocateTwoByteConsString(Register result, 1764 Register length, 1765 Register scratch1, 1766 Register scratch2, 1767 Label* gc_required) { 1768 AllocateInNewSpace(ConsString::kSize, 1769 result, 1770 scratch1, 1771 scratch2, 1772 gc_required, 1773 TAG_OBJECT); 1774 1775 InitializeNewString(result, 1776 length, 1777 Heap::kConsStringMapRootIndex, 1778 scratch1, 1779 scratch2); 1780} 1781 1782 1783void MacroAssembler::AllocateAsciiConsString(Register result, 1784 Register length, 1785 Register scratch1, 1786 Register scratch2, 1787 Label* gc_required) { 1788 AllocateInNewSpace(ConsString::kSize, 1789 result, 1790 scratch1, 1791 scratch2, 1792 gc_required, 1793 TAG_OBJECT); 1794 1795 InitializeNewString(result, 1796 length, 1797 Heap::kConsAsciiStringMapRootIndex, 1798 scratch1, 1799 scratch2); 1800} 1801 1802 1803void MacroAssembler::AllocateTwoByteSlicedString(Register result, 1804 Register length, 1805 Register scratch1, 1806 Register scratch2, 1807 Label* gc_required) { 1808 AllocateInNewSpace(SlicedString::kSize, 1809 result, 1810 scratch1, 1811 scratch2, 1812 gc_required, 1813 TAG_OBJECT); 1814 1815 InitializeNewString(result, 1816 length, 1817 Heap::kSlicedStringMapRootIndex, 1818 scratch1, 1819 scratch2); 1820} 1821 1822 1823void MacroAssembler::AllocateAsciiSlicedString(Register result, 1824 Register length, 1825 Register scratch1, 1826 Register scratch2, 1827 Label* gc_required) { 1828 AllocateInNewSpace(SlicedString::kSize, 1829 result, 1830 scratch1, 1831 scratch2, 1832 gc_required, 1833 TAG_OBJECT); 1834 1835 InitializeNewString(result, 1836 length, 1837 Heap::kSlicedAsciiStringMapRootIndex, 1838 scratch1, 1839 scratch2); 1840} 1841 1842 1843void MacroAssembler::CompareObjectType(Register object, 1844 Register map, 1845 Register type_reg, 1846 InstanceType type) { 1847 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); 1848 CompareInstanceType(map, type_reg, type); 1849} 1850 1851 1852void MacroAssembler::CompareInstanceType(Register map, 1853 Register type_reg, 1854 InstanceType type) { 1855 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); 1856 cmp(type_reg, Operand(type)); 1857} 1858 1859 1860void MacroAssembler::CompareRoot(Register obj, 1861 Heap::RootListIndex index) { 1862 ASSERT(!obj.is(ip)); 1863 LoadRoot(ip, index); 1864 cmp(obj, ip); 1865} 1866 1867 1868void MacroAssembler::CheckFastElements(Register map, 1869 Register scratch, 1870 Label* fail) { 1871 STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); 1872 STATIC_ASSERT(FAST_ELEMENTS == 1); 1873 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); 1874 cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); 1875 b(hi, fail); 1876} 1877 1878 1879void MacroAssembler::CheckFastObjectElements(Register map, 1880 Register scratch, 1881 Label* fail) { 1882 STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); 1883 STATIC_ASSERT(FAST_ELEMENTS == 1); 1884 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); 1885 cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); 1886 b(ls, fail); 1887 cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); 1888 b(hi, fail); 1889} 1890 1891 1892void MacroAssembler::CheckFastSmiOnlyElements(Register map, 1893 Register scratch, 1894 Label* fail) { 1895 STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); 1896 ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); 1897 cmp(scratch, 
Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); 1898 b(hi, fail); 1899} 1900 1901 1902void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, 1903 Register key_reg, 1904 Register receiver_reg, 1905 Register elements_reg, 1906 Register scratch1, 1907 Register scratch2, 1908 Register scratch3, 1909 Register scratch4, 1910 Label* fail) { 1911 Label smi_value, maybe_nan, have_double_value, is_nan, done; 1912 Register mantissa_reg = scratch2; 1913 Register exponent_reg = scratch3; 1914 1915 // Handle smi values specially. 1916 JumpIfSmi(value_reg, &smi_value); 1917 1918 // Ensure that the object is a heap number 1919 CheckMap(value_reg, 1920 scratch1, 1921 isolate()->factory()->heap_number_map(), 1922 fail, 1923 DONT_DO_SMI_CHECK); 1924 1925 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 1926 // in the exponent. 1927 mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); 1928 ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); 1929 cmp(exponent_reg, scratch1); 1930 b(ge, &maybe_nan); 1931 1932 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); 1933 1934 bind(&have_double_value); 1935 add(scratch1, elements_reg, 1936 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); 1937 str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); 1938 uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); 1939 str(exponent_reg, FieldMemOperand(scratch1, offset)); 1940 jmp(&done); 1941 1942 bind(&maybe_nan); 1943 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise 1944 // it's an Infinity, and the non-NaN code path applies. 1945 b(gt, &is_nan); 1946 ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); 1947 cmp(mantissa_reg, Operand(0)); 1948 b(eq, &have_double_value); 1949 bind(&is_nan); 1950 // Load canonical NaN for storing into the double array. 
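// All NaN payloads are collapsed to a single canonical bit pattern here
// because FixedDoubleArray reserves a different NaN pattern, the hole
// NaN, to mark missing elements; an arbitrary NaN stored unchanged could
// be misread as a hole.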
1951 uint64_t nan_int64 = BitCast<uint64_t>( 1952 FixedDoubleArray::canonical_not_the_hole_nan_as_double()); 1953 mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); 1954 mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); 1955 jmp(&have_double_value); 1956 1957 bind(&smi_value); 1958 add(scratch1, elements_reg, 1959 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); 1960 add(scratch1, scratch1, 1961 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); 1962 // scratch1 is now effective address of the double element 1963 1964 FloatingPointHelper::Destination destination; 1965 if (CpuFeatures::IsSupported(VFP3)) { 1966 destination = FloatingPointHelper::kVFPRegisters; 1967 } else { 1968 destination = FloatingPointHelper::kCoreRegisters; 1969 } 1970 1971 Register untagged_value = receiver_reg; 1972 SmiUntag(untagged_value, value_reg); 1973 FloatingPointHelper::ConvertIntToDouble(this, 1974 untagged_value, 1975 destination, 1976 d0, 1977 mantissa_reg, 1978 exponent_reg, 1979 scratch4, 1980 s2); 1981 if (destination == FloatingPointHelper::kVFPRegisters) { 1982 CpuFeatures::Scope scope(VFP3); 1983 vstr(d0, scratch1, 0); 1984 } else { 1985 str(mantissa_reg, MemOperand(scratch1, 0)); 1986 str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); 1987 } 1988 bind(&done); 1989} 1990 1991 1992void MacroAssembler::CompareMap(Register obj, 1993 Register scratch, 1994 Handle<Map> map, 1995 Label* early_success, 1996 CompareMapMode mode) { 1997 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 1998 cmp(scratch, Operand(map)); 1999 if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { 2000 Map* transitioned_fast_element_map( 2001 map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); 2002 ASSERT(transitioned_fast_element_map == NULL || 2003 map->elements_kind() != FAST_ELEMENTS); 2004 if (transitioned_fast_element_map != NULL) { 2005 b(eq, early_success); 2006 cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map))); 2007 } 2008 2009 Map* transitioned_double_map( 2010 map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); 2011 ASSERT(transitioned_double_map == NULL || 2012 map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); 2013 if (transitioned_double_map != NULL) { 2014 b(eq, early_success); 2015 cmp(scratch, Operand(Handle<Map>(transitioned_double_map))); 2016 } 2017 } 2018} 2019 2020 2021void MacroAssembler::CheckMap(Register obj, 2022 Register scratch, 2023 Handle<Map> map, 2024 Label* fail, 2025 SmiCheckType smi_check_type, 2026 CompareMapMode mode) { 2027 if (smi_check_type == DO_SMI_CHECK) { 2028 JumpIfSmi(obj, fail); 2029 } 2030 2031 Label success; 2032 CompareMap(obj, scratch, map, &success, mode); 2033 b(ne, fail); 2034 bind(&success); 2035} 2036 2037 2038void MacroAssembler::CheckMap(Register obj, 2039 Register scratch, 2040 Heap::RootListIndex index, 2041 Label* fail, 2042 SmiCheckType smi_check_type) { 2043 if (smi_check_type == DO_SMI_CHECK) { 2044 JumpIfSmi(obj, fail); 2045 } 2046 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 2047 LoadRoot(ip, index); 2048 cmp(scratch, ip); 2049 b(ne, fail); 2050} 2051 2052 2053void MacroAssembler::DispatchMap(Register obj, 2054 Register scratch, 2055 Handle<Map> map, 2056 Handle<Code> success, 2057 SmiCheckType smi_check_type) { 2058 Label fail; 2059 if (smi_check_type == DO_SMI_CHECK) { 2060 JumpIfSmi(obj, &fail); 2061 } 2062 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 2063 mov(ip, Operand(map)); 2064 cmp(scratch, ip); 2065 Jump(success, RelocInfo::CODE_TARGET, eq); 
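// A map mismatch (or a smi) falls through to the failure label below and
// continues in the caller's code.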
2066 bind(&fail); 2067} 2068 2069 2070void MacroAssembler::TryGetFunctionPrototype(Register function, 2071 Register result, 2072 Register scratch, 2073 Label* miss, 2074 bool miss_on_bound_function) { 2075 // Check that the receiver isn't a smi. 2076 JumpIfSmi(function, miss); 2077 2078 // Check that the function really is a function. Load map into result reg. 2079 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); 2080 b(ne, miss); 2081 2082 if (miss_on_bound_function) { 2083 ldr(scratch, 2084 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 2085 ldr(scratch, 2086 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); 2087 tst(scratch, 2088 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); 2089 b(ne, miss); 2090 } 2091 2092 // Make sure that the function has an instance prototype. 2093 Label non_instance; 2094 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); 2095 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); 2096 b(ne, &non_instance); 2097 2098 // Get the prototype or initial map from the function. 2099 ldr(result, 2100 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2101 2102 // If the prototype or initial map is the hole, don't return it and 2103 // simply miss the cache instead. This will allow us to allocate a 2104 // prototype object on-demand in the runtime system. 2105 LoadRoot(ip, Heap::kTheHoleValueRootIndex); 2106 cmp(result, ip); 2107 b(eq, miss); 2108 2109 // If the function does not have an initial map, we're done. 2110 Label done; 2111 CompareObjectType(result, scratch, scratch, MAP_TYPE); 2112 b(ne, &done); 2113 2114 // Get the prototype from the initial map. 2115 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); 2116 jmp(&done); 2117 2118 // Non-instance prototype: Fetch prototype from constructor field 2119 // in initial map. 2120 bind(&non_instance); 2121 ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); 2122 2123 // All done. 2124 bind(&done); 2125} 2126 2127 2128void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { 2129 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 2130 Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond); 2131} 2132 2133 2134void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { 2135 ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); 2136 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); 2137} 2138 2139 2140static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { 2141 return ref0.address() - ref1.address(); 2142} 2143 2144 2145void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, 2146 int stack_space) { 2147 ExternalReference next_address = 2148 ExternalReference::handle_scope_next_address(); 2149 const int kNextOffset = 0; 2150 const int kLimitOffset = AddressOffset( 2151 ExternalReference::handle_scope_limit_address(), 2152 next_address); 2153 const int kLevelOffset = AddressOffset( 2154 ExternalReference::handle_scope_level_address(), 2155 next_address); 2156 2157 // Allocate HandleScope in callee-save registers. 2158 mov(r7, Operand(next_address)); 2159 ldr(r4, MemOperand(r7, kNextOffset)); 2160 ldr(r5, MemOperand(r7, kLimitOffset)); 2161 ldr(r6, MemOperand(r7, kLevelOffset)); 2162 add(r6, r6, Operand(1)); 2163 str(r6, MemOperand(r7, kLevelOffset)); 2164 2165 // Native call returns to the DirectCEntry stub which redirects to the 2166 // return address pushed on stack (could have moved after GC). 
2167 // DirectCEntry stub itself is generated early and never moves. 2168 DirectCEntryStub stub; 2169 stub.GenerateCall(this, function); 2170 2171 Label promote_scheduled_exception; 2172 Label delete_allocated_handles; 2173 Label leave_exit_frame; 2174 2175 // If result is non-zero, dereference to get the result value 2176 // otherwise set it to undefined. 2177 cmp(r0, Operand(0)); 2178 LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); 2179 ldr(r0, MemOperand(r0), ne); 2180 2181 // No more valid handles (the result handle was the last one). Restore 2182 // previous handle scope. 2183 str(r4, MemOperand(r7, kNextOffset)); 2184 if (emit_debug_code()) { 2185 ldr(r1, MemOperand(r7, kLevelOffset)); 2186 cmp(r1, r6); 2187 Check(eq, "Unexpected level after return from api call"); 2188 } 2189 sub(r6, r6, Operand(1)); 2190 str(r6, MemOperand(r7, kLevelOffset)); 2191 ldr(ip, MemOperand(r7, kLimitOffset)); 2192 cmp(r5, ip); 2193 b(ne, &delete_allocated_handles); 2194 2195 // Check if the function scheduled an exception. 2196 bind(&leave_exit_frame); 2197 LoadRoot(r4, Heap::kTheHoleValueRootIndex); 2198 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate()))); 2199 ldr(r5, MemOperand(ip)); 2200 cmp(r4, r5); 2201 b(ne, &promote_scheduled_exception); 2202 2203 // LeaveExitFrame expects unwind space to be in a register. 2204 mov(r4, Operand(stack_space)); 2205 LeaveExitFrame(false, r4); 2206 mov(pc, lr); 2207 2208 bind(&promote_scheduled_exception); 2209 TailCallExternalReference( 2210 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 2211 0, 2212 1); 2213 2214 // HandleScope limit has changed. Delete allocated extensions. 2215 bind(&delete_allocated_handles); 2216 str(r5, MemOperand(r7, kLimitOffset)); 2217 mov(r4, r0); 2218 PrepareCallCFunction(1, r5); 2219 mov(r0, Operand(ExternalReference::isolate_address())); 2220 CallCFunction( 2221 ExternalReference::delete_handle_scope_extensions(isolate()), 1); 2222 mov(r0, r4); 2223 jmp(&leave_exit_frame); 2224} 2225 2226 2227bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { 2228 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; 2229 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); 2230} 2231 2232 2233void MacroAssembler::IllegalOperation(int num_arguments) { 2234 if (num_arguments > 0) { 2235 add(sp, sp, Operand(num_arguments * kPointerSize)); 2236 } 2237 LoadRoot(r0, Heap::kUndefinedValueRootIndex); 2238} 2239 2240 2241void MacroAssembler::IndexFromHash(Register hash, Register index) { 2242 // If the hash field contains an array index pick it out. The assert checks 2243 // that the constants for the maximum number of digits for an array index 2244 // cached in the hash field and the number of bits reserved for it does not 2245 // conflict. 2246 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < 2247 (1 << String::kArrayIndexValueBits)); 2248 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in 2249 // the low kHashShift bits. 2250 STATIC_ASSERT(kSmiTag == 0); 2251 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); 2252 mov(index, Operand(hash, LSL, kSmiTagSize)); 2253} 2254 2255 2256void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg, 2257 Register outHighReg, 2258 Register outLowReg) { 2259 // ARMv7 VFP3 instructions to implement integer to double conversion. 
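// The sequence below untags the smi with an arithmetic shift, moves the
// untagged integer into a single-precision VFP register, converts it to
// double precision, and finally moves the two 32-bit halves of the
// result back out to the core register pair.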
2260 mov(r7, Operand(inReg, ASR, kSmiTagSize));
2261 vmov(s15, r7);
2262 vcvt_f64_s32(d7, s15);
2263 vmov(outLowReg, outHighReg, d7);
2264}
2265
2266
2267 void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
2268 DwVfpRegister result,
2269 Register scratch1,
2270 Register scratch2,
2271 Register heap_number_map,
2272 SwVfpRegister scratch3,
2273 Label* not_number,
2274 ObjectToDoubleFlags flags) {
2275 Label done;
2276 if ((flags & OBJECT_NOT_SMI) == 0) {
2277 Label not_smi;
2278 JumpIfNotSmi(object, &not_smi);
2279 // Remove smi tag and convert to double.
2280 mov(scratch1, Operand(object, ASR, kSmiTagSize));
2281 vmov(scratch3, scratch1);
2282 vcvt_f64_s32(result, scratch3);
2283 b(&done);
2284 bind(&not_smi);
2285 }
2286 // Check for heap number and load double value from it.
2287 ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
2288 sub(scratch2, object, Operand(kHeapObjectTag));
2289 cmp(scratch1, heap_number_map);
2290 b(ne, not_number);
2291 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
2292 // If exponent is all ones the number is either a NaN or +/-Infinity.
2293 ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
2294 Sbfx(scratch1,
2295 scratch1,
2296 HeapNumber::kExponentShift,
2297 HeapNumber::kExponentBits);
2298 // An all-one value sign-extends to -1.
2299 cmp(scratch1, Operand(-1));
2300 b(eq, not_number);
2301 }
2302 vldr(result, scratch2, HeapNumber::kValueOffset);
2303 bind(&done);
2304}
2305
2306
2307 void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
2308 DwVfpRegister value,
2309 Register scratch1,
2310 SwVfpRegister scratch2) {
2311 mov(scratch1, Operand(smi, ASR, kSmiTagSize));
2312 vmov(scratch2, scratch1);
2313 vcvt_f64_s32(value, scratch2);
2314}
2315
2316
2317 // Tries to get a signed int32 out of a double precision floating point heap
2318 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
2319 // 32-bit signed integer range.
2320 void MacroAssembler::ConvertToInt32(Register source,
2321 Register dest,
2322 Register scratch,
2323 Register scratch2,
2324 DwVfpRegister double_scratch,
2325 Label *not_int32) {
2326 if (CpuFeatures::IsSupported(VFP3)) {
2327 CpuFeatures::Scope scope(VFP3);
2328 sub(scratch, source, Operand(kHeapObjectTag));
2329 vldr(double_scratch, scratch, HeapNumber::kValueOffset);
2330 vcvt_s32_f64(double_scratch.low(), double_scratch);
2331 vmov(dest, double_scratch.low());
2332 // The signed vcvt instruction will saturate to the minimum (0x80000000) or
2333 // maximum (0x7fffffff) signed 32-bit integer when the double is out of
2334 // range. When subtracting one, the minimum signed integer becomes the
2335 // maximum signed integer.
2336 sub(scratch, dest, Operand(1));
2337 cmp(scratch, Operand(LONG_MAX - 1));
2338 // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
2339 b(ge, not_int32);
2340 } else {
2341 // This code is faster for doubles that are in the ranges -0x7fffffff to
2342 // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
2343 // the range of signed int32 values that are not Smis. Jumps to the label
2344 // 'not_int32' if the double isn't in the range -0x80000000.0 to
2345 // 0x80000000.0 (excluding the endpoints).
2346 Label right_exponent, done;
2347 // Get exponent word.
2348 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
2349 // Get exponent alone in scratch2.
2350 Ubfx(scratch2,
2351 scratch,
2352 HeapNumber::kExponentShift,
2353 HeapNumber::kExponentBits);
2354 // Load dest with zero.
We use this either for the final shift or 2355 // for the answer. 2356 mov(dest, Operand(0, RelocInfo::NONE)); 2357 // Check whether the exponent matches a 32 bit signed int that is not a Smi. 2358 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is 2359 // the exponent that we are fastest at and also the highest exponent we can 2360 // handle here. 2361 const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; 2362 // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we 2363 // split it up to avoid a constant pool entry. You can't do that in general 2364 // for cmp because of the overflow flag, but we know the exponent is in the 2365 // range 0-2047 so there is no overflow. 2366 int fudge_factor = 0x400; 2367 sub(scratch2, scratch2, Operand(fudge_factor)); 2368 cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); 2369 // If we have a match of the int32-but-not-Smi exponent then skip some 2370 // logic. 2371 b(eq, &right_exponent); 2372 // If the exponent is higher than that then go to slow case. This catches 2373 // numbers that don't fit in a signed int32, infinities and NaNs. 2374 b(gt, not_int32); 2375 2376 // We know the exponent is smaller than 30 (biased). If it is less than 2377 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. 2378 // it rounds to zero. 2379 const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; 2380 sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); 2381 // Dest already has a Smi zero. 2382 b(lt, &done); 2383 2384 // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to 2385 // get how much to shift down. 2386 rsb(dest, scratch2, Operand(30)); 2387 2388 bind(&right_exponent); 2389 // Get the top bits of the mantissa. 2390 and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); 2391 // Put back the implicit 1. 2392 orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); 2393 // Shift up the mantissa bits to take up the space the exponent used to 2394 // take. We just orred in the implicit bit so that took care of one and 2395 // we want to leave the sign bit 0 so we subtract 2 bits from the shift 2396 // distance. 2397 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; 2398 mov(scratch2, Operand(scratch2, LSL, shift_distance)); 2399 // Put sign in zero flag. 2400 tst(scratch, Operand(HeapNumber::kSignMask)); 2401 // Get the second half of the double. For some exponents we don't 2402 // actually need this because the bits get shifted out again, but 2403 // it's probably slower to test than just to do it. 2404 ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); 2405 // Shift down 22 bits to get the last 10 bits. 2406 orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); 2407 // Move down according to the exponent. 2408 mov(dest, Operand(scratch, LSR, dest)); 2409 // Fix sign if sign bit was set. 2410 rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne); 2411 bind(&done); 2412 } 2413} 2414 2415 2416void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, 2417 SwVfpRegister result, 2418 DwVfpRegister double_input, 2419 Register scratch1, 2420 Register scratch2, 2421 CheckForInexactConversion check_inexact) { 2422 ASSERT(CpuFeatures::IsSupported(VFP3)); 2423 CpuFeatures::Scope scope(VFP3); 2424 Register prev_fpscr = scratch1; 2425 Register scratch = scratch2; 2426 2427 int32_t check_inexact_conversion = 2428 (check_inexact == kCheckForInexactConversion) ? 
kVFPInexactExceptionBit : 0;
2429
2430 // Set custom FPSCR:
2431 // - Set rounding mode.
2432 // - Clear vfp cumulative exception flags.
2433 // - Make sure Flush-to-zero mode control bit is unset.
2434 vmrs(prev_fpscr);
2435 bic(scratch,
2436 prev_fpscr,
2437 Operand(kVFPExceptionMask |
2438 check_inexact_conversion |
2439 kVFPRoundingModeMask |
2440 kVFPFlushToZeroMask));
2441 // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
2442 if (rounding_mode != kRoundToNearest) {
2443 orr(scratch, scratch, Operand(rounding_mode));
2444 }
2445 vmsr(scratch);
2446
2447 // Convert the argument to an integer.
2448 vcvt_s32_f64(result,
2449 double_input,
2450 (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
2451 : kFPSCRRounding);
2452
2453 // Retrieve FPSCR.
2454 vmrs(scratch);
2455 // Restore FPSCR.
2456 vmsr(prev_fpscr);
2457 // Check for vfp exceptions.
2458 tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
2459}
2460
2461
2462 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
2463 Register input_high,
2464 Register input_low,
2465 Register scratch) {
2466 Label done, normal_exponent, restore_sign;
2467
2468 // Extract the biased exponent in result.
2469 Ubfx(result,
2470 input_high,
2471 HeapNumber::kExponentShift,
2472 HeapNumber::kExponentBits);
2473
2474 // Check for Infinity and NaNs, which should return 0.
2475 cmp(result, Operand(HeapNumber::kExponentMask));
2476 mov(result, Operand(0), LeaveCC, eq);
2477 b(eq, &done);
2478
2479 // Express exponent as delta to (number of mantissa bits + 31).
2480 sub(result,
2481 result,
2482 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
2483 SetCC);
2484
2485 // If the delta is strictly positive, all bits would be shifted away,
2486 // which means that we can return 0.
2487 b(le, &normal_exponent);
2488 mov(result, Operand(0));
2489 b(&done);
2490
2491 bind(&normal_exponent);
2492 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2493 // Calculate shift.
2494 add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
2495
2496 // Save the sign.
2497 Register sign = result;
2498 result = no_reg;
2499 and_(sign, input_high, Operand(HeapNumber::kSignMask));
2500
2501 // Set the implicit 1 before the mantissa part in input_high.
2502 orr(input_high,
2503 input_high,
2504 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2505 // Shift the mantissa bits to the correct position.
2506 // We don't need to clear non-mantissa bits as they will be shifted away.
2507 // If they weren't, it would mean that the answer is in the 32-bit range.
2508 mov(input_high, Operand(input_high, LSL, scratch));
2509
2510 // Replace the shifted bits with bits from the lower mantissa word.
2511 Label pos_shift, shift_done;
2512 rsb(scratch, scratch, Operand(32), SetCC);
2513 b(&pos_shift, ge);
2514
2515 // Negate scratch.
2516 rsb(scratch, scratch, Operand(0));
2517 mov(input_low, Operand(input_low, LSL, scratch));
2518 b(&shift_done);
2519
2520 bind(&pos_shift);
2521 mov(input_low, Operand(input_low, LSR, scratch));
2522
2523 bind(&shift_done);
2524 orr(input_high, input_high, Operand(input_low));
2525 // Restore sign if necessary.
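// (The conditional rsb computes 0 - input_high, i.e. the two's
// complement negation, when the sign bit was set; the conditional mov
// keeps the magnitude unchanged for positive inputs.)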
2526 cmp(sign, Operand(0));
2527 result = sign;
2528 sign = no_reg;
2529 rsb(result, input_high, Operand(0), LeaveCC, ne);
2530 mov(result, input_high, LeaveCC, eq);
2531 bind(&done);
2532}
2533
2534
2535 void MacroAssembler::EmitECMATruncate(Register result,
2536 DwVfpRegister double_input,
2537 SwVfpRegister single_scratch,
2538 Register scratch,
2539 Register input_high,
2540 Register input_low) {
2541 CpuFeatures::Scope scope(VFP3);
2542 ASSERT(!input_high.is(result));
2543 ASSERT(!input_low.is(result));
2544 ASSERT(!input_low.is(input_high));
2545 ASSERT(!scratch.is(result) &&
2546 !scratch.is(input_high) &&
2547 !scratch.is(input_low));
2548 ASSERT(!single_scratch.is(double_input.low()) &&
2549 !single_scratch.is(double_input.high()));
2550
2551 Label done;
2552
2553 // Clear cumulative exception flags.
2554 ClearFPSCRBits(kVFPExceptionMask, scratch);
2555 // Try a conversion to a signed integer.
2556 vcvt_s32_f64(single_scratch, double_input);
2557 vmov(result, single_scratch);
2558 // Retrieve the FPSCR.
2559 vmrs(scratch);
2560 // Check for overflow and NaNs.
2561 tst(scratch, Operand(kVFPOverflowExceptionBit |
2562 kVFPUnderflowExceptionBit |
2563 kVFPInvalidOpExceptionBit));
2564 // If we had no exceptions we are done.
2565 b(eq, &done);
2566
2567 // Load the double value and perform a manual truncation.
2568 vmov(input_low, input_high, double_input);
2569 EmitOutOfInt32RangeTruncate(result,
2570 input_high,
2571 input_low,
2572 scratch);
2573 bind(&done);
2574}
2575
2576
2577 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2578 Register src,
2579 int num_least_bits) {
2580 if (CpuFeatures::IsSupported(ARMv7)) {
2581 ubfx(dst, src, kSmiTagSize, num_least_bits);
2582 } else {
2583 mov(dst, Operand(src, ASR, kSmiTagSize));
2584 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2585 }
2586}
2587
2588
2589 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2590 Register src,
2591 int num_least_bits) {
2592 and_(dst, src, Operand((1 << num_least_bits) - 1));
2593}
2594
2595
2596 void MacroAssembler::CallRuntime(const Runtime::Function* f,
2597 int num_arguments) {
2598 // All parameters are on the stack. r0 has the return value after call.
2599
2600 // If the expected number of arguments of the runtime function is
2601 // constant, we check that the actual number of arguments matches the
2602 // expectation.
2603 if (f->nargs >= 0 && f->nargs != num_arguments) {
2604 IllegalOperation(num_arguments);
2605 return;
2606 }
2607
2608 // TODO(1236192): Most runtime routines don't need the number of
2609 // arguments passed in because it is constant. At some point we
2610 // should remove this need and make the runtime routine entry code
2611 // smarter.
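// Runtime calling convention on ARM: r0 carries the argument count and
// r1 the entry point of the runtime function; CEntryStub then performs
// the actual transition into the C++ runtime.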
2612 mov(r0, Operand(num_arguments)); 2613 mov(r1, Operand(ExternalReference(f, isolate()))); 2614 CEntryStub stub(1); 2615 CallStub(&stub); 2616} 2617 2618 2619void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { 2620 CallRuntime(Runtime::FunctionForId(fid), num_arguments); 2621} 2622 2623 2624void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { 2625 const Runtime::Function* function = Runtime::FunctionForId(id); 2626 mov(r0, Operand(function->nargs)); 2627 mov(r1, Operand(ExternalReference(function, isolate()))); 2628 CEntryStub stub(1, kSaveFPRegs); 2629 CallStub(&stub); 2630} 2631 2632 2633void MacroAssembler::CallExternalReference(const ExternalReference& ext, 2634 int num_arguments) { 2635 mov(r0, Operand(num_arguments)); 2636 mov(r1, Operand(ext)); 2637 2638 CEntryStub stub(1); 2639 CallStub(&stub); 2640} 2641 2642 2643void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, 2644 int num_arguments, 2645 int result_size) { 2646 // TODO(1236192): Most runtime routines don't need the number of 2647 // arguments passed in because it is constant. At some point we 2648 // should remove this need and make the runtime routine entry code 2649 // smarter. 2650 mov(r0, Operand(num_arguments)); 2651 JumpToExternalReference(ext); 2652} 2653 2654 2655void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, 2656 int num_arguments, 2657 int result_size) { 2658 TailCallExternalReference(ExternalReference(fid, isolate()), 2659 num_arguments, 2660 result_size); 2661} 2662 2663 2664void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { 2665#if defined(__thumb__) 2666 // Thumb mode builtin. 2667 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); 2668#endif 2669 mov(r1, Operand(builtin)); 2670 CEntryStub stub(1); 2671 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); 2672} 2673 2674 2675void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 2676 InvokeFlag flag, 2677 const CallWrapper& call_wrapper) { 2678 // You can't call a builtin without a valid frame. 2679 ASSERT(flag == JUMP_FUNCTION || has_frame()); 2680 2681 GetBuiltinEntry(r2, id); 2682 if (flag == CALL_FUNCTION) { 2683 call_wrapper.BeforeCall(CallSize(r2)); 2684 SetCallKind(r5, CALL_AS_METHOD); 2685 Call(r2); 2686 call_wrapper.AfterCall(); 2687 } else { 2688 ASSERT(flag == JUMP_FUNCTION); 2689 SetCallKind(r5, CALL_AS_METHOD); 2690 Jump(r2); 2691 } 2692} 2693 2694 2695void MacroAssembler::GetBuiltinFunction(Register target, 2696 Builtins::JavaScript id) { 2697 // Load the builtins object into target register. 2698 ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); 2699 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); 2700 // Load the JavaScript builtin function from the builtins object. 2701 ldr(target, FieldMemOperand(target, 2702 JSBuiltinsObject::OffsetOfFunctionWithId(id))); 2703} 2704 2705 2706void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { 2707 ASSERT(!target.is(r1)); 2708 GetBuiltinFunction(r1, id); 2709 // Load the code entry point from the builtins object. 
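// (GetBuiltinFunction deliberately targeted r1 above: builtins expect
// their JSFunction in r1, so only the code entry point goes into the
// caller-chosen target register.)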
2710 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 2711} 2712 2713 2714void MacroAssembler::SetCounter(StatsCounter* counter, int value, 2715 Register scratch1, Register scratch2) { 2716 if (FLAG_native_code_counters && counter->Enabled()) { 2717 mov(scratch1, Operand(value)); 2718 mov(scratch2, Operand(ExternalReference(counter))); 2719 str(scratch1, MemOperand(scratch2)); 2720 } 2721} 2722 2723 2724void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, 2725 Register scratch1, Register scratch2) { 2726 ASSERT(value > 0); 2727 if (FLAG_native_code_counters && counter->Enabled()) { 2728 mov(scratch2, Operand(ExternalReference(counter))); 2729 ldr(scratch1, MemOperand(scratch2)); 2730 add(scratch1, scratch1, Operand(value)); 2731 str(scratch1, MemOperand(scratch2)); 2732 } 2733} 2734 2735 2736void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, 2737 Register scratch1, Register scratch2) { 2738 ASSERT(value > 0); 2739 if (FLAG_native_code_counters && counter->Enabled()) { 2740 mov(scratch2, Operand(ExternalReference(counter))); 2741 ldr(scratch1, MemOperand(scratch2)); 2742 sub(scratch1, scratch1, Operand(value)); 2743 str(scratch1, MemOperand(scratch2)); 2744 } 2745} 2746 2747 2748void MacroAssembler::Assert(Condition cond, const char* msg) { 2749 if (emit_debug_code()) 2750 Check(cond, msg); 2751} 2752 2753 2754void MacroAssembler::AssertRegisterIsRoot(Register reg, 2755 Heap::RootListIndex index) { 2756 if (emit_debug_code()) { 2757 LoadRoot(ip, index); 2758 cmp(reg, ip); 2759 Check(eq, "Register did not match expected root"); 2760 } 2761} 2762 2763 2764void MacroAssembler::AssertFastElements(Register elements) { 2765 if (emit_debug_code()) { 2766 ASSERT(!elements.is(ip)); 2767 Label ok; 2768 push(elements); 2769 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); 2770 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); 2771 cmp(elements, ip); 2772 b(eq, &ok); 2773 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); 2774 cmp(elements, ip); 2775 b(eq, &ok); 2776 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); 2777 cmp(elements, ip); 2778 b(eq, &ok); 2779 Abort("JSObject with fast elements map has slow elements"); 2780 bind(&ok); 2781 pop(elements); 2782 } 2783} 2784 2785 2786void MacroAssembler::Check(Condition cond, const char* msg) { 2787 Label L; 2788 b(cond, &L); 2789 Abort(msg); 2790 // will not return here 2791 bind(&L); 2792} 2793 2794 2795void MacroAssembler::Abort(const char* msg) { 2796 Label abort_start; 2797 bind(&abort_start); 2798 // We want to pass the msg string like a smi to avoid GC 2799 // problems, however msg is not guaranteed to be aligned 2800 // properly. Instead, we pass an aligned pointer that is 2801 // a proper v8 smi, but also pass the alignment difference 2802 // from the real pointer as a smi. 2803 intptr_t p1 = reinterpret_cast<intptr_t>(msg); 2804 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; 2805 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); 2806#ifdef DEBUG 2807 if (msg != NULL) { 2808 RecordComment("Abort message: "); 2809 RecordComment(msg); 2810 } 2811#endif 2812 2813 mov(r0, Operand(p0)); 2814 push(r0); 2815 mov(r0, Operand(Smi::FromInt(p1 - p0))); 2816 push(r0); 2817 // Disable stub call restrictions to always allow calls to abort. 2818 if (!has_frame_) { 2819 // We don't actually want to generate a pile of code for this, so just 2820 // claim there is a stack frame, without generating one. 
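// (A FrameScope with StackFrame::NONE only marks has_frame_ for the
// duration of the scope so the CallRuntime below passes its frame
// assertion; no frame-building code is emitted, which is exactly the
// "claim" described above.)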
2821 FrameScope scope(this, StackFrame::NONE);
2822 CallRuntime(Runtime::kAbort, 2);
2823 } else {
2824 CallRuntime(Runtime::kAbort, 2);
2825 }
2826 // will not return here
2827 if (is_const_pool_blocked()) {
2828 // If the calling code cares about the exact number of
2829 // instructions generated, we insert padding here to keep the size
2830 // of the Abort macro constant.
2831 static const int kExpectedAbortInstructions = 10;
2832 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2833 ASSERT(abort_instructions <= kExpectedAbortInstructions);
2834 while (abort_instructions++ < kExpectedAbortInstructions) {
2835 nop();
2836 }
2837 }
2838}
2839
2840
2841 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2842 if (context_chain_length > 0) {
2843 // Move up the chain of contexts to the context containing the slot.
2844 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2845 for (int i = 1; i < context_chain_length; i++) {
2846 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2847 }
2848 } else {
2849 // Slot is in the current function context. Move it into the
2850 // destination register in case we store into it (the write barrier
2851 // cannot be allowed to destroy the context in cp).
2852 mov(dst, cp);
2853 }
2854}
2855
2856
2857 void MacroAssembler::LoadTransitionedArrayMapConditional(
2858 ElementsKind expected_kind,
2859 ElementsKind transitioned_kind,
2860 Register map_in_out,
2861 Register scratch,
2862 Label* no_map_match) {
2863 // Load the global or builtins object from the current context.
2864 ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2865 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
2866
2867 // Check that the function's map is the same as the expected cached map.
2868 int expected_index =
2869 Context::GetContextMapIndexFromElementsKind(expected_kind);
2870 ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
2871 cmp(map_in_out, ip);
2872 b(ne, no_map_match);
2873
2874 // Use the transitioned cached map.
2875 int trans_index =
2876 Context::GetContextMapIndexFromElementsKind(transitioned_kind);
2877 ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
2878}
2879
2880
2881 void MacroAssembler::LoadInitialArrayMap(
2882 Register function_in, Register scratch, Register map_out) {
2883 ASSERT(!function_in.is(map_out));
2884 Label done;
2885 ldr(map_out, FieldMemOperand(function_in,
2886 JSFunction::kPrototypeOrInitialMapOffset));
2887 if (!FLAG_smi_only_arrays) {
2888 LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
2889 FAST_ELEMENTS,
2890 map_out,
2891 scratch,
2892 &done);
2893 }
2894 bind(&done);
2895}
2896
2897
2898 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2899 // Load the global or builtins object from the current context.
2900 ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2901 // Load the global context from the global or builtins object.
2902 ldr(function, FieldMemOperand(function,
2903 GlobalObject::kGlobalContextOffset));
2904 // Load the function from the global context.
2905 ldr(function, MemOperand(function, Context::SlotOffset(index)));
2906}
2907
2908
2909 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2910 Register map,
2911 Register scratch) {
2912 // Load the initial map. The global functions all have initial maps.
2913 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2914 if (emit_debug_code()) { 2915 Label ok, fail; 2916 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); 2917 b(&ok); 2918 bind(&fail); 2919 Abort("Global functions must have initial map"); 2920 bind(&ok); 2921 } 2922} 2923 2924 2925void MacroAssembler::JumpIfNotPowerOfTwoOrZero( 2926 Register reg, 2927 Register scratch, 2928 Label* not_power_of_two_or_zero) { 2929 sub(scratch, reg, Operand(1), SetCC); 2930 b(mi, not_power_of_two_or_zero); 2931 tst(scratch, reg); 2932 b(ne, not_power_of_two_or_zero); 2933} 2934 2935 2936void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg( 2937 Register reg, 2938 Register scratch, 2939 Label* zero_and_neg, 2940 Label* not_power_of_two) { 2941 sub(scratch, reg, Operand(1), SetCC); 2942 b(mi, zero_and_neg); 2943 tst(scratch, reg); 2944 b(ne, not_power_of_two); 2945} 2946 2947 2948void MacroAssembler::JumpIfNotBothSmi(Register reg1, 2949 Register reg2, 2950 Label* on_not_both_smi) { 2951 STATIC_ASSERT(kSmiTag == 0); 2952 tst(reg1, Operand(kSmiTagMask)); 2953 tst(reg2, Operand(kSmiTagMask), eq); 2954 b(ne, on_not_both_smi); 2955} 2956 2957 2958void MacroAssembler::UntagAndJumpIfSmi( 2959 Register dst, Register src, Label* smi_case) { 2960 STATIC_ASSERT(kSmiTag == 0); 2961 mov(dst, Operand(src, ASR, kSmiTagSize), SetCC); 2962 b(cc, smi_case); // Shifter carry is not set for a smi. 2963} 2964 2965 2966void MacroAssembler::UntagAndJumpIfNotSmi( 2967 Register dst, Register src, Label* non_smi_case) { 2968 STATIC_ASSERT(kSmiTag == 0); 2969 mov(dst, Operand(src, ASR, kSmiTagSize), SetCC); 2970 b(cs, non_smi_case); // Shifter carry is set for a non-smi. 2971} 2972 2973 2974void MacroAssembler::JumpIfEitherSmi(Register reg1, 2975 Register reg2, 2976 Label* on_either_smi) { 2977 STATIC_ASSERT(kSmiTag == 0); 2978 tst(reg1, Operand(kSmiTagMask)); 2979 tst(reg2, Operand(kSmiTagMask), ne); 2980 b(eq, on_either_smi); 2981} 2982 2983 2984void MacroAssembler::AbortIfSmi(Register object) { 2985 STATIC_ASSERT(kSmiTag == 0); 2986 tst(object, Operand(kSmiTagMask)); 2987 Assert(ne, "Operand is a smi"); 2988} 2989 2990 2991void MacroAssembler::AbortIfNotSmi(Register object) { 2992 STATIC_ASSERT(kSmiTag == 0); 2993 tst(object, Operand(kSmiTagMask)); 2994 Assert(eq, "Operand is not smi"); 2995} 2996 2997 2998void MacroAssembler::AbortIfNotString(Register object) { 2999 STATIC_ASSERT(kSmiTag == 0); 3000 tst(object, Operand(kSmiTagMask)); 3001 Assert(ne, "Operand is not a string"); 3002 push(object); 3003 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); 3004 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); 3005 pop(object); 3006 Assert(lo, "Operand is not a string"); 3007} 3008 3009 3010 3011void MacroAssembler::AbortIfNotRootValue(Register src, 3012 Heap::RootListIndex root_value_index, 3013 const char* message) { 3014 CompareRoot(src, root_value_index); 3015 Assert(eq, message); 3016} 3017 3018 3019void MacroAssembler::JumpIfNotHeapNumber(Register object, 3020 Register heap_number_map, 3021 Register scratch, 3022 Label* on_not_heap_number) { 3023 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 3024 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 3025 cmp(scratch, heap_number_map); 3026 b(ne, on_not_heap_number); 3027} 3028 3029 3030void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( 3031 Register first, 3032 Register second, 3033 Register scratch1, 3034 Register scratch2, 3035 Label* failure) { 3036 // Test 
that both first and second are sequential ASCII strings. 3037 // Assume that they are non-smis. 3038 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); 3039 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); 3040 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); 3041 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); 3042 3043 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, 3044 scratch2, 3045 scratch1, 3046 scratch2, 3047 failure); 3048} 3049 3050void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, 3051 Register second, 3052 Register scratch1, 3053 Register scratch2, 3054 Label* failure) { 3055 // Check that neither is a smi. 3056 STATIC_ASSERT(kSmiTag == 0); 3057 and_(scratch1, first, Operand(second)); 3058 JumpIfSmi(scratch1, failure); 3059 JumpIfNonSmisNotBothSequentialAsciiStrings(first, 3060 second, 3061 scratch1, 3062 scratch2, 3063 failure); 3064} 3065 3066 3067// Allocates a heap number or jumps to the need_gc label if the young space 3068// is full and a scavenge is needed. 3069void MacroAssembler::AllocateHeapNumber(Register result, 3070 Register scratch1, 3071 Register scratch2, 3072 Register heap_number_map, 3073 Label* gc_required) { 3074 // Allocate an object in the heap for the heap number and tag it as a heap 3075 // object. 3076 AllocateInNewSpace(HeapNumber::kSize, 3077 result, 3078 scratch1, 3079 scratch2, 3080 gc_required, 3081 TAG_OBJECT); 3082 3083 // Store heap number map in the allocated object. 3084 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 3085 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); 3086} 3087 3088 3089void MacroAssembler::AllocateHeapNumberWithValue(Register result, 3090 DwVfpRegister value, 3091 Register scratch1, 3092 Register scratch2, 3093 Register heap_number_map, 3094 Label* gc_required) { 3095 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); 3096 sub(scratch1, result, Operand(kHeapObjectTag)); 3097 vstr(value, scratch1, HeapNumber::kValueOffset); 3098} 3099 3100 3101// Copies a fixed number of fields of heap objects from src to dst. 3102void MacroAssembler::CopyFields(Register dst, 3103 Register src, 3104 RegList temps, 3105 int field_count) { 3106 // At least one bit set in the first 15 registers. 3107 ASSERT((temps & ((1 << 15) - 1)) != 0); 3108 ASSERT((temps & dst.bit()) == 0); 3109 ASSERT((temps & src.bit()) == 0); 3110 // Primitive implementation using only one temporary register. 3111 3112 Register tmp = no_reg; 3113 // Find a temp register in temps list. 3114 for (int i = 0; i < 15; i++) { 3115 if ((temps & (1 << i)) != 0) { 3116 tmp.set_code(i); 3117 break; 3118 } 3119 } 3120 ASSERT(!tmp.is(no_reg)); 3121 3122 for (int i = 0; i < field_count; i++) { 3123 ldr(tmp, FieldMemOperand(src, i * kPointerSize)); 3124 str(tmp, FieldMemOperand(dst, i * kPointerSize)); 3125 } 3126} 3127 3128 3129void MacroAssembler::CopyBytes(Register src, 3130 Register dst, 3131 Register length, 3132 Register scratch) { 3133 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; 3134 3135 // Align src before copying in word size chunks. 
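// The copy proceeds in three phases: single bytes until src is
// word-aligned, then whole words, then any remaining tail bytes one at
// a time.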
3136 bind(&align_loop); 3137 cmp(length, Operand(0)); 3138 b(eq, &done); 3139 bind(&align_loop_1); 3140 tst(src, Operand(kPointerSize - 1)); 3141 b(eq, &word_loop); 3142 ldrb(scratch, MemOperand(src, 1, PostIndex)); 3143 strb(scratch, MemOperand(dst, 1, PostIndex)); 3144 sub(length, length, Operand(1), SetCC); 3145 b(ne, &byte_loop_1); 3146 3147 // Copy bytes in word size chunks. 3148 bind(&word_loop); 3149 if (emit_debug_code()) { 3150 tst(src, Operand(kPointerSize - 1)); 3151 Assert(eq, "Expecting alignment for CopyBytes"); 3152 } 3153 cmp(length, Operand(kPointerSize)); 3154 b(lt, &byte_loop); 3155 ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); 3156#if CAN_USE_UNALIGNED_ACCESSES 3157 str(scratch, MemOperand(dst, kPointerSize, PostIndex)); 3158#else 3159 strb(scratch, MemOperand(dst, 1, PostIndex)); 3160 mov(scratch, Operand(scratch, LSR, 8)); 3161 strb(scratch, MemOperand(dst, 1, PostIndex)); 3162 mov(scratch, Operand(scratch, LSR, 8)); 3163 strb(scratch, MemOperand(dst, 1, PostIndex)); 3164 mov(scratch, Operand(scratch, LSR, 8)); 3165 strb(scratch, MemOperand(dst, 1, PostIndex)); 3166#endif 3167 sub(length, length, Operand(kPointerSize)); 3168 b(&word_loop); 3169 3170 // Copy the last bytes if any left. 3171 bind(&byte_loop); 3172 cmp(length, Operand(0)); 3173 b(eq, &done); 3174 bind(&byte_loop_1); 3175 ldrb(scratch, MemOperand(src, 1, PostIndex)); 3176 strb(scratch, MemOperand(dst, 1, PostIndex)); 3177 sub(length, length, Operand(1), SetCC); 3178 b(ne, &byte_loop_1); 3179 bind(&done); 3180} 3181 3182 3183void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, 3184 Register end_offset, 3185 Register filler) { 3186 Label loop, entry; 3187 b(&entry); 3188 bind(&loop); 3189 str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); 3190 bind(&entry); 3191 cmp(start_offset, end_offset); 3192 b(lt, &loop); 3193} 3194 3195 3196void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. 3197 Register source, // Input. 3198 Register scratch) { 3199 ASSERT(!zeros.is(source) || !source.is(scratch)); 3200 ASSERT(!zeros.is(scratch)); 3201 ASSERT(!scratch.is(ip)); 3202 ASSERT(!source.is(ip)); 3203 ASSERT(!zeros.is(ip)); 3204#ifdef CAN_USE_ARMV5_INSTRUCTIONS 3205 clz(zeros, source); // This instruction is only supported after ARM5. 3206#else 3207 // Order of the next two lines is important: zeros register 3208 // can be the same as source register. 3209 Move(scratch, source); 3210 mov(zeros, Operand(0, RelocInfo::NONE)); 3211 // Top 16. 3212 tst(scratch, Operand(0xffff0000)); 3213 add(zeros, zeros, Operand(16), LeaveCC, eq); 3214 mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq); 3215 // Top 8. 3216 tst(scratch, Operand(0xff000000)); 3217 add(zeros, zeros, Operand(8), LeaveCC, eq); 3218 mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq); 3219 // Top 4. 3220 tst(scratch, Operand(0xf0000000)); 3221 add(zeros, zeros, Operand(4), LeaveCC, eq); 3222 mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq); 3223 // Top 2. 3224 tst(scratch, Operand(0xc0000000)); 3225 add(zeros, zeros, Operand(2), LeaveCC, eq); 3226 mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq); 3227 // Top bit. 
3228 tst(scratch, Operand(0x80000000u)); 3229 add(zeros, zeros, Operand(1), LeaveCC, eq); 3230#endif 3231} 3232 3233 3234void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( 3235 Register first, 3236 Register second, 3237 Register scratch1, 3238 Register scratch2, 3239 Label* failure) { 3240 int kFlatAsciiStringMask = 3241 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 3242 int kFlatAsciiStringTag = ASCII_STRING_TYPE; 3243 and_(scratch1, first, Operand(kFlatAsciiStringMask)); 3244 and_(scratch2, second, Operand(kFlatAsciiStringMask)); 3245 cmp(scratch1, Operand(kFlatAsciiStringTag)); 3246 // Ignore second test if first test failed. 3247 cmp(scratch2, Operand(kFlatAsciiStringTag), eq); 3248 b(ne, failure); 3249} 3250 3251 3252void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, 3253 Register scratch, 3254 Label* failure) { 3255 int kFlatAsciiStringMask = 3256 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 3257 int kFlatAsciiStringTag = ASCII_STRING_TYPE; 3258 and_(scratch, type, Operand(kFlatAsciiStringMask)); 3259 cmp(scratch, Operand(kFlatAsciiStringTag)); 3260 b(ne, failure); 3261} 3262 3263static const int kRegisterPassedArguments = 4; 3264 3265 3266int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, 3267 int num_double_arguments) { 3268 int stack_passed_words = 0; 3269 if (use_eabi_hardfloat()) { 3270 // In the hard floating point calling convention, we can use 3271 // all double registers to pass doubles. 3272 if (num_double_arguments > DoubleRegister::kNumRegisters) { 3273 stack_passed_words += 3274 2 * (num_double_arguments - DoubleRegister::kNumRegisters); 3275 } 3276 } else { 3277 // In the soft floating point calling convention, every double 3278 // argument is passed using two registers. 3279 num_reg_arguments += 2 * num_double_arguments; 3280 } 3281 // Up to four simple arguments are passed in registers r0..r3. 3282 if (num_reg_arguments > kRegisterPassedArguments) { 3283 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; 3284 } 3285 return stack_passed_words; 3286} 3287 3288 3289void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, 3290 int num_double_arguments, 3291 Register scratch) { 3292 int frame_alignment = ActivationFrameAlignment(); 3293 int stack_passed_arguments = CalculateStackPassedWords( 3294 num_reg_arguments, num_double_arguments); 3295 if (frame_alignment > kPointerSize) { 3296 // Make stack end at alignment and make room for num_arguments - 4 words 3297 // and the original value of sp. 
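// Worked example, assuming an 8-byte activation frame alignment: five
// register arguments spill one word to the stack, so two words are
// reserved, sp is rounded down to the alignment, and the original sp is
// stored in the slot above the spilled argument so it can be reloaded
// when the call returns.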
3298 mov(scratch, sp); 3299 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); 3300 ASSERT(IsPowerOf2(frame_alignment)); 3301 and_(sp, sp, Operand(-frame_alignment)); 3302 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); 3303 } else { 3304 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); 3305 } 3306} 3307 3308 3309void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, 3310 Register scratch) { 3311 PrepareCallCFunction(num_reg_arguments, 0, scratch); 3312} 3313 3314 3315void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { 3316 if (use_eabi_hardfloat()) { 3317 Move(d0, dreg); 3318 } else { 3319 vmov(r0, r1, dreg); 3320 } 3321} 3322 3323 3324void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, 3325 DoubleRegister dreg2) { 3326 if (use_eabi_hardfloat()) { 3327 if (dreg2.is(d0)) { 3328 ASSERT(!dreg1.is(d1)); 3329 Move(d1, dreg2); 3330 Move(d0, dreg1); 3331 } else { 3332 Move(d0, dreg1); 3333 Move(d1, dreg2); 3334 } 3335 } else { 3336 vmov(r0, r1, dreg1); 3337 vmov(r2, r3, dreg2); 3338 } 3339} 3340 3341 3342void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, 3343 Register reg) { 3344 if (use_eabi_hardfloat()) { 3345 Move(d0, dreg); 3346 Move(r0, reg); 3347 } else { 3348 Move(r2, reg); 3349 vmov(r0, r1, dreg); 3350 } 3351} 3352 3353 3354void MacroAssembler::CallCFunction(ExternalReference function, 3355 int num_reg_arguments, 3356 int num_double_arguments) { 3357 mov(ip, Operand(function)); 3358 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); 3359} 3360 3361 3362void MacroAssembler::CallCFunction(Register function, 3363 int num_reg_arguments, 3364 int num_double_arguments) { 3365 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); 3366} 3367 3368 3369void MacroAssembler::CallCFunction(ExternalReference function, 3370 int num_arguments) { 3371 CallCFunction(function, num_arguments, 0); 3372} 3373 3374 3375void MacroAssembler::CallCFunction(Register function, 3376 int num_arguments) { 3377 CallCFunction(function, num_arguments, 0); 3378} 3379 3380 3381void MacroAssembler::CallCFunctionHelper(Register function, 3382 int num_reg_arguments, 3383 int num_double_arguments) { 3384 ASSERT(has_frame()); 3385 // Make sure that the stack is aligned before calling a C function unless 3386 // running in the simulator. The simulator has its own alignment check which 3387 // provides more information. 3388#if defined(V8_HOST_ARCH_ARM) 3389 if (emit_debug_code()) { 3390 int frame_alignment = OS::ActivationFrameAlignment(); 3391 int frame_alignment_mask = frame_alignment - 1; 3392 if (frame_alignment > kPointerSize) { 3393 ASSERT(IsPowerOf2(frame_alignment)); 3394 Label alignment_as_expected; 3395 tst(sp, Operand(frame_alignment_mask)); 3396 b(eq, &alignment_as_expected); 3397 // Don't use Check here, as it will call Runtime_Abort possibly 3398 // re-entering here. 3399 stop("Unexpected alignment"); 3400 bind(&alignment_as_expected); 3401 } 3402 } 3403#endif 3404 3405 // Just call directly. The function called cannot cause a GC, or 3406 // allow preemption, so the return address in the link register 3407 // stays correct. 
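// The argument area reserved by PrepareCallCFunction is released after
// the call below: the aligned case reloads the saved sp from the stack,
// the unaligned case simply pops the argument slots.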
3408 Call(function); 3409 int stack_passed_arguments = CalculateStackPassedWords( 3410 num_reg_arguments, num_double_arguments); 3411 if (ActivationFrameAlignment() > kPointerSize) { 3412 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); 3413 } else { 3414 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); 3415 } 3416} 3417 3418 3419void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, 3420 Register result) { 3421 const uint32_t kLdrOffsetMask = (1 << 12) - 1; 3422 const int32_t kPCRegOffset = 2 * kPointerSize; 3423 ldr(result, MemOperand(ldr_location)); 3424 if (emit_debug_code()) { 3425 // Check that the instruction is a ldr reg, [pc + offset] . 3426 and_(result, result, Operand(kLdrPCPattern)); 3427 cmp(result, Operand(kLdrPCPattern)); 3428 Check(eq, "The instruction to patch should be a load from pc."); 3429 // Result was clobbered. Restore it. 3430 ldr(result, MemOperand(ldr_location)); 3431 } 3432 // Get the address of the constant. 3433 and_(result, result, Operand(kLdrOffsetMask)); 3434 add(result, ldr_location, Operand(result)); 3435 add(result, result, Operand(kPCRegOffset)); 3436} 3437 3438 3439void MacroAssembler::CheckPageFlag( 3440 Register object, 3441 Register scratch, 3442 int mask, 3443 Condition cc, 3444 Label* condition_met) { 3445 and_(scratch, object, Operand(~Page::kPageAlignmentMask)); 3446 ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); 3447 tst(scratch, Operand(mask)); 3448 b(cc, condition_met); 3449} 3450 3451 3452void MacroAssembler::JumpIfBlack(Register object, 3453 Register scratch0, 3454 Register scratch1, 3455 Label* on_black) { 3456 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. 3457 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 3458} 3459 3460 3461void MacroAssembler::HasColor(Register object, 3462 Register bitmap_scratch, 3463 Register mask_scratch, 3464 Label* has_color, 3465 int first_bit, 3466 int second_bit) { 3467 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); 3468 3469 GetMarkBits(object, bitmap_scratch, mask_scratch); 3470 3471 Label other_color, word_boundary; 3472 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 3473 tst(ip, Operand(mask_scratch)); 3474 b(first_bit == 1 ? eq : ne, &other_color); 3475 // Shift left 1 by adding. 3476 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); 3477 b(eq, &word_boundary); 3478 tst(ip, Operand(mask_scratch)); 3479 b(second_bit == 1 ? ne : eq, has_color); 3480 jmp(&other_color); 3481 3482 bind(&word_boundary); 3483 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); 3484 tst(ip, Operand(1)); 3485 b(second_bit == 1 ? ne : eq, has_color); 3486 bind(&other_color); 3487} 3488 3489 3490// Detect some, but not all, common pointer-free objects. This is used by the 3491// incremental write barrier which doesn't care about oddballs (they are always 3492// marked black immediately so this code is not hit). 3493void MacroAssembler::JumpIfDataObject(Register value, 3494 Register scratch, 3495 Label* not_data_object) { 3496 Label is_data_object; 3497 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); 3498 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); 3499 b(eq, &is_data_object); 3500 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); 3501 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); 3502 // If it's a string and it's not a cons string then it's an object containing 3503 // no GC pointers. 
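// Both properties are checked with a single tst: kIsNotStringMask (0x80)
// and kIsIndirectStringMask (0x1) occupy disjoint bits, and the combined
// test is zero exactly for direct (sequential or external) strings.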
// Detect some, but not all, common pointer-free objects.  This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  Label is_data_object;
  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  b(eq, &is_data_object);
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  b(ne, not_data_object);
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
  mov(ip, Operand(1));
  mov(mask_reg, Operand(ip, LSL, mask_reg));
}

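// A worked sketch of the address arithmetic above for a heap address A,
// using the same constants as the code:
//
//   bitmap_reg = A & ~kPageAlignmentMask                  // page start
//   cell index = bits [kPageSizeBits-1 : kLowBits] of A   // cell in bitmap
//   mask_reg   = 1 << (bits [kLowBits-1 : kPointerSizeLog2] of A)
//
// so every pointer-aligned address on a page owns exactly one mark bit in
// the bitmap that starts MemoryChunk::kHeaderSize bytes into the page.
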
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(mask_scratch, load_scratch);
  b(ne, &done);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    // LSL may overflow, making the check conservative.
    tst(load_scratch, Operand(mask_scratch, LSL, 1));
    b(eq, &ok);
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;  // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after type test.
  Label is_data_object;

  // Check for heap-number.
  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
  b(eq, &is_data_object);

  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  b(ne, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  tst(instance_type, Operand(kExternalStringTag));
  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
  b(ne, &is_data_object);

  // Sequential string, either ASCII or UC16.
  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
  tst(instance_type, Operand(kStringEncodingMask));
  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  orr(ip, ip, Operand(mask_scratch));
  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  add(ip, ip, Operand(length));
  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  Usat(output_reg, 8, Operand(input_reg));
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Vmov(temp_double_reg, 0.0);
  VFPCompareAndSetFlags(input_reg, temp_double_reg);
  b(gt, &above_zero);

  // Double value is less than or equal to zero, or NaN: return 0.
  mov(result_reg, Operand(0));
  b(al, &done);

  // Double value is greater than 255: return 255.
  bind(&above_zero);
  Vmov(temp_double_reg, 255.0);
  VFPCompareAndSetFlags(input_reg, temp_double_reg);
  b(le, &in_bounds);
  mov(result_reg, Operand(255));
  b(al, &done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  Vmov(temp_double_reg, 0.5);
  vadd(temp_double_reg, input_reg, temp_double_reg);
  vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
  vmov(result_reg, temp_double_reg.low());
  bind(&done);
}

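// ClampDoubleToUint8 rounds in-range values half-up by adding 0.5 and then
// truncating in the vcvt, e.g. 41.4 -> 41.9 -> 41 and 254.6 -> 255.1 -> 255.
// NaN and values <= 0 produce 0; values above 255 (including +Infinity)
// produce 255.
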
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ldr(descriptors,
      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  JumpIfNotSmi(descriptors, &not_smi);
  mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
  bind(&not_smi);
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next;
  // Preload a couple of values used in the loop.
  Register empty_fixed_array_value = r6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Register empty_descriptor_array_value = r7;
  LoadRoot(empty_descriptor_array_value,
           Heap::kEmptyDescriptorArrayRootIndex);
  mov(r1, r0);
  bind(&next);

  // Check that there are no elements.  Register r1 contains the
  // current JS object we've reached through the prototype chain.
  ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
  cmp(r2, empty_fixed_array_value);
  b(ne, call_runtime);

  // Check that instance descriptors are not empty so that we can
  // check for an enum cache.  Leave the map in r2 for the subsequent
  // prototype load.
  ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
  ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
  JumpIfSmi(r3, call_runtime);

  // Check that there is an enum cache in the non-empty instance
  // descriptors (r3).  This is the case if the next enumeration
  // index field does not contain a smi.
  ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
  JumpIfSmi(r3, call_runtime);

  // For all objects but the receiver, check that the cache is empty.
  Label check_prototype;
  cmp(r1, r0);
  b(eq, &check_prototype);
  ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
  cmp(r3, empty_fixed_array_value);
  b(ne, call_runtime);

  // Load the prototype from the map and loop if non-null.
  bind(&check_prototype);
  ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
  cmp(r1, null_value);
  b(ne, &next);
}


bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}


CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM