// macro-assembler-mips.cc revision f7060e27768c550ace7ec48ad8c093466db52dfa
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 28 29 30#include "v8.h" 31 32#if defined(V8_TARGET_ARCH_MIPS) 33 34#include "bootstrapper.h" 35#include "codegen-inl.h" 36#include "debug.h" 37#include "runtime.h" 38 39namespace v8 { 40namespace internal { 41 42MacroAssembler::MacroAssembler(void* buffer, int size) 43 : Assembler(buffer, size), 44 unresolved_(0), 45 generating_stub_(false), 46 allow_stub_calls_(true), 47 code_object_(Heap::undefined_value()) { 48} 49 50 51 52void MacroAssembler::Jump(Register target, Condition cond, 53 Register r1, const Operand& r2) { 54 Jump(Operand(target), cond, r1, r2); 55} 56 57 58void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, 59 Condition cond, Register r1, const Operand& r2) { 60 Jump(Operand(target, rmode), cond, r1, r2); 61} 62 63 64void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode, 65 Condition cond, Register r1, const Operand& r2) { 66 ASSERT(!RelocInfo::IsCodeTarget(rmode)); 67 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); 68} 69 70 71void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, 72 Condition cond, Register r1, const Operand& r2) { 73 ASSERT(RelocInfo::IsCodeTarget(rmode)); 74 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); 75} 76 77 78void MacroAssembler::Call(Register target, 79 Condition cond, Register r1, const Operand& r2) { 80 Call(Operand(target), cond, r1, r2); 81} 82 83 84void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, 85 Condition cond, Register r1, const Operand& r2) { 86 Call(Operand(target, rmode), cond, r1, r2); 87} 88 89 90void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, 91 Condition cond, Register r1, const Operand& r2) { 92 ASSERT(!RelocInfo::IsCodeTarget(rmode)); 93 Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); 94} 95 96 97void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, 98 Condition cond, Register r1, const Operand& r2) { 99 ASSERT(RelocInfo::IsCodeTarget(rmode)); 100 
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2); 101} 102 103 104void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) { 105 Jump(Operand(ra), cond, r1, r2); 106} 107 108 109void MacroAssembler::LoadRoot(Register destination, 110 Heap::RootListIndex index) { 111 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); 112} 113 114void MacroAssembler::LoadRoot(Register destination, 115 Heap::RootListIndex index, 116 Condition cond, 117 Register src1, const Operand& src2) { 118 Branch(NegateCondition(cond), 2, src1, src2); 119 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); 120} 121 122 123void MacroAssembler::RecordWrite(Register object, Register offset, 124 Register scratch) { 125 UNIMPLEMENTED_MIPS(); 126} 127 128 129// --------------------------------------------------------------------------- 130// Instruction macros 131 132void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) { 133 if (rt.is_reg()) { 134 add(rd, rs, rt.rm()); 135 } else { 136 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 137 addi(rd, rs, rt.imm32_); 138 } else { 139 // li handles the relocation. 140 ASSERT(!rs.is(at)); 141 li(at, rt); 142 add(rd, rs, at); 143 } 144 } 145} 146 147 148void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { 149 if (rt.is_reg()) { 150 addu(rd, rs, rt.rm()); 151 } else { 152 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 153 addiu(rd, rs, rt.imm32_); 154 } else { 155 // li handles the relocation. 156 ASSERT(!rs.is(at)); 157 li(at, rt); 158 addu(rd, rs, at); 159 } 160 } 161} 162 163 164void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { 165 if (rt.is_reg()) { 166 mul(rd, rs, rt.rm()); 167 } else { 168 // li handles the relocation. 
169 ASSERT(!rs.is(at)); 170 li(at, rt); 171 mul(rd, rs, at); 172 } 173} 174 175 176void MacroAssembler::Mult(Register rs, const Operand& rt) { 177 if (rt.is_reg()) { 178 mult(rs, rt.rm()); 179 } else { 180 // li handles the relocation. 181 ASSERT(!rs.is(at)); 182 li(at, rt); 183 mult(rs, at); 184 } 185} 186 187 188void MacroAssembler::Multu(Register rs, const Operand& rt) { 189 if (rt.is_reg()) { 190 multu(rs, rt.rm()); 191 } else { 192 // li handles the relocation. 193 ASSERT(!rs.is(at)); 194 li(at, rt); 195 multu(rs, at); 196 } 197} 198 199 200void MacroAssembler::Div(Register rs, const Operand& rt) { 201 if (rt.is_reg()) { 202 div(rs, rt.rm()); 203 } else { 204 // li handles the relocation. 205 ASSERT(!rs.is(at)); 206 li(at, rt); 207 div(rs, at); 208 } 209} 210 211 212void MacroAssembler::Divu(Register rs, const Operand& rt) { 213 if (rt.is_reg()) { 214 divu(rs, rt.rm()); 215 } else { 216 // li handles the relocation. 217 ASSERT(!rs.is(at)); 218 li(at, rt); 219 divu(rs, at); 220 } 221} 222 223 224void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { 225 if (rt.is_reg()) { 226 and_(rd, rs, rt.rm()); 227 } else { 228 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 229 andi(rd, rs, rt.imm32_); 230 } else { 231 // li handles the relocation. 232 ASSERT(!rs.is(at)); 233 li(at, rt); 234 and_(rd, rs, at); 235 } 236 } 237} 238 239 240void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { 241 if (rt.is_reg()) { 242 or_(rd, rs, rt.rm()); 243 } else { 244 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 245 ori(rd, rs, rt.imm32_); 246 } else { 247 // li handles the relocation. 248 ASSERT(!rs.is(at)); 249 li(at, rt); 250 or_(rd, rs, at); 251 } 252 } 253} 254 255 256void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { 257 if (rt.is_reg()) { 258 xor_(rd, rs, rt.rm()); 259 } else { 260 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 261 xori(rd, rs, rt.imm32_); 262 } else { 263 // li handles the relocation. 
264 ASSERT(!rs.is(at)); 265 li(at, rt); 266 xor_(rd, rs, at); 267 } 268 } 269} 270 271 272void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { 273 if (rt.is_reg()) { 274 nor(rd, rs, rt.rm()); 275 } else { 276 // li handles the relocation. 277 ASSERT(!rs.is(at)); 278 li(at, rt); 279 nor(rd, rs, at); 280 } 281} 282 283 284void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { 285 if (rt.is_reg()) { 286 slt(rd, rs, rt.rm()); 287 } else { 288 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 289 slti(rd, rs, rt.imm32_); 290 } else { 291 // li handles the relocation. 292 ASSERT(!rs.is(at)); 293 li(at, rt); 294 slt(rd, rs, at); 295 } 296 } 297} 298 299 300void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { 301 if (rt.is_reg()) { 302 sltu(rd, rs, rt.rm()); 303 } else { 304 if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { 305 sltiu(rd, rs, rt.imm32_); 306 } else { 307 // li handles the relocation. 308 ASSERT(!rs.is(at)); 309 li(at, rt); 310 sltu(rd, rs, at); 311 } 312 } 313} 314 315 316//------------Pseudo-instructions------------- 317 318void MacroAssembler::movn(Register rd, Register rt) { 319 addiu(at, zero_reg, -1); // Fill at with ones. 320 xor_(rd, rt, at); 321} 322 323 324void MacroAssembler::li(Register rd, Operand j, bool gen2instr) { 325 ASSERT(!j.is_reg()); 326 327 if (!MustUseAt(j.rmode_) && !gen2instr) { 328 // Normal load of an immediate value which does not need Relocation Info. 
329 if (is_int16(j.imm32_)) { 330 addiu(rd, zero_reg, j.imm32_); 331 } else if (!(j.imm32_ & HIMask)) { 332 ori(rd, zero_reg, j.imm32_); 333 } else if (!(j.imm32_ & LOMask)) { 334 lui(rd, (HIMask & j.imm32_) >> 16); 335 } else { 336 lui(rd, (HIMask & j.imm32_) >> 16); 337 ori(rd, rd, (LOMask & j.imm32_)); 338 } 339 } else if (MustUseAt(j.rmode_) || gen2instr) { 340 if (MustUseAt(j.rmode_)) { 341 RecordRelocInfo(j.rmode_, j.imm32_); 342 } 343 // We need always the same number of instructions as we may need to patch 344 // this code to load another value which may need 2 instructions to load. 345 if (is_int16(j.imm32_)) { 346 nop(); 347 addiu(rd, zero_reg, j.imm32_); 348 } else if (!(j.imm32_ & HIMask)) { 349 nop(); 350 ori(rd, zero_reg, j.imm32_); 351 } else if (!(j.imm32_ & LOMask)) { 352 nop(); 353 lui(rd, (HIMask & j.imm32_) >> 16); 354 } else { 355 lui(rd, (HIMask & j.imm32_) >> 16); 356 ori(rd, rd, (LOMask & j.imm32_)); 357 } 358 } 359} 360 361 362// Exception-generating instructions and debugging support 363void MacroAssembler::stop(const char* msg) { 364 // TO_UPGRADE: Just a break for now. Maybe we could upgrade it. 365 // We use the 0x54321 value to be able to find it easily when reading memory. 
366 break_(0x54321); 367} 368 369 370void MacroAssembler::MultiPush(RegList regs) { 371 int16_t NumSaved = 0; 372 int16_t NumToPush = NumberOfBitsSet(regs); 373 374 addiu(sp, sp, -4 * NumToPush); 375 for (int16_t i = kNumRegisters; i > 0; i--) { 376 if ((regs & (1 << i)) != 0) { 377 sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); 378 } 379 } 380} 381 382 383void MacroAssembler::MultiPushReversed(RegList regs) { 384 int16_t NumSaved = 0; 385 int16_t NumToPush = NumberOfBitsSet(regs); 386 387 addiu(sp, sp, -4 * NumToPush); 388 for (int16_t i = 0; i < kNumRegisters; i++) { 389 if ((regs & (1 << i)) != 0) { 390 sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); 391 } 392 } 393} 394 395 396void MacroAssembler::MultiPop(RegList regs) { 397 int16_t NumSaved = 0; 398 399 for (int16_t i = 0; i < kNumRegisters; i++) { 400 if ((regs & (1 << i)) != 0) { 401 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); 402 } 403 } 404 addiu(sp, sp, 4 * NumSaved); 405} 406 407 408void MacroAssembler::MultiPopReversed(RegList regs) { 409 int16_t NumSaved = 0; 410 411 for (int16_t i = kNumRegisters; i > 0; i--) { 412 if ((regs & (1 << i)) != 0) { 413 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); 414 } 415 } 416 addiu(sp, sp, 4 * NumSaved); 417} 418 419 420// Emulated condtional branches do not emit a nop in the branch delay slot. 421 422// Trashes the at register if no scratch register is provided. 423void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs, 424 const Operand& rt, Register scratch) { 425 Register r2 = no_reg; 426 if (rt.is_reg()) { 427 // We don't want any other register but scratch clobbered. 428 ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_)); 429 r2 = rt.rm_; 430 } else if (cond != cc_always) { 431 // We don't want any other register but scratch clobbered. 
432 ASSERT(!scratch.is(rs)); 433 r2 = scratch; 434 li(r2, rt); 435 } 436 437 switch (cond) { 438 case cc_always: 439 b(offset); 440 break; 441 case eq: 442 beq(rs, r2, offset); 443 break; 444 case ne: 445 bne(rs, r2, offset); 446 break; 447 448 // Signed comparison 449 case greater: 450 slt(scratch, r2, rs); 451 bne(scratch, zero_reg, offset); 452 break; 453 case greater_equal: 454 slt(scratch, rs, r2); 455 beq(scratch, zero_reg, offset); 456 break; 457 case less: 458 slt(scratch, rs, r2); 459 bne(scratch, zero_reg, offset); 460 break; 461 case less_equal: 462 slt(scratch, r2, rs); 463 beq(scratch, zero_reg, offset); 464 break; 465 466 // Unsigned comparison. 467 case Ugreater: 468 sltu(scratch, r2, rs); 469 bne(scratch, zero_reg, offset); 470 break; 471 case Ugreater_equal: 472 sltu(scratch, rs, r2); 473 beq(scratch, zero_reg, offset); 474 break; 475 case Uless: 476 sltu(scratch, rs, r2); 477 bne(scratch, zero_reg, offset); 478 break; 479 case Uless_equal: 480 sltu(scratch, r2, rs); 481 beq(scratch, zero_reg, offset); 482 break; 483 484 default: 485 UNREACHABLE(); 486 } 487 // Emit a nop in the branch delay slot. 488 nop(); 489} 490 491 492void MacroAssembler::Branch(Condition cond, Label* L, Register rs, 493 const Operand& rt, Register scratch) { 494 Register r2 = no_reg; 495 if (rt.is_reg()) { 496 r2 = rt.rm_; 497 } else if (cond != cc_always) { 498 r2 = scratch; 499 li(r2, rt); 500 } 501 502 // We use branch_offset as an argument for the branch instructions to be sure 503 // it is called just before generating the branch instruction, as needed. 
504 505 switch (cond) { 506 case cc_always: 507 b(shifted_branch_offset(L, false)); 508 break; 509 case eq: 510 beq(rs, r2, shifted_branch_offset(L, false)); 511 break; 512 case ne: 513 bne(rs, r2, shifted_branch_offset(L, false)); 514 break; 515 516 // Signed comparison 517 case greater: 518 slt(scratch, r2, rs); 519 bne(scratch, zero_reg, shifted_branch_offset(L, false)); 520 break; 521 case greater_equal: 522 slt(scratch, rs, r2); 523 beq(scratch, zero_reg, shifted_branch_offset(L, false)); 524 break; 525 case less: 526 slt(scratch, rs, r2); 527 bne(scratch, zero_reg, shifted_branch_offset(L, false)); 528 break; 529 case less_equal: 530 slt(scratch, r2, rs); 531 beq(scratch, zero_reg, shifted_branch_offset(L, false)); 532 break; 533 534 // Unsigned comparison. 535 case Ugreater: 536 sltu(scratch, r2, rs); 537 bne(scratch, zero_reg, shifted_branch_offset(L, false)); 538 break; 539 case Ugreater_equal: 540 sltu(scratch, rs, r2); 541 beq(scratch, zero_reg, shifted_branch_offset(L, false)); 542 break; 543 case Uless: 544 sltu(scratch, rs, r2); 545 bne(scratch, zero_reg, shifted_branch_offset(L, false)); 546 break; 547 case Uless_equal: 548 sltu(scratch, r2, rs); 549 beq(scratch, zero_reg, shifted_branch_offset(L, false)); 550 break; 551 552 default: 553 UNREACHABLE(); 554 } 555 // Emit a nop in the branch delay slot. 556 nop(); 557} 558 559 560// Trashes the at register if no scratch register is provided. 561// We need to use a bgezal or bltzal, but they can't be used directly with the 562// slt instructions. We could use sub or add instead but we would miss overflow 563// cases, so we keep slt and add an intermediate third instruction. 
564void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs, 565 const Operand& rt, Register scratch) { 566 Register r2 = no_reg; 567 if (rt.is_reg()) { 568 r2 = rt.rm_; 569 } else if (cond != cc_always) { 570 r2 = scratch; 571 li(r2, rt); 572 } 573 574 switch (cond) { 575 case cc_always: 576 bal(offset); 577 break; 578 case eq: 579 bne(rs, r2, 2); 580 nop(); 581 bal(offset); 582 break; 583 case ne: 584 beq(rs, r2, 2); 585 nop(); 586 bal(offset); 587 break; 588 589 // Signed comparison 590 case greater: 591 slt(scratch, r2, rs); 592 addiu(scratch, scratch, -1); 593 bgezal(scratch, offset); 594 break; 595 case greater_equal: 596 slt(scratch, rs, r2); 597 addiu(scratch, scratch, -1); 598 bltzal(scratch, offset); 599 break; 600 case less: 601 slt(scratch, rs, r2); 602 addiu(scratch, scratch, -1); 603 bgezal(scratch, offset); 604 break; 605 case less_equal: 606 slt(scratch, r2, rs); 607 addiu(scratch, scratch, -1); 608 bltzal(scratch, offset); 609 break; 610 611 // Unsigned comparison. 612 case Ugreater: 613 sltu(scratch, r2, rs); 614 addiu(scratch, scratch, -1); 615 bgezal(scratch, offset); 616 break; 617 case Ugreater_equal: 618 sltu(scratch, rs, r2); 619 addiu(scratch, scratch, -1); 620 bltzal(scratch, offset); 621 break; 622 case Uless: 623 sltu(scratch, rs, r2); 624 addiu(scratch, scratch, -1); 625 bgezal(scratch, offset); 626 break; 627 case Uless_equal: 628 sltu(scratch, r2, rs); 629 addiu(scratch, scratch, -1); 630 bltzal(scratch, offset); 631 break; 632 633 default: 634 UNREACHABLE(); 635 } 636 // Emit a nop in the branch delay slot. 
637 nop(); 638} 639 640 641void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs, 642 const Operand& rt, Register scratch) { 643 Register r2 = no_reg; 644 if (rt.is_reg()) { 645 r2 = rt.rm_; 646 } else if (cond != cc_always) { 647 r2 = scratch; 648 li(r2, rt); 649 } 650 651 switch (cond) { 652 case cc_always: 653 bal(shifted_branch_offset(L, false)); 654 break; 655 case eq: 656 bne(rs, r2, 2); 657 nop(); 658 bal(shifted_branch_offset(L, false)); 659 break; 660 case ne: 661 beq(rs, r2, 2); 662 nop(); 663 bal(shifted_branch_offset(L, false)); 664 break; 665 666 // Signed comparison 667 case greater: 668 slt(scratch, r2, rs); 669 addiu(scratch, scratch, -1); 670 bgezal(scratch, shifted_branch_offset(L, false)); 671 break; 672 case greater_equal: 673 slt(scratch, rs, r2); 674 addiu(scratch, scratch, -1); 675 bltzal(scratch, shifted_branch_offset(L, false)); 676 break; 677 case less: 678 slt(scratch, rs, r2); 679 addiu(scratch, scratch, -1); 680 bgezal(scratch, shifted_branch_offset(L, false)); 681 break; 682 case less_equal: 683 slt(scratch, r2, rs); 684 addiu(scratch, scratch, -1); 685 bltzal(scratch, shifted_branch_offset(L, false)); 686 break; 687 688 // Unsigned comparison. 689 case Ugreater: 690 sltu(scratch, r2, rs); 691 addiu(scratch, scratch, -1); 692 bgezal(scratch, shifted_branch_offset(L, false)); 693 break; 694 case Ugreater_equal: 695 sltu(scratch, rs, r2); 696 addiu(scratch, scratch, -1); 697 bltzal(scratch, shifted_branch_offset(L, false)); 698 break; 699 case Uless: 700 sltu(scratch, rs, r2); 701 addiu(scratch, scratch, -1); 702 bgezal(scratch, shifted_branch_offset(L, false)); 703 break; 704 case Uless_equal: 705 sltu(scratch, r2, rs); 706 addiu(scratch, scratch, -1); 707 bltzal(scratch, shifted_branch_offset(L, false)); 708 break; 709 710 default: 711 UNREACHABLE(); 712 } 713 // Emit a nop in the branch delay slot. 
714 nop(); 715} 716 717 718void MacroAssembler::Jump(const Operand& target, 719 Condition cond, Register rs, const Operand& rt) { 720 if (target.is_reg()) { 721 if (cond == cc_always) { 722 jr(target.rm()); 723 } else { 724 Branch(NegateCondition(cond), 2, rs, rt); 725 jr(target.rm()); 726 } 727 } else { // !target.is_reg() 728 if (!MustUseAt(target.rmode_)) { 729 if (cond == cc_always) { 730 j(target.imm32_); 731 } else { 732 Branch(NegateCondition(cond), 2, rs, rt); 733 j(target.imm32_); // Will generate only one instruction. 734 } 735 } else { // MustUseAt(target) 736 li(at, target); 737 if (cond == cc_always) { 738 jr(at); 739 } else { 740 Branch(NegateCondition(cond), 2, rs, rt); 741 jr(at); // Will generate only one instruction. 742 } 743 } 744 } 745 // Emit a nop in the branch delay slot. 746 nop(); 747} 748 749 750void MacroAssembler::Call(const Operand& target, 751 Condition cond, Register rs, const Operand& rt) { 752 if (target.is_reg()) { 753 if (cond == cc_always) { 754 jalr(target.rm()); 755 } else { 756 Branch(NegateCondition(cond), 2, rs, rt); 757 jalr(target.rm()); 758 } 759 } else { // !target.is_reg() 760 if (!MustUseAt(target.rmode_)) { 761 if (cond == cc_always) { 762 jal(target.imm32_); 763 } else { 764 Branch(NegateCondition(cond), 2, rs, rt); 765 jal(target.imm32_); // Will generate only one instruction. 766 } 767 } else { // MustUseAt(target) 768 li(at, target); 769 if (cond == cc_always) { 770 jalr(at); 771 } else { 772 Branch(NegateCondition(cond), 2, rs, rt); 773 jalr(at); // Will generate only one instruction. 774 } 775 } 776 } 777 // Emit a nop in the branch delay slot. 
778 nop(); 779} 780 781void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) { 782 UNIMPLEMENTED_MIPS(); 783} 784 785 786void MacroAssembler::Drop(int count, Condition cond) { 787 UNIMPLEMENTED_MIPS(); 788} 789 790 791void MacroAssembler::Call(Label* target) { 792 UNIMPLEMENTED_MIPS(); 793} 794 795 796#ifdef ENABLE_DEBUGGER_SUPPORT 797 // --------------------------------------------------------------------------- 798 // Debugger Support 799 800 void MacroAssembler::DebugBreak() { 801 UNIMPLEMENTED_MIPS(); 802 } 803#endif 804 805 806// --------------------------------------------------------------------------- 807// Exception handling 808 809void MacroAssembler::PushTryHandler(CodeLocation try_location, 810 HandlerType type) { 811 // Adjust this code if not the case. 812 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); 813 // The return address is passed in register ra. 814 if (try_location == IN_JAVASCRIPT) { 815 if (type == TRY_CATCH_HANDLER) { 816 li(t0, Operand(StackHandler::TRY_CATCH)); 817 } else { 818 li(t0, Operand(StackHandler::TRY_FINALLY)); 819 } 820 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize 821 && StackHandlerConstants::kFPOffset == 2 * kPointerSize 822 && StackHandlerConstants::kPCOffset == 3 * kPointerSize 823 && StackHandlerConstants::kNextOffset == 0 * kPointerSize); 824 // Save the current handler as the next handler. 825 LoadExternalReference(t2, ExternalReference(Top::k_handler_address)); 826 lw(t1, MemOperand(t2)); 827 828 addiu(sp, sp, -StackHandlerConstants::kSize); 829 sw(ra, MemOperand(sp, 12)); 830 sw(fp, MemOperand(sp, 8)); 831 sw(t0, MemOperand(sp, 4)); 832 sw(t1, MemOperand(sp, 0)); 833 834 // Link this handler as the new current one. 835 sw(sp, MemOperand(t2)); 836 837 } else { 838 // Must preserve a0-a3, and s0 (argv). 
839 ASSERT(try_location == IN_JS_ENTRY); 840 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize 841 && StackHandlerConstants::kFPOffset == 2 * kPointerSize 842 && StackHandlerConstants::kPCOffset == 3 * kPointerSize 843 && StackHandlerConstants::kNextOffset == 0 * kPointerSize); 844 845 // The frame pointer does not point to a JS frame so we save NULL 846 // for fp. We expect the code throwing an exception to check fp 847 // before dereferencing it to restore the context. 848 li(t0, Operand(StackHandler::ENTRY)); 849 850 // Save the current handler as the next handler. 851 LoadExternalReference(t2, ExternalReference(Top::k_handler_address)); 852 lw(t1, MemOperand(t2)); 853 854 addiu(sp, sp, -StackHandlerConstants::kSize); 855 sw(ra, MemOperand(sp, 12)); 856 sw(zero_reg, MemOperand(sp, 8)); 857 sw(t0, MemOperand(sp, 4)); 858 sw(t1, MemOperand(sp, 0)); 859 860 // Link this handler as the new current one. 861 sw(sp, MemOperand(t2)); 862 } 863} 864 865 866void MacroAssembler::PopTryHandler() { 867 UNIMPLEMENTED_MIPS(); 868} 869 870 871 872// ----------------------------------------------------------------------------- 873// Activation frames 874 875void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) { 876 Label extra_push, end; 877 878 andi(scratch, sp, 7); 879 880 // We check for args and receiver size on the stack, all of them word sized. 881 // We add one for sp, that we also want to store on the stack. 882 if (((arg_count + 1) % kPointerSizeLog2) == 0) { 883 Branch(ne, &extra_push, at, Operand(zero_reg)); 884 } else { // ((arg_count + 1) % 2) == 1 885 Branch(eq, &extra_push, at, Operand(zero_reg)); 886 } 887 888 // Save sp on the stack. 889 mov(scratch, sp); 890 Push(scratch); 891 b(&end); 892 893 // Align before saving sp on the stack. 894 bind(&extra_push); 895 mov(scratch, sp); 896 addiu(sp, sp, -8); 897 sw(scratch, MemOperand(sp)); 898 899 // The stack is aligned and sp is stored on the top. 
900 bind(&end); 901} 902 903 904void MacroAssembler::ReturnFromAlignedCall() { 905 lw(sp, MemOperand(sp)); 906} 907 908 909// ----------------------------------------------------------------------------- 910// JavaScript invokes 911 912void MacroAssembler::InvokePrologue(const ParameterCount& expected, 913 const ParameterCount& actual, 914 Handle<Code> code_constant, 915 Register code_reg, 916 Label* done, 917 InvokeFlag flag) { 918 bool definitely_matches = false; 919 Label regular_invoke; 920 921 // Check whether the expected and actual arguments count match. If not, 922 // setup registers according to contract with ArgumentsAdaptorTrampoline: 923 // a0: actual arguments count 924 // a1: function (passed through to callee) 925 // a2: expected arguments count 926 // a3: callee code entry 927 928 // The code below is made a lot easier because the calling code already sets 929 // up actual and expected registers according to the contract if values are 930 // passed in registers. 931 ASSERT(actual.is_immediate() || actual.reg().is(a0)); 932 ASSERT(expected.is_immediate() || expected.reg().is(a2)); 933 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); 934 935 if (expected.is_immediate()) { 936 ASSERT(actual.is_immediate()); 937 if (expected.immediate() == actual.immediate()) { 938 definitely_matches = true; 939 } else { 940 li(a0, Operand(actual.immediate())); 941 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; 942 if (expected.immediate() == sentinel) { 943 // Don't worry about adapting arguments for builtins that 944 // don't want that done. Skip adaption code by making it look 945 // like we have a match between expected and actual number of 946 // arguments. 
947 definitely_matches = true; 948 } else { 949 li(a2, Operand(expected.immediate())); 950 } 951 } 952 } else if (actual.is_immediate()) { 953 Branch(eq, ®ular_invoke, expected.reg(), Operand(actual.immediate())); 954 li(a0, Operand(actual.immediate())); 955 } else { 956 Branch(eq, ®ular_invoke, expected.reg(), Operand(actual.reg())); 957 } 958 959 if (!definitely_matches) { 960 if (!code_constant.is_null()) { 961 li(a3, Operand(code_constant)); 962 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); 963 } 964 965 ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline); 966 if (flag == CALL_FUNCTION) { 967 CallBuiltin(adaptor); 968 b(done); 969 nop(); 970 } else { 971 JumpToBuiltin(adaptor); 972 } 973 bind(®ular_invoke); 974 } 975} 976 977void MacroAssembler::InvokeCode(Register code, 978 const ParameterCount& expected, 979 const ParameterCount& actual, 980 InvokeFlag flag) { 981 Label done; 982 983 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); 984 if (flag == CALL_FUNCTION) { 985 Call(code); 986 } else { 987 ASSERT(flag == JUMP_FUNCTION); 988 Jump(code); 989 } 990 // Continue here if InvokePrologue does handle the invocation due to 991 // mismatched parameter counts. 992 bind(&done); 993} 994 995 996void MacroAssembler::InvokeCode(Handle<Code> code, 997 const ParameterCount& expected, 998 const ParameterCount& actual, 999 RelocInfo::Mode rmode, 1000 InvokeFlag flag) { 1001 Label done; 1002 1003 InvokePrologue(expected, actual, code, no_reg, &done, flag); 1004 if (flag == CALL_FUNCTION) { 1005 Call(code, rmode); 1006 } else { 1007 Jump(code, rmode); 1008 } 1009 // Continue here if InvokePrologue does handle the invocation due to 1010 // mismatched parameter counts. 1011 bind(&done); 1012} 1013 1014 1015void MacroAssembler::InvokeFunction(Register function, 1016 const ParameterCount& actual, 1017 InvokeFlag flag) { 1018 // Contract with called JS functions requires that function is passed in a1. 
1019 ASSERT(function.is(a1)); 1020 Register expected_reg = a2; 1021 Register code_reg = a3; 1022 1023 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); 1024 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 1025 lw(expected_reg, 1026 FieldMemOperand(code_reg, 1027 SharedFunctionInfo::kFormalParameterCountOffset)); 1028 lw(code_reg, 1029 MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag)); 1030 addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag); 1031 1032 ParameterCount expected(expected_reg); 1033 InvokeCode(code_reg, expected, actual, flag); 1034} 1035 1036 1037// --------------------------------------------------------------------------- 1038// Support functions. 1039 1040 void MacroAssembler::GetObjectType(Register function, 1041 Register map, 1042 Register type_reg) { 1043 lw(map, FieldMemOperand(function, HeapObject::kMapOffset)); 1044 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); 1045 } 1046 1047 1048 void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) { 1049 // Load builtin address. 1050 LoadExternalReference(t9, builtin_entry); 1051 lw(t9, MemOperand(t9)); // Deref address. 1052 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); 1053 // Call and allocate arguments slots. 1054 jalr(t9); 1055 // Use the branch delay slot to allocated argument slots. 1056 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); 1057 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); 1058 } 1059 1060 1061 void MacroAssembler::CallBuiltin(Register target) { 1062 // Target already holds target address. 1063 // Call and allocate arguments slots. 1064 jalr(target); 1065 // Use the branch delay slot to allocated argument slots. 1066 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); 1067 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); 1068 } 1069 1070 1071 void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) { 1072 // Load builtin address. 
1073 LoadExternalReference(t9, builtin_entry); 1074 lw(t9, MemOperand(t9)); // Deref address. 1075 addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); 1076 // Call and allocate arguments slots. 1077 jr(t9); 1078 // Use the branch delay slot to allocated argument slots. 1079 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); 1080 } 1081 1082 1083 void MacroAssembler::JumpToBuiltin(Register target) { 1084 // t9 already holds target address. 1085 // Call and allocate arguments slots. 1086 jr(t9); 1087 // Use the branch delay slot to allocated argument slots. 1088 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); 1089 } 1090 1091 1092// ----------------------------------------------------------------------------- 1093// Runtime calls 1094 1095void MacroAssembler::CallStub(CodeStub* stub, Condition cond, 1096 Register r1, const Operand& r2) { 1097 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 1098 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); 1099} 1100 1101 1102void MacroAssembler::StubReturn(int argc) { 1103 UNIMPLEMENTED_MIPS(); 1104} 1105 1106 1107void MacroAssembler::IllegalOperation(int num_arguments) { 1108 if (num_arguments > 0) { 1109 addiu(sp, sp, num_arguments * kPointerSize); 1110 } 1111 LoadRoot(v0, Heap::kUndefinedValueRootIndex); 1112} 1113 1114 1115void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { 1116 // All parameters are on the stack. v0 has the return value after call. 1117 1118 // If the expected number of arguments of the runtime function is 1119 // constant, we check that the actual number of arguments match the 1120 // expectation. 1121 if (f->nargs >= 0 && f->nargs != num_arguments) { 1122 IllegalOperation(num_arguments); 1123 return; 1124 } 1125 1126 // TODO(1236192): Most runtime routines don't need the number of 1127 // arguments passed in because it is constant. 
At some point we 1128 // should remove this need and make the runtime routine entry code 1129 // smarter. 1130 li(a0, num_arguments); 1131 LoadExternalReference(a1, ExternalReference(f)); 1132 CEntryStub stub(1); 1133 CallStub(&stub); 1134} 1135 1136 1137void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { 1138 CallRuntime(Runtime::FunctionForId(fid), num_arguments); 1139} 1140 1141 1142void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, 1143 int num_arguments, 1144 int result_size) { 1145 UNIMPLEMENTED_MIPS(); 1146} 1147 1148 1149void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, 1150 int num_arguments, 1151 int result_size) { 1152 TailCallExternalReference(ExternalReference(fid), num_arguments, result_size); 1153} 1154 1155 1156void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { 1157 UNIMPLEMENTED_MIPS(); 1158} 1159 1160 1161Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id, 1162 bool* resolved) { 1163 UNIMPLEMENTED_MIPS(); 1164 return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN 1165} 1166 1167 1168void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 1169 InvokeJSFlags flags) { 1170 UNIMPLEMENTED_MIPS(); 1171} 1172 1173 1174void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { 1175 UNIMPLEMENTED_MIPS(); 1176} 1177 1178 1179void MacroAssembler::SetCounter(StatsCounter* counter, int value, 1180 Register scratch1, Register scratch2) { 1181 UNIMPLEMENTED_MIPS(); 1182} 1183 1184 1185void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, 1186 Register scratch1, Register scratch2) { 1187 UNIMPLEMENTED_MIPS(); 1188} 1189 1190 1191void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, 1192 Register scratch1, Register scratch2) { 1193 UNIMPLEMENTED_MIPS(); 1194} 1195 1196 1197// ----------------------------------------------------------------------------- 1198// Debugging 

void MacroAssembler::Assert(Condition cc, const char* msg,
                            Register rs, Operand rt) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Check(Condition cc, const char* msg,
                           Register rs, Operand rt) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Abort(const char* msg) {
  UNIMPLEMENTED_MIPS();
}


// Build a typed stack frame: push ra, caller fp, context, the frame-type
// marker and the code object, then point fp at the saved-fp slot.
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  addiu(sp, sp, -5 * kPointerSize);
  li(t0, Operand(Smi::FromInt(type)));
  li(t1, Operand(CodeObject()));
  sw(ra, MemOperand(sp, 4 * kPointerSize));
  sw(fp, MemOperand(sp, 3 * kPointerSize));
  sw(cp, MemOperand(sp, 2 * kPointerSize));
  sw(t0, MemOperand(sp, 1 * kPointerSize));
  sw(t1, MemOperand(sp, 0 * kPointerSize));
  addiu(fp, sp, 3 * kPointerSize);  // fp points at the saved-fp slot.
}


// Tear down a frame built by EnterFrame: restore fp and ra and pop the
// two slots; the marker/code-object/context slots are discarded by the
// mov(sp, fp) since fp points above them.
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  mov(sp, fp);
  lw(fp, MemOperand(sp, 0 * kPointerSize));
  lw(ra, MemOperand(sp, 1 * kPointerSize));
  addiu(sp, sp, 2 * kPointerSize);
}


// Enter an exit frame for calling into C. On entry a0 holds argc and a1
// the function; argc, argv and the function are preserved in the three
// callee-saved registers passed by the caller.
void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
                                    Register hold_argc,
                                    Register hold_argv,
                                    Register hold_function) {
  // Compute the argv pointer and keep it in a callee-saved register.
  // a0 is argc.
  sll(t0, a0, kPointerSizeLog2);  // t0 = argc * kPointerSize.
  add(hold_argv, sp, t0);
  addi(hold_argv, hold_argv, -kPointerSize);  // Point at the last argument.

  // Compute callee's stack pointer before making changes and save it as
  // t1 register so that it is restored as sp register on exit, thereby
  // popping the args.
  // t1 = sp + kPointerSize * #args
  add(t1, sp, t0);

  // Align the stack at this point.
  AlignStack(0);

  // Save registers: old sp (t1), ra and fp, then make fp the frame pointer.
  addiu(sp, sp, -12);
  sw(t1, MemOperand(sp, 8));
  sw(ra, MemOperand(sp, 4));
  sw(fp, MemOperand(sp, 0));
  mov(fp, sp);  // Setup new frame pointer.

  // Push debug marker: 0 in debug mode, otherwise the code object.
  if (mode == ExitFrame::MODE_DEBUG) {
    Push(zero_reg);
  } else {
    li(t0, Operand(CodeObject()));
    Push(t0);
  }

  // Save the frame pointer and the context in top.
  LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
  sw(fp, MemOperand(t0));
  LoadExternalReference(t0, ExternalReference(Top::k_context_address));
  sw(cp, MemOperand(t0));

  // Setup argc and the builtin function in callee-saved registers.
  mov(hold_argc, a0);
  mov(hold_function, a1);
}


// Leave the exit frame set up by EnterExitFrame and return to the caller,
// popping the JS arguments via the saved pre-call sp.
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
  // Clear top frame.
  LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
  sw(zero_reg, MemOperand(t0));

  // Restore current context from top and clear it in debug mode.
  LoadExternalReference(t0, ExternalReference(Top::k_context_address));
  lw(cp, MemOperand(t0));
#ifdef DEBUG
  // NOTE(review): this stores a3, not zero_reg, despite the comment above
  // saying "clear"; it mirrors the ARM port of the same era — confirm a3
  // is the intended marker value here.
  sw(a3, MemOperand(t0));
#endif

  // Pop the arguments, restore registers, and return. The three loads
  // read the slots written in EnterExitFrame: fp at 0, ra at 4, and the
  // pre-call sp at 8 (loaded last, into sp itself, to pop the args).
  mov(sp, fp);  // Respect ABI stack constraint.
  lw(fp, MemOperand(sp, 0));
  lw(ra, MemOperand(sp, 4));
  lw(sp, MemOperand(sp, 8));
  jr(ra);
  nop();  // Branch delay slot nop.
}


// Align sp to an 8-byte boundary (offset == 0) or to 4 modulo 8
// (offset == 1) by conditionally pushing one filler word.
void MacroAssembler::AlignStack(int offset) {
  // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
  // and an offset of 1 aligns to 4 modulo 8 bytes.
  int activation_frame_alignment = OS::ActivationFrameAlignment();
  if (activation_frame_alignment != kPointerSize) {
    // This code needs to be made more general if this assert doesn't hold.
    ASSERT(activation_frame_alignment == 2 * kPointerSize);
    if (offset == 0) {
      // Push a filler word only when sp is currently misaligned
      // (t0 == 0 means already 8-byte aligned; Push is conditional on eq).
      andi(t0, sp, activation_frame_alignment - 1);
      Push(zero_reg, eq, t0, zero_reg);
    } else {
      // Push a filler word only when sp is currently 4 modulo 8
      // (t0 - 4 == 0), bringing it to the requested 4-mod-8 alignment.
      andi(t0, sp, activation_frame_alignment - 1);
      addiu(t0, t0, -4);
      Push(zero_reg, eq, t0, zero_reg);
    }
  }
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS