Searched refs:bgtz (Results 1 - 22 of 22) sorted by relevance

/external/llvm/test/MC/Mips/
micromips-bad-branches.s
49 # CHECK: bgtz $1, -65535
51 # CHECK: bgtz $1, -65537
53 # CHECK: bgtz $1, 65535
55 # CHECK: bgtz $1, 65536
164 bgtz $1, -65535
165 bgtz $1, -65536
166 bgtz $1, -65537
167 bgtz $1, 65534
168 bgtz $1, 65535
169 bgtz
[all...]
mips-bad-branches.s
89 # CHECK: bgtz $1, -131069
91 # CHECK: bgtz $1, -131070
93 # CHECK: bgtz $1, -131071
95 # CHECK: bgtz $1, -131073
97 # CHECK: bgtz $1, 131069
99 # CHECK: bgtz $1, 131070
101 # CHECK: bgtz $1, 131071
103 # CHECK: bgtz $1, 131072
303 bgtz $1, -131068
304 bgtz
[all...]
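
The two bad-branches tests above pin down the displacement rules that make those immediates legal or illegal: a classic MIPS branch stores a signed 16-bit count of 4-byte instructions, so byte offsets must be multiples of 4 in [-131072, 131068], while microMIPS scales by 2, so offsets must be even in [-65536, 65534]. A minimal sketch of that check, written against the values quoted above (this is not the LLVM MC verifier itself):

    #include <cstdint>
    #include <cstdio>

    // scale = 4 for classic MIPS branches, 2 for microMIPS.
    static bool branch_offset_ok(int64_t off, int scale) {
      int64_t lo = -(int64_t{1} << 15) * scale;      // -131072 / -65536
      int64_t hi = ((int64_t{1} << 15) - 1) * scale; //  131068 /  65534
      return off % scale == 0 && off >= lo && off <= hi;
    }

    int main() {
      std::printf("%d\n", branch_offset_ok(-131068, 4)); // 1: assembles
      std::printf("%d\n", branch_offset_ok(-131069, 4)); // 0: not a multiple of 4
      std::printf("%d\n", branch_offset_ok(-131073, 4)); // 0: below the range
      std::printf("%d\n", branch_offset_ok(131072, 4));  // 0: above the range
      std::printf("%d\n", branch_offset_ok(-65536, 2));  // 1: microMIPS, in range
      std::printf("%d\n", branch_offset_ok(65535, 2));   // 0: odd offset
    }
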
micromips-branch-instructions.s
22 # CHECK-EL: bgtz $6, 1332 # encoding: [0xc6,0x40,0x9a,0x02]
45 # CHECK-EB: bgtz $6, 1332 # encoding: [0x40,0xc6,0x02,0x9a]
61 bgtz $6,1332
micromips-branch16.s
26 # CHECK-FIXUP: bgtz $4, bar # encoding: [0xc4'A',0x40'A',0x00,0x00]
65 bgtz $4, bar
mips-jump-instructions.s
23 # CHECK32: bgtz $6, 1332 # encoding: [0x4d,0x01,0xc0,0x1c]
48 # CHECK64: bgtz $6, 1332 # encoding: [0x4d,0x01,0xc0,0x1c]
75 bgtz $6,1332
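
The CHECK32/CHECK64 bytes above fully determine the I-type encoding of bgtz: opcode 0b000111, a 5-bit rs field, an all-zero rt field, and a 16-bit immediate counting 4-byte instructions. A small sketch that reproduces the checked little-endian bytes (assumption: the offset operand is encoded verbatim as offset/4, which the bytes confirm; microMIPS, per the entry above, scales by 2 instead):

    #include <cstdint>
    #include <cstdio>

    // bgtz rs, offset  ->  000111 | rs (5 bits) | 00000 | offset/4 (16 bits)
    static uint32_t encode_bgtz(unsigned rs, int32_t byte_offset) {
      return (7u << 26) | (rs << 21) | static_cast<uint16_t>(byte_offset / 4);
    }

    int main() {
      uint32_t w = encode_bgtz(6, 1332);  // bgtz $6, 1332
      for (int i = 0; i < 4; ++i)         // little-endian, as in CHECK32
        std::printf("%02x ", (w >> (8 * i)) & 0xff);
      std::printf("\n");                  // prints: 4d 01 c0 1c
    }
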
/external/pixman/pixman/
pixman-mips-memcpy-asm.S
98 bgtz v1, $loop16w /* skip "pref 30, 64(a0)" for too short arrays */
105 bgtz v1, $skip_pref30_96 /* skip "pref 30, 96(a0)" */
127 bgtz v1, $skip_pref30_128 /* skip "pref 30, 128(a0)" */
245 bgtz v1, $ua_loop16w /* skip "pref 30, 64(a0)" for too short arrays */
254 bgtz v1, $ua_skip_pref30_96
284 bgtz v1, $ua_skip_pref30_128
pixman-mips-dspr2-asm.S
167 bgtz t2, 1b
209 bgtz t2, 1b
741 bgtz t2, 1b
801 bgtz t2, 1b
853 bgtz t2, 1b
1001 bgtz t1, 0b
1021 bgtz t1, 0b
1030 bgtz t1, 0b
1126 bgtz t1, 0b
1148 bgtz t
[all...]
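
The repeated `bgtz tN, 1b` / `bgtz tN, 0b` lines above are the standard MIPS count-down loop: the counter is decremented each iteration and bgtz branches back to the nearest preceding local label while it is still positive. A rough C-level equivalent of the idiom (a sketch, not the pixman routines themselves):

    #include <cstdint>

    // 1:  ...body...
    //     addiu t2, t2, -1    # decrement the counter
    //     bgtz  t2, 1b        # loop back while t2 > 0
    static void copy_words(uint32_t* dst, const uint32_t* src, int t2) {
      if (t2 <= 0) return;   // a non-positive count never enters the loop
      do {
        *dst++ = *src++;     // placeholder for the real loop body
      } while (--t2 > 0);    // bgtz t2, 1b
    }

    int main() {
      uint32_t a[4] = {1, 2, 3, 4}, b[4] = {};
      copy_words(b, a, 4);
    }
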
/external/valgrind/main/none/tests/mips32/
branches.stdout.exp
145 bgtz :: 6, RSval: 0
146 bgtz :: 2, RSval: 1
147 bgtz :: 8, RSval: -1
148 bgtz :: 9, RSval: -1
149 bgtz :: 10, RSval: -2
150 bgtz :: 11, RSval: -1
151 bgtz :: 7, RSval: 5
152 bgtz :: 13, RSval: -3
153 bgtz :: 9, RSval: 125
154 bgtz
[all...]
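
The expected outputs above (and the mips64 logs below) exercise the semantics directly: bgtz takes the branch only when the register, read as a signed value, is strictly greater than zero, so RSval 0 and the negative values fall through while 1, 5, and 125 branch. In the mips64 log, 0xffffffffffffffff is -1 and 0xfffffffffffffffd is -3, hence not taken. The predicate, as a sketch:

    #include <cstdint>
    #include <cstdio>

    static bool bgtz_taken(int64_t rs) { return rs > 0; }  // signed compare

    int main() {
      const int64_t vals[] = {0, 1, -1, -2, 5, -3, 125};   // RSvals from the log
      for (int64_t v : vals)
        std::printf("rs=%4lld -> taken=%d\n", (long long)v, bgtz_taken(v));
    }
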
/external/linux-tools-perf/perf-3.12.0/arch/m32r/lib/
memset.S
48 bgtz r2, word_set_loop
131 bgtz r2, word_set_loop
/external/valgrind/main/none/tests/mips64/
branches.stdout.exp
145 bgtz :: out: 6, RDval: 0, RSval: 0
146 bgtz :: out: 2, RDval: 1, RSval: 1
147 bgtz :: out: 8, RDval: 2, RSval: -1
148 bgtz :: out: 9, RDval: 3, RSval: -1
149 bgtz :: out: 10, RDval: 4, RSval: -2
150 bgtz :: out: 11, RDval: 5, RSval: -1
151 bgtz :: out: 7, RDval: 6, RSval: 5
152 bgtz :: out: 13, RDval: 7, RSval: -3
153 bgtz :: out: 9, RDval: 8, RSval: 125
154 bgtz
[all...]
branch_and_jump_instructions.stdout.exp
567 bgtz :: out: 0x9, RSval: 0x0
568 bgtz :: out: 0x2, RSval: 0x1
569 bgtz :: out: 0xb, RSval: 0xffffffff
570 bgtz :: out: 0xc, RSval: 0xffffffff
571 bgtz :: out: 0xd, RSval: 0xfffffffe
572 bgtz :: out: 0xe, RSval: 0xffffffff
573 bgtz :: out: 0x7, RSval: 0x5
574 bgtz :: out: 0x10, RSval: 0xfffffffffffffffd
575 bgtz :: out: 0x9, RSval: 0x7d
576 bgtz
[all...]
/external/openssl/crypto/bn/asm/
bn-mips.S
14 bgtz $6,bn_mul_add_words_internal
87 bgtz $8,.L_bn_mul_add_words_loop
149 bgtz $6,bn_mul_words_internal
203 bgtz $8,.L_bn_mul_words_loop
253 bgtz $6,bn_sqr_words_internal
301 bgtz $8,.L_bn_sqr_words_loop
346 bgtz $7,bn_add_words_internal
402 bgtz $1,.L_bn_add_words_loop
453 bgtz $7,bn_sub_words_internal
510 bgtz
[all...]
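
In bn-mips.S the `bgtz $6, bn_mul_add_words_internal` / `bgtz $7, ...` lines are entry guards: the word-count argument is tested once and the internal loop is reached only for a positive count. A C-level sketch of that shape (the names and signature here are hypothetical stand-ins; the real routines are assembly and return a carry word):

    #include <cstdint>

    // Hypothetical stub standing in for the assembly inner loop.
    static uint64_t bn_mul_add_words_internal(uint64_t*, const uint64_t*,
                                              int, uint64_t) {
      return 0;
    }

    static uint64_t bn_mul_add_words(uint64_t* rp, const uint64_t* ap,
                                     int num, uint64_t w) {
      if (num > 0)  // bgtz $6, bn_mul_add_words_internal
        return bn_mul_add_words_internal(rp, ap, num, w);
      return 0;     // non-positive counts skip the work entirely
    }

    int main() {
      uint64_t r[1] = {0}, a[1] = {2};
      return static_cast<int>(bn_mul_add_words(r, a, 1, 3));
    }
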
mips.pl
117 bgtz $a2,bn_mul_add_words_internal
204 bgtz $ta0,.L_bn_mul_add_words_loop
276 bgtz $a2,bn_mul_words_internal
344 bgtz $ta0,.L_bn_mul_words_loop
404 bgtz $a2,bn_sqr_words_internal
466 bgtz $ta0,.L_bn_sqr_words_loop
521 bgtz $a3,bn_add_words_internal
591 bgtz $at,.L_bn_add_words_loop
652 bgtz $a3,bn_sub_words_internal
723 bgtz
[all...]
mips3.s
635 bgtz a2,.-4
/external/chromium_org/v8/src/mips/
disasm-mips.cc
1060 Format(instr, "bgtz 'rs, 'imm16u");
macro-assembler-mips.cc
1874 bgtz(rs, offset);
1907 bgtz(rs, offset);
1967 bgtz(rs, offset);
2014 bgtz(rs, offset);
2112 bgtz(rs, offset);
2153 bgtz(rs, offset);
2221 bgtz(rs, offset);
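
The macro-assembler call sites above (and the mips64 ones below) sit inside v8's branch-lowering helpers: when a Branch with condition greater compares against the zero register, the helper emits a single bgtz; otherwise it falls back to an slt/bne pair. A reduced sketch of that selection (the emit_* helpers and Register enum are hypothetical stand-ins for v8's types):

    #include <cstdint>
    #include <cstdio>

    enum Register { zero_reg, at_scratch, a0, a1 };  // stand-ins, not v8's Register
    static void emit_bgtz(Register, int16_t o) { std::printf("bgtz rs, %d\n", o); }
    static void emit_slt(Register, Register, Register) { std::puts("slt  at, rt, rs"); }
    static void emit_bne(Register, Register, int16_t o) { std::printf("bne  at, zero, %d\n", o); }

    // Lowering for condition 'greater' (rs > rt): against zero_reg it
    // collapses to one bgtz, matching the call sites listed above.
    static void branch_greater(Register rs, Register rt, int16_t offset) {
      if (rt == zero_reg) {
        emit_bgtz(rs, offset);
      } else {
        emit_slt(at_scratch, rt, rs);        // at = (rt < rs) ? 1 : 0
        emit_bne(at_scratch, zero_reg, offset);
      }
    }

    int main() {
      branch_greater(a0, zero_reg, 8);  // -> bgtz
      branch_greater(a0, a1, 8);        // -> slt + bne
    }
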
assembler-mips.h
662 void bgtz(Register rs, int16_t offset);
assembler-mips.cc
1222 void Assembler::bgtz(Register rs, int16_t offset) { [function in class v8::Assembler]
/external/chromium_org/v8/src/mips64/
disasm-mips64.cc
1186 Format(instr, "bgtz 'rs, 'imm16u");
macro-assembler-mips64.cc
1926 bgtz(rs, offset);
1959 bgtz(rs, offset);
2019 bgtz(rs, offset);
2066 bgtz(rs, offset);
2164 bgtz(rs, offset);
2205 bgtz(rs, offset);
2273 bgtz(rs, offset);
[all...]
ExternalReference& ext, int num_arguments, int result_size) { PrepareCEntryArgs(num_arguments); JumpToExternalReference(ext); } void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size) { TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments, result_size); } void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, BranchDelaySlot bd) { PrepareCEntryFunction(builtin); CEntryStub stub(isolate(), 1); Jump(stub.GetCode(), RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd); } void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { DCHECK(flag == JUMP_FUNCTION || has_frame()); GetBuiltinEntry(t9, id); if (flag == CALL_FUNCTION) { call_wrapper.BeforeCall(CallSize(t9)); Call(t9); call_wrapper.AfterCall(); } else { DCHECK(flag == JUMP_FUNCTION); Jump(t9); } } void MacroAssembler::GetBuiltinFunction(Register target, Builtins::JavaScript id) { ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); ld(target, FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id))); } void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { DCHECK(!target.is(a1)); GetBuiltinFunction(a1, id); ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); } void MacroAssembler::SetCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { if (FLAG_native_code_counters && counter->Enabled()) { li(scratch1, Operand(value)); li(scratch2, Operand(ExternalReference(counter))); sd(scratch1, MemOperand(scratch2)); } } void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { li(scratch2, Operand(ExternalReference(counter))); ld(scratch1, MemOperand(scratch2)); Daddu(scratch1, scratch1, Operand(value)); sd(scratch1, MemOperand(scratch2)); } } void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { li(scratch2, Operand(ExternalReference(counter))); ld(scratch1, MemOperand(scratch2)); Dsubu(scratch1, scratch1, Operand(value)); sd(scratch1, MemOperand(scratch2)); } } void MacroAssembler::Assert(Condition cc, BailoutReason reason, Register rs, Operand rt) { if (emit_debug_code()) Check(cc, reason, rs, rt); } void MacroAssembler::AssertFastElements(Register elements) { if (emit_debug_code()) { DCHECK(!elements.is(at)); Label ok; push(elements); ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); LoadRoot(at, Heap::kFixedArrayMapRootIndex); Branch(&ok, eq, elements, Operand(at)); LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); Branch(&ok, eq, elements, Operand(at)); LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); Branch(&ok, eq, elements, Operand(at)); Abort(kJSObjectWithFastElementsMapHasSlowElements); bind(&ok); pop(elements); } } void MacroAssembler::Check(Condition cc, BailoutReason reason, Register rs, Operand rt) { Label L; Branch(&L, cc, rs, rt); Abort(reason); bind(&L); } void MacroAssembler::Abort(BailoutReason reason) { Label abort_start; bind(&abort_start); const char* msg = GetBailoutReason(reason); if (msg != NULL) { RecordComment(�); RecordComment(msg); } if (FLAG_trap_on_abort) { stop(msg); return; } li(a0, Operand(Smi::FromInt(reason))); push(a0); if 
(!has_frame_) { FrameScope scope(this, StackFrame::NONE); CallRuntime(Runtime::kAbort, 1); } else { CallRuntime(Runtime::kAbort, 1); } if (is_trampoline_pool_blocked()) { static const int kExpectedAbortInstructions = 10; int abort_instructions = InstructionsGeneratedSince(&abort_start); DCHECK(abort_instructions <= kExpectedAbortInstructions); while (abort_instructions++ < kExpectedAbortInstructions) { nop(); } } } void MacroAssembler::LoadContext(Register dst, int context_chain_length) { if (context_chain_length > 0) { ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); for (int i = 1; i < context_chain_length; i++) { ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); } } else { Move(dst, cp); } } void MacroAssembler::LoadTransitionedArrayMapConditional( ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label* no_map_match) { ld(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); ld(scratch, MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize; ld(at, FieldMemOperand(scratch, offset)); Branch(no_map_match, ne, map_in_out, Operand(at)); offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize; ld(map_in_out, FieldMemOperand(scratch, offset)); } void MacroAssembler::LoadGlobalFunction(int index, Register function) { ld(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); ld(function, FieldMemOperand(function, GlobalObject::kNativeContextOffset)); ld(function, MemOperand(function, Context::SlotOffset(index))); } void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch) { ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); if (emit_debug_code()) { Label ok, fail; CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); Branch(&ok); bind(&fail); Abort(kGlobalFunctionsMustHaveInitialMap); bind(&ok); } } void MacroAssembler::StubPrologue() { Push(ra, fp, cp); Push(Smi::FromInt(StackFrame::STUB)); Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); } void MacroAssembler::Prologue(bool code_pre_aging) { PredictableCodeSizeScope predictible_code_size_scope( this, kNoCodeAgeSequenceLength); if (code_pre_aging) { Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); nop(Assembler::CODE_AGE_MARKER_NOP); li(t9, Operand(reinterpret_cast<uint64_t>(stub->instruction_start())), ADDRESS_LOAD); nop(); jalr(t9, a0); nop(); nop(); } else { Push(ra, fp, cp, a1); nop(Assembler::CODE_AGE_SEQUENCE_NOP); nop(Assembler::CODE_AGE_SEQUENCE_NOP); nop(Assembler::CODE_AGE_SEQUENCE_NOP); Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); } } void MacroAssembler::EnterFrame(StackFrame::Type type) { daddiu(sp, sp, -5 * kPointerSize); li(t8, Operand(Smi::FromInt(type))); li(t9, Operand(CodeObject()), CONSTANT_SIZE); sd(ra, MemOperand(sp, 4 * kPointerSize)); sd(fp, MemOperand(sp, 3 * kPointerSize)); sd(cp, MemOperand(sp, 2 * kPointerSize)); sd(t8, MemOperand(sp, 1 * kPointerSize)); sd(t9, MemOperand(sp, 0 * kPointerSize)); Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); } void MacroAssembler::LeaveFrame(StackFrame::Type type) { mov(sp, fp); ld(fp, MemOperand(sp, 0 * kPointerSize)); ld(ra, MemOperand(sp, 1 * kPointerSize)); daddiu(sp, sp, 2 * 
kPointerSize); } void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); daddiu(sp, sp, -4 * kPointerSize); sd(ra, MemOperand(sp, 3 * kPointerSize)); sd(fp, MemOperand(sp, 2 * kPointerSize)); daddiu(fp, sp, 2 * kPointerSize); if (emit_debug_code()) { sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); } li(t8, Operand(CodeObject()), CONSTANT_SIZE); sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); sd(fp, MemOperand(t8)); li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); sd(cp, MemOperand(t8)); const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (save_doubles) { int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; int space = kNumOfSavedRegisters * kDoubleSize ; Dsubu(sp, sp, Operand(space)); for (int i = 0; i < kNumOfSavedRegisters; i++) { FPURegister reg = FPURegister::from_code(2 * i); sdc1(reg, MemOperand(sp, i * kDoubleSize)); } } DCHECK(stack_space >= 0); Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize)); if (frame_alignment > 0) { DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); } daddiu(at, sp, kPointerSize); sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); } void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context, bool do_return) { if (save_doubles) { int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize + kNumOfSavedRegisters * kDoubleSize)); for (int i = 0; i < kNumOfSavedRegisters; i++) { FPURegister reg = FPURegister::from_code(2 * i); ldc1(reg, MemOperand(t8, i * kDoubleSize)); } } li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); sd(zero_reg, MemOperand(t8)); if (restore_context) { li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); ld(cp, MemOperand(t8)); } li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); sd(a3, MemOperand(t8)); mov(sp, fp); ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); if (argument_count.is_valid()) { dsll(t8, argument_count, kPointerSizeLog2); daddu(sp, sp, t8); } if (do_return) { Ret(USE_DELAY_SLOT); } daddiu(sp, sp, 2 * kPointerSize); } void MacroAssembler::InitializeNewString(Register string, Register length, Heap::RootListIndex map_index, Register scratch1, Register scratch2) { dsll32(scratch1, length, 0); LoadRoot(scratch2, map_index); sd(scratch1, FieldMemOperand(string, String::kLengthOffset)); li(scratch1, Operand(String::kEmptyHashField)); sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); } int MacroAssembler::ActivationFrameAlignment() { return base::OS::ActivationFrameAlignment(); } void MacroAssembler::AssertStackIsAligned() { if (emit_debug_code()) { const int frame_alignment = ActivationFrameAlignment(); const int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { Label alignment_as_expected; DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); andi(at, sp, frame_alignment_mask); Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); stop(�); 
bind(&alignment_as_expected); } } } void MacroAssembler::JumpIfNotPowerOfTwoOrZero( Register reg, Register scratch, Label* not_power_of_two_or_zero) { Dsubu(scratch, reg, Operand(1)); Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, scratch, Operand(zero_reg)); and_(at, scratch, reg); Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); } void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { DCHECK(!reg.is(overflow)); mov(overflow, reg); SmiTag(reg); xor_(overflow, overflow, reg); } void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src, Register overflow) { if (dst.is(src)) { SmiTagCheckOverflow(dst, overflow); } else { DCHECK(!dst.is(src)); DCHECK(!dst.is(overflow)); DCHECK(!src.is(overflow)); SmiTag(dst, src); xor_(overflow, dst, src); } } void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) { if (SmiValuesAre32Bits()) { lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); } else { lw(dst, src); SmiUntag(dst); } } void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) { if (SmiValuesAre32Bits()) { lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); dsll(dst, dst, scale); } else { lw(dst, src); DCHECK(scale >= kSmiTagSize); sll(dst, dst, scale - kSmiTagSize); } } void MacroAssembler::SmiLoadWithScale(Register d_smi, Register d_scaled, MemOperand src, int scale) { if (SmiValuesAre32Bits()) { ld(d_smi, src); dsra(d_scaled, d_smi, kSmiShift - scale); } else { lw(d_smi, src); DCHECK(scale >= kSmiTagSize); sll(d_scaled, d_smi, scale - kSmiTagSize); } } void MacroAssembler::SmiLoadUntagWithScale(Register d_int, Register d_scaled, MemOperand src, int scale) { if (SmiValuesAre32Bits()) { lw(d_int, UntagSmiMemOperand(src.rm(), src.offset())); dsll(d_scaled, d_int, scale); } else { lw(d_int, src); SmiUntag(d_int); sll(d_scaled, d_int, scale); } } void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case) { JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT); SmiUntag(dst, src); } void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case) { JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT); SmiUntag(dst, src); } void MacroAssembler::JumpIfSmi(Register value, Label* smi_label, Register scratch, BranchDelaySlot bd) { DCHECK_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); } void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch, BranchDelaySlot bd) { DCHECK_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); } void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi) { STATIC_ASSERT(kSmiTag == 0); DCHECK_EQ(1, kSmiTagMask); or_(at, reg1, reg2); JumpIfNotSmi(at, on_not_both_smi); } void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi) { STATIC_ASSERT(kSmiTag == 0); DCHECK_EQ(1, kSmiTagMask); and_(at, reg1, reg2); JumpIfSmi(at, on_either_smi); } void MacroAssembler::AssertNotSmi(Register object) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); andi(at, object, kSmiTagMask); Check(ne, kOperandIsASmi, at, Operand(zero_reg)); } } void MacroAssembler::AssertSmi(Register object) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); andi(at, object, kSmiTagMask); Check(eq, kOperandIsASmi, at, Operand(zero_reg)); } } void MacroAssembler::AssertString(Register object) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); SmiTst(object, a4); Check(ne, 
kOperandIsASmiAndNotAString, a4, Operand(zero_reg)); push(object); ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE)); pop(object); } } void MacroAssembler::AssertName(Register object) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); SmiTst(object, a4); Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg)); push(object); ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE)); pop(object); } } void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, Register scratch) { if (emit_debug_code()) { Label done_checking; AssertNotSmi(object); LoadRoot(scratch, Heap::kUndefinedValueRootIndex); Branch(&done_checking, eq, object, Operand(scratch)); push(object); ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex); Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch)); pop(object); bind(&done_checking); } } void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { if (emit_debug_code()) { DCHECK(!reg.is(at)); LoadRoot(at, index); Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); } } void MacroAssembler::JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label* on_not_heap_number) { ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map)); } void MacroAssembler::LookupNumberStringCache(Register object, Register result, Register scratch1, Register scratch2, Register scratch3, Label* not_found) { Register number_string_cache = result; Register mask = scratch3; LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); dsra32(mask, mask, 1); Daddu(mask, mask, -1); Label is_smi; Label load_result_from_cache; JumpIfSmi(object, &is_smi); CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, DONT_DO_SMI_CHECK); STATIC_ASSERT(8 == kDoubleSize); Daddu(scratch1, object, Operand(HeapNumber::kValueOffset - kHeapObjectTag)); ld(scratch2, MemOperand(scratch1, kPointerSize)); ld(scratch1, MemOperand(scratch1, 0)); Xor(scratch1, scratch1, Operand(scratch2)); And(scratch1, scratch1, Operand(mask)); dsll(scratch1, scratch1, kPointerSizeLog2 + 1); Daddu(scratch1, number_string_cache, scratch1); Register probe = mask; ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); JumpIfSmi(probe, not_found); ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); BranchF(&load_result_from_cache, NULL, eq, f12, f14); Branch(not_found); bind(&is_smi); Register scratch = scratch1; dsra32(scratch, scratch, 0); And(scratch, mask, Operand(scratch)); dsll(scratch, scratch, kPointerSizeLog2 + 1); Daddu(scratch, number_string_cache, scratch); ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); Branch(not_found, ne, object, Operand(probe)); bind(&load_result_from_cache); ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); IncrementCounter(isolate()->counters()->number_to_string_native(), 1, scratch1, scratch2); } void 
MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings( Register first, Register second, Register scratch1, Register scratch2, Label* failure) { ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1, scratch2, failure); } void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first, Register second, Register scratch1, Register scratch2, Label* failure) { STATIC_ASSERT(kSmiTag == 0); And(scratch1, first, Operand(second)); JumpIfSmi(scratch1, failure); JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1, scratch2, failure); } void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte( Register first, Register second, Register scratch1, Register scratch2, Label* failure) { const int kFlatOneByteStringMask = kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; const int kFlatOneByteStringTag = kStringTag | kOneByteStringTag | kSeqStringTag; DCHECK(kFlatOneByteStringTag <= 0xffff); andi(scratch1, first, kFlatOneByteStringMask); Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag)); andi(scratch2, second, kFlatOneByteStringMask); Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag)); } void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, Label* failure) { const int kFlatOneByteStringMask = kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; const int kFlatOneByteStringTag = kStringTag | kOneByteStringTag | kSeqStringTag; And(scratch, type, Operand(kFlatOneByteStringMask)); Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag)); } static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 
8 : 4; int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; num_reg_arguments += 2 * num_double_arguments; if (num_reg_arguments > kRegisterPassedArguments) { stack_passed_words += num_reg_arguments - kRegisterPassedArguments; } stack_passed_words += kCArgSlotCount; return stack_passed_words; } void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index, Register value, Register scratch, uint32_t encoding_mask) { Label is_object; SmiTst(string, at); Check(ne, kNonObject, at, Operand(zero_reg)); ld(at, FieldMemOperand(string, HeapObject::kMapOffset)); lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset)); andi(at, at, kStringRepresentationMask | kStringEncodingMask); li(scratch, Operand(encoding_mask)); Check(eq, kUnexpectedStringType, at, Operand(scratch)); ld(at, FieldMemOperand(string, String::kLengthOffset)); Check(lt, kIndexIsTooLarge, index, Operand(at)); DCHECK(Smi::FromInt(0) == 0); Check(ge, kIndexIsNegative, index, Operand(zero_reg)); } void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { int frame_alignment = ActivationFrameAlignment(); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); if (frame_alignment > kPointerSize) { mov(scratch, sp); Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); } } void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { li(t8, Operand(function)); CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments); } void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK(has_frame()); if (emit_debug_code()) { int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); Label alignment_as_expected; And(at, sp, Operand(frame_alignment_mask)); Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); stop(�); bind(&alignment_as_expected); } } if (!function.is(t9)) { mov(t9, function); function = t9; } Call(function); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); if (base::OS::ActivationFrameAlignment() > kPointerSize) argument
H A Dassembler-mips64.h653 void bgtz(Register rs, int16_t offset);
H A Dassembler-mips64.cc1201 void Assembler::bgtz(Register rs, int16_t offset) { function in class:v8::Assembler
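
The two hits above are the declaration (assembler-mips64.h:653) and definition (assembler-mips64.cc:1201) of v8's MIPS64 bgtz emitter. For readers tracing these results, the sketch below shows the generic MIPS I-type bit layout that any such emitter has to produce for "bgtz rs, offset": opcode 000111b in the top six bits, rs in bits 25..21, rt forced to zero, and a signed 16-bit offset counted in instruction words relative to the delay slot. This is an illustrative stand-alone helper, not v8's implementation; the name EncodeBgtz is invented for the example.

    #include <cstdint>
    #include <cstdio>

    // Generic MIPS I-type layout for "bgtz rs, offset":
    // [31:26] opcode = 000111b, [25:21] rs, [20:16] rt = 0,
    // [15:0]  signed offset in instruction words (delay-slot relative).
    uint32_t EncodeBgtz(uint32_t rs, int16_t offset_words) {
      const uint32_t kBgtzOpcode = 0x07;  // 000111b
      return (kBgtzOpcode << 26) | ((rs & 0x1F) << 21) |
             static_cast<uint16_t>(offset_words);
    }

    int main() {
      // bgtz $3, +8 bytes: 8 / 4 = 2 instruction words.
      std::printf("0x%08x\n", EncodeBgtz(3, 2));  // prints 0x1c600002
      return 0;
    }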

Completed in 2375 milliseconds