Searched refs:And (Results 1 - 25 of 219) sorted by relevance


/external/clang/lib/Analysis/
ThreadSafetyLogical.cpp 45 case LExpr::And:
50 return RNeg ? RightOrOperator(cast<And>(RHS))
51 : RightAndOperator(cast<And>(RHS));
70 case LExpr::And:
75 return LNeg ? LeftAndOperator(cast<And>(LHS))
76 : LeftOrOperator(cast<And>(LHS));
/external/clang/include/clang/Basic/
OperatorPrecedence.h 36 And = 8, // & enumerator in enum:clang::prec::Level
/external/easymock/src/org/easymock/internal/matchers/
And.java 24 public class And implements IArgumentMatcher, Serializable { class in inherits:IArgumentMatcher,Serializable
30 public And(List<IArgumentMatcher> matchers) { method in class:And
/external/mockito/src/org/mockito/internal/matchers/
And.java 17 public class And extends ArgumentMatcher implements Serializable { class in inherits:ArgumentMatcher,Serializable
22 public And(List<Matcher> matchers) { method in class:And
/external/antlr/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/
ANTLRUnwantedTokenException.h 42 - (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected;
/external/antlr/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/
ANTLRUnwantedTokenException.h 42 - (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected;
/external/antlr/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/
ANTLRUnwantedTokenException.h 42 - (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected;
/external/antlr/antlr-3.4/runtime/ObjC/Framework/
ANTLRUnwantedTokenException.h 42 - (id) initWithStream:(id<ANTLRIntStream>)anInput And:(NSInteger)expected;
/external/clang/include/clang/Analysis/Analyses/
ThreadSafetyLogical.h 26 And, enumerator in enum:clang::threadSafety::lexpr::LExpr::Opcode
69 class And : public BinOp { class in namespace:clang::threadSafety::lexpr
71 And(LExpr *LHS, LExpr *RHS) : BinOp(LHS, RHS, LExpr::And) {} function in class:clang::threadSafety::lexpr::And
73 static bool classof(const LExpr *E) { return E->kind() == LExpr::And; }
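The ThreadSafetyLogical hits above show the LLVM-style RTTI pattern behind LExpr::And: the opcode is stored as a kind tag on the base class, and the static classof is what lets cast<And>(RHS) in ThreadSafetyLogical.cpp check the dynamic type. A minimal self-contained sketch of that pattern, assuming nothing beyond the declarations quoted above; the dyn_cast_sketch helper and the Terminal opcode are illustrative stand-ins, not clang's code.

#include <cassert>

// Minimal stand-in for the LExpr / BinOp / And shape quoted above (sketch only).
class LExpr {
 public:
  enum Opcode { Terminal, And, Or, Not };
  explicit LExpr(Opcode Kind) : Kind(Kind) {}
  Opcode kind() const { return Kind; }
 private:
  Opcode Kind;
};

class BinOp : public LExpr {
 public:
  BinOp(LExpr *LHS, LExpr *RHS, Opcode Kind) : LExpr(Kind), LHS(LHS), RHS(RHS) {}
  LExpr *left() const { return LHS; }
  LExpr *right() const { return RHS; }
 private:
  LExpr *LHS, *RHS;
};

class And : public BinOp {
 public:
  And(LExpr *LHS, LExpr *RHS) : BinOp(LHS, RHS, LExpr::And) {}
  // classof is the hook that cast<>/dyn_cast<> use to verify the kind tag.
  static bool classof(const LExpr *E) { return E->kind() == LExpr::And; }
};

// Hand-rolled dyn_cast-alike, only to show how classof is consumed (illustrative).
template <typename T> T *dyn_cast_sketch(LExpr *E) {
  return T::classof(E) ? static_cast<T *>(E) : nullptr;
}

int main() {
  LExpr A(LExpr::Terminal), B(LExpr::Terminal);
  And Conj(&A, &B);
  assert(dyn_cast_sketch<And>(&Conj) != nullptr);  // kind tag identifies the node
}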
/external/vixl/examples/
getting-started.cc 38 __ And(x0, x0, x1);
/external/mockito/src/org/mockito/internal/progress/
ArgumentMatcherStorageImpl.java 10 import org.mockito.internal.matchers.And;
53 assertStateFor("And(?)", TWO_SUB_MATCHERS);
54 And and = new And(popLastArgumentMatchers(TWO_SUB_MATCHERS));
/external/apache-xml/src/main/java/org/apache/xpath/operations/
And.java 19 * $Id: And.java 468655 2006-10-28 07:12:06Z minchau $
30 public class And extends Operation class in inherits:Operation
/external/chromium_org/third_party/re2/re2/
prefilter.h 62 static Prefilter* And(Prefilter* a, Prefilter* b);
68 // Generalized And/Or
prefilter.cc 41 // Simplify if the node is an empty Or or And.
131 Prefilter* Prefilter::And(Prefilter* a, Prefilter* b) { function in class:re2::Prefilter
207 static Info* And(Info* a, Info* b);
321 Prefilter::Info* Prefilter::Info::And(Info* a, Info* b) { function in class:re2::Prefilter::Info
329 ab->match_ = Prefilter::And(a->TakeMatch(), b->TakeMatch());
586 info = And(info, exact);
589 info = And(info, ci);
595 info = And(info, exact);
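Prefilter::And(a, b) and Info::And(a, b) above combine the match requirements of two sub-patterns: the combined filter can only fire when both inputs fire. A rough conceptual sketch of that AND-combining shape, assuming made-up Node/MakeAnd names rather than re2's actual API; the specific simplification shown (absorbing a match-everything child) is only illustrative of the kind of cleanup the "empty Or or And" comment refers to.

#include <memory>
#include <string>
#include <vector>

// Conceptual sketch of an AND node over prefilters (made-up types, not re2's).
struct Node {
  enum Op { All, Atom, AndOp };
  Op op = All;
  std::vector<std::unique_ptr<Node>> subs;  // children of an AndOp node
  std::string atom;                         // literal required by an Atom node
};

// And(a, b): both sides must match, so the result requires both children.
// A child that matches everything adds no constraint and is absorbed.
std::unique_ptr<Node> MakeAnd(std::unique_ptr<Node> a, std::unique_ptr<Node> b) {
  if (a->op == Node::All) return b;
  if (b->op == Node::All) return a;
  auto n = std::make_unique<Node>();
  n->op = Node::AndOp;
  n->subs.push_back(std::move(a));
  n->subs.push_back(std::move(b));
  return n;
}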
/external/clang/lib/Basic/
OperatorPrecedence.cpp 59 case tok::amp: return prec::And;
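OperatorPrecedence.h pins And = 8 as the binding level of bitwise '&', and the OperatorPrecedence.cpp hit maps tok::amp onto it. A small sketch of that token-to-level mapping; only And = 8 is taken from the hits above, while the neighbouring levels and token names are illustrative (they just follow the usual C precedence order).

// Sketch of a precedence table in the style of clang::prec (values other than
// And = 8 are illustrative).
namespace prec {
enum Level {
  Unknown = 0,
  LogicalOr = 4,    // ||
  LogicalAnd = 5,   // &&
  InclusiveOr = 6,  // |
  ExclusiveOr = 7,  // ^
  And = 8,          // &   <- the enumerator matched above
  Equality = 9      // == !=
};
}  // namespace prec

enum class Tok { Amp, AmpAmp, Caret, Pipe, PipePipe, EqualEqual };

prec::Level getBinOpPrecSketch(Tok t) {
  switch (t) {
    case Tok::Amp:        return prec::And;        // '&' binds tighter than '^' and '|'
    case Tok::AmpAmp:     return prec::LogicalAnd;
    case Tok::Caret:      return prec::ExclusiveOr;
    case Tok::Pipe:       return prec::InclusiveOr;
    case Tok::PipePipe:   return prec::LogicalOr;
    case Tok::EqualEqual: return prec::Equality;
  }
  return prec::Unknown;
}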
/external/llvm/test/MC/PowerPC/
ppc64-relocs-01.s 35 # 2. And a R_PPC64_TOC against no symbol (the linker will replace for the
/external/regex-re2/re2/
prefilter.h 62 static Prefilter* And(Prefilter* a, Prefilter* b);
68 // Generalized And/Or
prefilter.cc 41 // Simplify if the node is an empty Or or And.
131 Prefilter* Prefilter::And(Prefilter* a, Prefilter* b) { function in class:re2::Prefilter
207 static Info* And(Info* a, Info* b);
329 Prefilter::Info* Prefilter::Info::And(Info* a, Info* b) { function in class:re2::Prefilter::Info
337 ab->match_ = Prefilter::And(a->TakeMatch(), b->TakeMatch());
594 info = And(info, exact);
597 info = And(info, ci);
603 info = And(info, exact);
/external/qemu-pc-bios/bochs/bios/
acpi-dsdt.dsl 331 And (Local0, 0x80000000, Local0)
360 And (Local0, 0x08000000, Local0)
388 And (Local0, 0x80000000, Local0)
450 If (And (0x80, PRQ0, Local1))
497 If (And (0x80, PRQ1, Local1))
544 If (And (0x80, PRQ2, Local1))
591 If (And (0x80, PRQ3, Local1))
665 If (And(\_SB.PCI0.PCIU, ShiftLeft(1, nr))) { \
668 If (And(\_SB.PCI0.PCID, ShiftLeft(1, nr))) { \
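In ASL, And(Source1, Source2, Result) is a bitwise AND that stores the value into Result and also yields it, so the DSDT lines above are mask writes and bit tests. A rough C++ rendering of the two patterns, with made-up variable names:

#include <cstdint>

// Rough C++ equivalents of the ASL And() patterns above (names are made up).
void dsdt_masking_sketch(uint32_t& local0, uint32_t prq0) {
  // And (Local0, 0x80000000, Local0): keep only bit 31 of Local0.
  local0 &= 0x80000000u;

  // If (And (0x80, PRQ0, Local1)): compute the AND into Local1, branch on it.
  uint32_t local1 = prq0 & 0x80u;
  if (local1) {
    // bit 7 of PRQ0 is set
  }
}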
/external/easymock/src/org/easymock/internal/
LastControl.java 24 import org.easymock.internal.matchers.And;
68 stack.push(new And(popLastArgumentMatchers(count)));
/external/chromium_org/v8/src/ic/mips/
stub-cache-mips.cc 71 __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
140 __ And(scratch, scratch, Operand(mask));
151 __ And(scratch, scratch, Operand(mask2));
/external/chromium_org/v8/src/ic/mips64/
stub-cache-mips64.cc 71 __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
141 __ And(scratch, scratch, Operand(mask));
152 __ And(scratch, scratch, Operand(mask2));
/external/llvm/lib/IR/
Instruction.cpp 216 case And: return "and";
457 /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
460 return Opcode == And || Opcode == Or || Opcode == Xor ||
491 case And:
504 /// In LLVM, the And and Or operators are idempotent.
507 return Opcode == And || Opcode == Or;
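The Instruction.cpp comments above state that Add, Mul, And, Or, and Xor are associative and that And and Or are idempotent (x op x == x). A self-contained sketch of those two predicates over a stand-in opcode enum, not LLVM's actual Instruction class:

// Stand-in opcode set; the real predicates live on llvm::Instruction.
enum class Opcode { Add, Mul, And, Or, Xor, Sub, SDiv };

// Associative: (a op b) op c == a op (b op c).
bool isAssociativeSketch(Opcode Op) {
  return Op == Opcode::And || Op == Opcode::Or || Op == Opcode::Xor ||
         Op == Opcode::Add || Op == Opcode::Mul;
}

// Idempotent: a op a == a. True for And and Or, false for Add, Xor, etc.
bool isIdempotentSketch(Opcode Op) {
  return Op == Opcode::And || Op == Opcode::Or;
}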
/external/llvm/lib/Target/X86/
X86AtomicExpandPass.cpp 140 case AtomicRMWInst::And:
183 case AtomicRMWInst::And:
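AtomicRMWInst::And is LLVM's atomic read-modify-write with a bitwise AND; in C++ source it typically comes from std::atomic's fetch_and, and it is the kind of operation a pass like X86AtomicExpandPass decides how to lower. A minimal standard-library example of the operation itself:

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint32_t> flags{0xFFu};
  // Atomically clear bit 0 and return the previous value; clang typically
  // emits this as an atomicrmw 'and' in LLVM IR.
  uint32_t old = flags.fetch_and(~0x1u);
  assert(old == 0xFFu);
  assert(flags.load() == 0xFEu);
}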
/external/chromium_org/v8/src/mips64/
macro-assembler-mips64.cc 162 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
195 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
264 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
395 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
603 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
904 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { function in class:v8::internal::MacroAssembler
1700 And(except_flag, except_flag, Operand(except_mask));
1723 And(scratch,
1797 And(dst, dst, Operand((1 << num_least_bits) - 1));
1805 And(ds
1278 DCHECK(pos < 32); DCHECK(pos + size < 33); ext_(rt, rs, pos, size); }
Register scratch1, Register scratch2, Register heap_number_map, Label* not_number, ObjectToDoubleFlags flags) { Label done; if ((flags & OBJECT_NOT_SMI) == 0) { Label not_smi; JumpIfNotSmi(object, &not_smi); dsra32(scratch1, object, 0); mtc1(scratch1, result); cvt_d_w(result, result); Branch(&done); bind(&not_smi); } ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); Branch(not_number, ne, scratch1, Operand(heap_number_map)); if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { Register exponent = scratch1; Register mask_reg = scratch2; lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); li(mask_reg, HeapNumber::kExponentMask); And(exponent, exponent, mask_reg); Branch(not_number, eq, exponent, Operand(mask_reg)); } ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); bind(&done); } void MacroAssembler::SmiToDoubleFPURegister(Register smi, FPURegister value, Register scratch1) { dsra32(scratch1, smi, 0); mtc1(scratch1, value); cvt_d_w(value, value); } void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch) { DCHECK(!dst.is(overflow_dst)); DCHECK(!dst.is(scratch)); DCHECK(!overflow_dst.is(scratch)); DCHECK(!overflow_dst.is(left)); DCHECK(!overflow_dst.is(right)); if (left.is(right) && dst.is(left)) { DCHECK(!dst.is(t9)); DCHECK(!scratch.is(t9)); DCHECK(!left.is(t9)); DCHECK(!right.is(t9)); DCHECK(!overflow_dst.is(t9)); mov(t9, right); right = t9; } if (dst.is(left)) { mov(scratch, left); daddu(dst, left, right); xor_(scratch, dst, scratch); xor_(overflow_dst, dst, right); and_(overflow_dst, overflow_dst, scratch); } else if (dst.is(right)) { mov(scratch, right); daddu(dst, left, right); xor_(scratch, dst, scratch); xor_(overflow_dst, dst, left); and_(overflow_dst, overflow_dst, scratch); } else { daddu(dst, left, right); xor_(overflow_dst, dst, left); xor_(scratch, dst, right); and_(overflow_dst, scratch, overflow_dst); } } void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch) { DCHECK(!dst.is(overflow_dst)); DCHECK(!dst.is(scratch)); DCHECK(!overflow_dst.is(scratch)); DCHECK(!overflow_dst.is(left)); DCHECK(!overflow_dst.is(right)); DCHECK(!scratch.is(left)); DCHECK(!scratch.is(right)); if (left.is(right)) { mov(dst, zero_reg); mov(overflow_dst, zero_reg); return; } if (dst.is(left)) { mov(scratch, left); dsubu(dst, left, right); xor_(overflow_dst, dst, scratch); xor_(scratch, scratch, right); and_(overflow_dst, scratch, overflow_dst); } else if (dst.is(right)) { mov(scratch, right); dsubu(dst, left, right); xor_(overflow_dst, dst, left); xor_(scratch, left, scratch); and_(overflow_dst, scratch, overflow_dst); } else { dsubu(dst, left, right); xor_(overflow_dst, dst, left); xor_(scratch, left, right); and_(overflow_dst, scratch, overflow_dst); } } void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, SaveFPRegsMode save_doubles) { CHECK(f->nargs < 0 || f->nargs == num_arguments); PrepareCEntryArgs(num_arguments); PrepareCEntryFunction(ExternalReference(f, isolate())); CEntryStub stub(isolate(), 1, save_doubles); CallStub(&stub); } void MacroAssembler::CallExternalReference(const ExternalReference& ext, int num_arguments, BranchDelaySlot bd) { PrepareCEntryArgs(num_arguments); PrepareCEntryFunction(ext); CEntryStub stub(isolate(), 1); CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd); } void MacroAssembler::TailCallExternalReference(const 
ExternalReference& ext, int num_arguments, int result_size) { PrepareCEntryArgs(num_arguments); JumpToExternalReference(ext); } void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size) { TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments, result_size); } void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, BranchDelaySlot bd) { PrepareCEntryFunction(builtin); CEntryStub stub(isolate(), 1); Jump(stub.GetCode(), RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd); } void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { DCHECK(flag == JUMP_FUNCTION || has_frame()); GetBuiltinEntry(t9, id); if (flag == CALL_FUNCTION) { call_wrapper.BeforeCall(CallSize(t9)); Call(t9); call_wrapper.AfterCall(); } else { DCHECK(flag == JUMP_FUNCTION); Jump(t9); } } void MacroAssembler::GetBuiltinFunction(Register target, Builtins::JavaScript id) { ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); ld(target, FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id))); } void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { DCHECK(!target.is(a1)); GetBuiltinFunction(a1, id); ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); } void MacroAssembler::SetCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { if (FLAG_native_code_counters && counter->Enabled()) { li(scratch1, Operand(value)); li(scratch2, Operand(ExternalReference(counter))); sd(scratch1, MemOperand(scratch2)); } } void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { li(scratch2, Operand(ExternalReference(counter))); ld(scratch1, MemOperand(scratch2)); Daddu(scratch1, scratch1, Operand(value)); sd(scratch1, MemOperand(scratch2)); } } void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK(value > 0); if (FLAG_native_code_counters && counter->Enabled()) { li(scratch2, Operand(ExternalReference(counter))); ld(scratch1, MemOperand(scratch2)); Dsubu(scratch1, scratch1, Operand(value)); sd(scratch1, MemOperand(scratch2)); } } void MacroAssembler::Assert(Condition cc, BailoutReason reason, Register rs, Operand rt) { if (emit_debug_code()) Check(cc, reason, rs, rt); } void MacroAssembler::AssertFastElements(Register elements) { if (emit_debug_code()) { DCHECK(!elements.is(at)); Label ok; push(elements); ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); LoadRoot(at, Heap::kFixedArrayMapRootIndex); Branch(&ok, eq, elements, Operand(at)); LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); Branch(&ok, eq, elements, Operand(at)); LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); Branch(&ok, eq, elements, Operand(at)); Abort(kJSObjectWithFastElementsMapHasSlowElements); bind(&ok); pop(elements); } } void MacroAssembler::Check(Condition cc, BailoutReason reason, Register rs, Operand rt) { Label L; Branch(&L, cc, rs, rt); Abort(reason); bind(&L); } void MacroAssembler::Abort(BailoutReason reason) { Label abort_start; bind(&abort_start); const char* msg = GetBailoutReason(reason); if (msg != NULL) { RecordComment(�); RecordComment(msg); } if (FLAG_trap_on_abort) { stop(msg); return; } li(a0, Operand(Smi::FromInt(reason))); push(a0); if 
(!has_frame_) { FrameScope scope(this, StackFrame::NONE); CallRuntime(Runtime::kAbort, 1); } else { CallRuntime(Runtime::kAbort, 1); } if (is_trampoline_pool_blocked()) { static const int kExpectedAbortInstructions = 10; int abort_instructions = InstructionsGeneratedSince(&abort_start); DCHECK(abort_instructions <= kExpectedAbortInstructions); while (abort_instructions++ < kExpectedAbortInstructions) { nop(); } } } void MacroAssembler::LoadContext(Register dst, int context_chain_length) { if (context_chain_length > 0) { ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); for (int i = 1; i < context_chain_length; i++) { ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); } } else { Move(dst, cp); } } void MacroAssembler::LoadTransitionedArrayMapConditional( ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label* no_map_match) { ld(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); ld(scratch, MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize; ld(at, FieldMemOperand(scratch, offset)); Branch(no_map_match, ne, map_in_out, Operand(at)); offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize; ld(map_in_out, FieldMemOperand(scratch, offset)); } void MacroAssembler::LoadGlobalFunction(int index, Register function) { ld(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); ld(function, FieldMemOperand(function, GlobalObject::kNativeContextOffset)); ld(function, MemOperand(function, Context::SlotOffset(index))); } void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch) { ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); if (emit_debug_code()) { Label ok, fail; CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); Branch(&ok); bind(&fail); Abort(kGlobalFunctionsMustHaveInitialMap); bind(&ok); } } void MacroAssembler::StubPrologue() { Push(ra, fp, cp); Push(Smi::FromInt(StackFrame::STUB)); Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); } void MacroAssembler::Prologue(bool code_pre_aging) { PredictableCodeSizeScope predictible_code_size_scope( this, kNoCodeAgeSequenceLength); if (code_pre_aging) { Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); nop(Assembler::CODE_AGE_MARKER_NOP); li(t9, Operand(reinterpret_cast<uint64_t>(stub->instruction_start())), ADDRESS_LOAD); nop(); jalr(t9, a0); nop(); nop(); } else { Push(ra, fp, cp, a1); nop(Assembler::CODE_AGE_SEQUENCE_NOP); nop(Assembler::CODE_AGE_SEQUENCE_NOP); nop(Assembler::CODE_AGE_SEQUENCE_NOP); Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); } } void MacroAssembler::EnterFrame(StackFrame::Type type) { daddiu(sp, sp, -5 * kPointerSize); li(t8, Operand(Smi::FromInt(type))); li(t9, Operand(CodeObject()), CONSTANT_SIZE); sd(ra, MemOperand(sp, 4 * kPointerSize)); sd(fp, MemOperand(sp, 3 * kPointerSize)); sd(cp, MemOperand(sp, 2 * kPointerSize)); sd(t8, MemOperand(sp, 1 * kPointerSize)); sd(t9, MemOperand(sp, 0 * kPointerSize)); Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); } void MacroAssembler::LeaveFrame(StackFrame::Type type) { mov(sp, fp); ld(fp, MemOperand(sp, 0 * kPointerSize)); ld(ra, MemOperand(sp, 1 * kPointerSize)); daddiu(sp, sp, 2 * 
kPointerSize); } void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); daddiu(sp, sp, -4 * kPointerSize); sd(ra, MemOperand(sp, 3 * kPointerSize)); sd(fp, MemOperand(sp, 2 * kPointerSize)); daddiu(fp, sp, 2 * kPointerSize); if (emit_debug_code()) { sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); } li(t8, Operand(CodeObject()), CONSTANT_SIZE); sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); sd(fp, MemOperand(t8)); li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); sd(cp, MemOperand(t8)); const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (save_doubles) { int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; int space = kNumOfSavedRegisters * kDoubleSize ; Dsubu(sp, sp, Operand(space)); for (int i = 0; i < kNumOfSavedRegisters; i++) { FPURegister reg = FPURegister::from_code(2 * i); sdc1(reg, MemOperand(sp, i * kDoubleSize)); } } DCHECK(stack_space >= 0); Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize)); if (frame_alignment > 0) { DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); } daddiu(at, sp, kPointerSize); sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); } void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context, bool do_return) { if (save_doubles) { int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize + kNumOfSavedRegisters * kDoubleSize)); for (int i = 0; i < kNumOfSavedRegisters; i++) { FPURegister reg = FPURegister::from_code(2 * i); ldc1(reg, MemOperand(t8, i * kDoubleSize)); } } li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); sd(zero_reg, MemOperand(t8)); if (restore_context) { li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); ld(cp, MemOperand(t8)); } li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); sd(a3, MemOperand(t8)); mov(sp, fp); ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); if (argument_count.is_valid()) { dsll(t8, argument_count, kPointerSizeLog2); daddu(sp, sp, t8); } if (do_return) { Ret(USE_DELAY_SLOT); } daddiu(sp, sp, 2 * kPointerSize); } void MacroAssembler::InitializeNewString(Register string, Register length, Heap::RootListIndex map_index, Register scratch1, Register scratch2) { dsll32(scratch1, length, 0); LoadRoot(scratch2, map_index); sd(scratch1, FieldMemOperand(string, String::kLengthOffset)); li(scratch1, Operand(String::kEmptyHashField)); sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); } int MacroAssembler::ActivationFrameAlignment() { return base::OS::ActivationFrameAlignment(); } void MacroAssembler::AssertStackIsAligned() { if (emit_debug_code()) { const int frame_alignment = ActivationFrameAlignment(); const int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { Label alignment_as_expected; DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); andi(at, sp, frame_alignment_mask); Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); stop(�); 
bind(&alignment_as_expected); } } } void MacroAssembler::JumpIfNotPowerOfTwoOrZero( Register reg, Register scratch, Label* not_power_of_two_or_zero) { Dsubu(scratch, reg, Operand(1)); Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, scratch, Operand(zero_reg)); and_(at, scratch, reg); Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); } void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { DCHECK(!reg.is(overflow)); mov(overflow, reg); SmiTag(reg); xor_(overflow, overflow, reg); } void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src, Register overflow) { if (dst.is(src)) { SmiTagCheckOverflow(dst, overflow); } else { DCHECK(!dst.is(src)); DCHECK(!dst.is(overflow)); DCHECK(!src.is(overflow)); SmiTag(dst, src); xor_(overflow, dst, src); } } void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) { if (SmiValuesAre32Bits()) { lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); } else { lw(dst, src); SmiUntag(dst); } } void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) { if (SmiValuesAre32Bits()) { lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); dsll(dst, dst, scale); } else { lw(dst, src); DCHECK(scale >= kSmiTagSize); sll(dst, dst, scale - kSmiTagSize); } } void MacroAssembler::SmiLoadWithScale(Register d_smi, Register d_scaled, MemOperand src, int scale) { if (SmiValuesAre32Bits()) { ld(d_smi, src); dsra(d_scaled, d_smi, kSmiShift - scale); } else { lw(d_smi, src); DCHECK(scale >= kSmiTagSize); sll(d_scaled, d_smi, scale - kSmiTagSize); } } void MacroAssembler::SmiLoadUntagWithScale(Register d_int, Register d_scaled, MemOperand src, int scale) { if (SmiValuesAre32Bits()) { lw(d_int, UntagSmiMemOperand(src.rm(), src.offset())); dsll(d_scaled, d_int, scale); } else { lw(d_int, src); SmiUntag(d_int); sll(d_scaled, d_int, scale); } } void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case) { JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT); SmiUntag(dst, src); } void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case) { JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT); SmiUntag(dst, src); } void MacroAssembler::JumpIfSmi(Register value, Label* smi_label, Register scratch, BranchDelaySlot bd) { DCHECK_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); } void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch, BranchDelaySlot bd) { DCHECK_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); } void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi) { STATIC_ASSERT(kSmiTag == 0); DCHECK_EQ(1, kSmiTagMask); or_(at, reg1, reg2); JumpIfNotSmi(at, on_not_both_smi); } void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi) { STATIC_ASSERT(kSmiTag == 0); DCHECK_EQ(1, kSmiTagMask); and_(at, reg1, reg2); JumpIfSmi(at, on_either_smi); } void MacroAssembler::AssertNotSmi(Register object) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); andi(at, object, kSmiTagMask); Check(ne, kOperandIsASmi, at, Operand(zero_reg)); } } void MacroAssembler::AssertSmi(Register object) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); andi(at, object, kSmiTagMask); Check(eq, kOperandIsASmi, at, Operand(zero_reg)); } } void MacroAssembler::AssertString(Register object) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); SmiTst(object, a4); Check(ne, 
kOperandIsASmiAndNotAString, a4, Operand(zero_reg)); push(object); ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE)); pop(object); } } void MacroAssembler::AssertName(Register object) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); SmiTst(object, a4); Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg)); push(object); ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE)); pop(object); } } void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, Register scratch) { if (emit_debug_code()) { Label done_checking; AssertNotSmi(object); LoadRoot(scratch, Heap::kUndefinedValueRootIndex); Branch(&done_checking, eq, object, Operand(scratch)); push(object); ld(object, FieldMemOperand(object, HeapObject::kMapOffset)); LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex); Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch)); pop(object); bind(&done_checking); } } void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { if (emit_debug_code()) { DCHECK(!reg.is(at)); LoadRoot(at, index); Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); } } void MacroAssembler::JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label* on_not_heap_number) { ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map)); } void MacroAssembler::LookupNumberStringCache(Register object, Register result, Register scratch1, Register scratch2, Register scratch3, Label* not_found) { Register number_string_cache = result; Register mask = scratch3; LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); dsra32(mask, mask, 1); Daddu(mask, mask, -1); Label is_smi; Label load_result_from_cache; JumpIfSmi(object, &is_smi); CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, DONT_DO_SMI_CHECK); STATIC_ASSERT(8 == kDoubleSize); Daddu(scratch1, object, Operand(HeapNumber::kValueOffset - kHeapObjectTag)); ld(scratch2, MemOperand(scratch1, kPointerSize)); ld(scratch1, MemOperand(scratch1, 0)); Xor(scratch1, scratch1, Operand(scratch2)); And(scratch1, scratch1, Operand(mask)); dsll(scratch1, scratch1, kPointerSizeLog2 + 1); Daddu(scratch1, number_string_cache, scratch1); Register probe = mask; ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); JumpIfSmi(probe, not_found); ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); BranchF(&load_result_from_cache, NULL, eq, f12, f14); Branch(not_found); bind(&is_smi); Register scratch = scratch1; dsra32(scratch, scratch, 0); And(scratch, mask, Operand(scratch)); dsll(scratch, scratch, kPointerSizeLog2 + 1); Daddu(scratch, number_string_cache, scratch); ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); Branch(not_found, ne, object, Operand(probe)); bind(&load_result_from_cache); ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); IncrementCounter(isolate()->counters()->number_to_string_native(), 1, scratch1, scratch2); } void 
MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings( Register first, Register second, Register scratch1, Register scratch2, Label* failure) { ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1, scratch2, failure); } void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first, Register second, Register scratch1, Register scratch2, Label* failure) { STATIC_ASSERT(kSmiTag == 0); And(scratch1, first, Operand(second)); JumpIfSmi(scratch1, failure); JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1, scratch2, failure); } void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte( Register first, Register second, Register scratch1, Register scratch2, Label* failure) { const int kFlatOneByteStringMask = kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; const int kFlatOneByteStringTag = kStringTag | kOneByteStringTag | kSeqStringTag; DCHECK(kFlatOneByteStringTag <= 0xffff); andi(scratch1, first, kFlatOneByteStringMask); Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag)); andi(scratch2, second, kFlatOneByteStringMask); Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag)); } void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, Label* failure) { const int kFlatOneByteStringMask = kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; const int kFlatOneByteStringTag = kStringTag | kOneByteStringTag | kSeqStringTag; And(scratch, type, Operand(kFlatOneByteStringMask)); Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag)); } static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 
8 : 4; int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; num_reg_arguments += 2 * num_double_arguments; if (num_reg_arguments > kRegisterPassedArguments) { stack_passed_words += num_reg_arguments - kRegisterPassedArguments; } stack_passed_words += kCArgSlotCount; return stack_passed_words; } void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index, Register value, Register scratch, uint32_t encoding_mask) { Label is_object; SmiTst(string, at); Check(ne, kNonObject, at, Operand(zero_reg)); ld(at, FieldMemOperand(string, HeapObject::kMapOffset)); lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset)); andi(at, at, kStringRepresentationMask | kStringEncodingMask); li(scratch, Operand(encoding_mask)); Check(eq, kUnexpectedStringType, at, Operand(scratch)); ld(at, FieldMemOperand(string, String::kLengthOffset)); Check(lt, kIndexIsTooLarge, index, Operand(at)); DCHECK(Smi::FromInt(0) == 0); Check(ge, kIndexIsNegative, index, Operand(zero_reg)); } void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { int frame_alignment = ActivationFrameAlignment(); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); if (frame_alignment > kPointerSize) { mov(scratch, sp); Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); } } void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { li(t8, Operand(function)); CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments); } void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK(has_frame()); if (emit_debug_code()) { int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); Label alignment_as_expected; And(at, sp, Operand(frame_alignment_mask)); Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); stop(�); bind(&alignment_as_expected); } } if (!function.is(t9)) { mov(t9, function); function = t9; } Call(function); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); if (base::OS::ActivationFrameAlignment() > kPointerSize) argument
[all...]

Completed in 680 milliseconds
