/art/compiler/dex/quick/x86/
fp_x86.cc
  432: LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
  433: branch->target = retry;
  487: LIR* branch = nullptr;
  489: branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
  507: branch->target = NewLIR0(kPseudoTargetLabel);
  516: LIR* branch = nullptr;
  536: branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
  537: branch->target = not_taken;
  542: branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
  543: branch [all...]
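
The kX86CondPE hits above are the NaN handling for x86 floating-point compares: ucomiss/ucomisd set the parity flag when either operand is unordered, so the generated code needs an extra jp branch for the NaN case alongside the ordinary condition branch. A minimal, self-contained sketch of that two-branch shape follows; LIR, NewLIR2, and the opcode constants here are simplified stand-ins for the real ART declarations, not the originals.

    #include <cstddef>

    // Stand-ins for the real ART types; see mir_to_lir.h for the originals.
    struct LIR { int opcode; int cond; LIR* target; };
    enum { kX86Jcc8, kPseudoTargetLabel, kX86CondPE, kX86CondNe };

    static LIR arena[64];
    static std::size_t arena_used = 0;

    // Mirrors NewLIR2(opcode, offset_placeholder, cond): the 0 offset is
    // patched by the assembler once the target label's address is known.
    LIR* NewLIR2(int opcode, int /*offset_placeholder*/, int cond) {
      arena[arena_used] = LIR{opcode, cond, nullptr};
      return &arena[arena_used++];
    }
    LIR* NewLIR0(int opcode) { return NewLIR2(opcode, 0, 0); }

    // After ucomiss/ucomisd, PF == 1 means "unordered" (a NaN was involved),
    // hence the extra kX86CondPE branch before the real condition test.
    void GenFpCompareBranch(LIR* nan_case) {
      LIR* unordered = NewLIR2(kX86Jcc8, 0, kX86CondPE);  // jp -> NaN handling.
      unordered->target = nan_case;
      LIR* taken = NewLIR2(kX86Jcc8, 0, kX86CondNe);      // Ordinary jne.
      taken->target = NewLIR0(kPseudoTargetLabel);        // As at line 507 above.
    }
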
call_x86.cc
  196: StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
  197:     : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
  231: LIR* branch = OpCondBranch(kCondUlt, nullptr);
  233: new(arena_)StackOverflowSlowPath(this, branch,
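
The StackOverflowSlowPath hits show the LIRSlowPath idiom used throughout this directory: the fast path emits a guard branch with a null target, wraps it in a slow-path object, and that object later binds the target when its out-of-line code is generated. A condensed, hypothetical rendering of the idiom (types and the Compile hook are simplified, not the real signatures):

    #include <cstddef>

    struct LIR { LIR* target = nullptr; };  // Stand-in for the real LIR node.

    class LIRSlowPath {
     public:
      explicit LIRSlowPath(LIR* branch) : branch_(branch) {}
      virtual ~LIRSlowPath() {}
      // Called after the main method body is emitted; generates the
      // out-of-line code and binds branch_->target to its entry label.
      virtual void Compile() = 0;
     protected:
      LIR* branch_;
    };

    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(LIR* branch, std::size_t sp_displace)
          : LIRSlowPath(branch), sp_displace_(sp_displace) {}
      void Compile() override {
        // Sketch: bind branch_->target to a label here, then emit code that
        // pops sp_displace_ bytes of frame and calls the throw entrypoint.
        (void)sp_displace_;
      }
     private:
      std::size_t sp_displace_;  // Frame size to unwind before throwing.
    };
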
int_x86.cc
  102: LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */,
  104: branch->target = target;
  105: return branch;
  121: LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
  122: branch->target = target;
  123: return branch;
  399: // Do special compare/branch against simple const operand
  627: LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
  638: branch->target = NewLIR0(kPseudoTargetLabel);
  654: LIR* branch
  710: LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
  800: LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
  1540: LIR* branch = OpCondBranch(kCondUge, nullptr);
  1577: LIR* branch = OpCondBranch(kCondLs, nullptr);
  3378: LIR* branch;
  [all...]
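
Nearly every hit in int_x86.cc follows the same pattern: the Jcc is emitted with a zero placeholder where its byte offset belongs, and the destination is recorded as a LIR* target pointer that the assembly phase later turns into a real displacement. A small sketch of that helper shape, again with simplified stand-ins rather than ART's actual definitions:

    // Simplified stand-ins (see the fp_x86.cc sketch above for a fuller model).
    struct LIR { int opcode; int cond; LIR* target; };
    static LIR slab[16]; static int used = 0;
    static LIR* NewLIR2(int opcode, int /*offset placeholder*/, int cond) {
      slab[used] = LIR{opcode, cond, nullptr};
      return &slab[used++];
    }

    // Both usage modes visible above: pass a real target, or pass nullptr and
    // patch branch->target afterwards (the NewLIR0(kPseudoTargetLabel) pattern
    // at line 638).
    LIR* OpCondBranchSketch(int cc, LIR* target) {
      LIR* branch = NewLIR2(0 /*kX86Jcc8*/, 0 /* lir operand for Jcc offset */, cc);
      branch->target = target;  // nullptr allowed; must be set before assembly.
      return branch;
    }
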
utility_x86.cc
  120: LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
  122: branch->target = target;
  123: return branch;
  953: LIR* branch = OpCondBranch(cond, target);
  954: return branch;

/art/runtime/arch/mips64/
jni_entrypoints_mips64.S
  49: .cpreturn  # Restore gp from t8 in branch delay slot. gp is not used
quick_entrypoints_mips64.S
  523: * thread and we branch to another stub to deliver it.
  567: beq $t9, $t3, 1f  # branch if result type char == 'D'
  569: beq $t9, $t3, 2f  # branch if result type char == 'F'
  571: beq $t9, $t3, 3f  # branch if result type char == 'J'
  701: beq $t1, $t2, 1f  # branch if result type char == 'D'
  703: beq $t1, $t3, 1f  # branch if result type char == 'F'
  803: beq $t1, $t2, 1f  # branch if result type char == 'D'
  805: beq $t1, $t3, 1f  # branch if result type char == 'F'
  870: .cpreturn  # Restore gp from t8 in branch delay slot.
  929: .cpreturn  # Restore gp from t8 in branch dela [all...]

/art/compiler/dex/quick/mips/
int_mips.cc
  71: LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
  78: branch->target = target;
  84: LIR* branch;
  134: branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
  142: branch = NewLIR1(br_op, t_reg.GetReg());
  145: branch->target = target;
  146: return branch;
  150: LIR* branch;
  155: branch = OpCmpBranch(cond, reg, t_reg, target);
  157: return branch;
  [all...]
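
The int_mips.cc hits reflect that MIPS has no condition flags: OpCmpBranch can use a native two-register branch for equality tests, but anything else is synthesized by materializing the condition into a temporary with slt/sltu and branching on that register (the NewLIR1 at line 142). A hedged sketch of that selection; opcode names, Reg, and the emitters below are stand-ins:

    struct LIR { LIR* target = nullptr; };
    using Reg = int;
    enum Cond { kCondEq, kCondNe, kCondLt };
    enum { kMipsBeq, kMipsBne, kMipsSlt, kMipsBnez };

    static LIR slab[16]; static int used = 0;
    static LIR* Emit() { return &slab[used++]; }
    static LIR* NewLIR1(int, Reg) { return Emit(); }
    static LIR* NewLIR2(int, Reg, Reg) { return Emit(); }
    static LIR* NewLIR3(int, Reg, Reg, Reg) { return Emit(); }
    static Reg AllocTemp() { return 8; }  // Pretend a temp register is free.

    LIR* OpCmpBranchSketch(Cond cond, Reg src1, Reg src2, LIR* target) {
      LIR* branch;
      if (cond == kCondEq) {
        branch = NewLIR2(kMipsBeq, src1, src2);   // Native beq.
      } else if (cond == kCondNe) {
        branch = NewLIR2(kMipsBne, src1, src2);   // Native bne.
      } else {
        Reg t = AllocTemp();
        NewLIR3(kMipsSlt, t, src1, src2);         // t = (src1 < src2).
        branch = NewLIR1(kMipsBnez, t);           // Branch on the temp.
      }
      branch->target = target;                    // As at line 145 above.
      return branch;
    }
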
call_mips.cc
  96: // Now, fill the branch delay slot.
  176: // Now, fill the branch delay slot with bias strip.
  318: StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
  319:     : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
  340: LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
  341: AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * ptr_size));

/art/compiler/dex/quick/
gen_common.cc
  84: LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
  87: AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
  154: // Second branch to the slow path, or null if there's only one branch.
  182: LIR* branch = OpUnconditionalBranch(nullptr);
  183: AddDivZeroCheckSlowPath(branch);
  187: LIR* branch = OpCondBranch(c_code, nullptr);
  188: AddDivZeroCheckSlowPath(branch);
  192: LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  193: AddDivZeroCheckSlowPath(branch);
  196: AddDivZeroCheckSlowPath(LIR* branch)
  235: LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
  265: LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
  272: NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
  284: LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
  1418: LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0, kRef), 0, nullptr);
  2085: SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
  2107: LIR* branch = OpTestSuspend(nullptr);
  2126: LIR* branch = OpUnconditionalBranch(nullptr);
  [all...]
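
Lines 182-193 above show the same divide-by-zero slow path being reached three different ways, depending on what the surrounding code already knows: an unconditional branch when the divisor is a known zero constant, a flags-only branch when a previous instruction already set the condition codes, and an explicit compare against zero otherwise. A condensed sketch of that funnel (types and emitters are simplified stand-ins):

    struct LIR { LIR* target = nullptr; };
    using Reg = int;
    using ConditionCode = int;
    static const ConditionCode kCondEq = 0;

    static LIR slab[8]; static int used = 0;
    static LIR* OpUnconditionalBranch(LIR* t) { slab[used].target = t; return &slab[used++]; }
    static LIR* OpCondBranch(ConditionCode, LIR* t) { slab[used].target = t; return &slab[used++]; }
    static LIR* OpCmpImmBranch(ConditionCode, Reg, int, LIR* t) { slab[used].target = t; return &slab[used++]; }
    static void AddDivZeroCheckSlowPath(LIR* /*branch*/) { /* queue slow path */ }

    void GenDivZeroCheck(Reg divisor, bool known_zero, bool flags_set, ConditionCode c_code) {
      LIR* branch;
      if (known_zero) {
        branch = OpUnconditionalBranch(nullptr);               // Always throws.
      } else if (flags_set) {
        branch = OpCondBranch(c_code, nullptr);                // Reuse existing flags.
      } else {
        branch = OpCmpImmBranch(kCondEq, divisor, 0, nullptr); // Explicit test.
      }
      AddDivZeroCheckSlowPath(branch);                         // One shared slow path.
    }
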
mir_to_lir.cc
  29: SpecialSuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
  30:     : LIRSlowPath(m2l, branch, cont),
  233: LIR* branch = OpTestSuspend(nullptr);
  237: new (arena_) SpecialSuspendCheckSlowPath(this, branch, cont);
  1338: // If the fall_through block is no longer laid out consecutively, drop in a branch.
codegen_util.cc
  924: * branch table during the assembly phase. All resource flags
  952: * int targets[size]  branch targets, relative to switch opcode
  973: * int targets[size]  branch targets, relative to switch opcode
  1264: LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  1265: return branch;
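
The comments at lines 952 and 973 describe the Dalvik switch payloads the backend walks when it builds these branch tables. For reference, a sketch of the two payload layouts as plain structs; the field names are mine, while the idents (0x0100 for packed-switch, 0x0200 for sparse-switch) come from the Dalvik bytecode format:

    #include <cstdint>

    // packed-switch-payload: consecutive keys starting at first_key.
    struct PackedSwitchPayload {
      uint16_t ident;      // 0x0100
      uint16_t size;       // Number of targets.
      int32_t first_key;   // Key matching targets[0].
      // Followed in memory by: int32_t targets[size],
      // each relative to the address of the switch opcode itself.
    };

    // sparse-switch-payload: explicit, sorted key list.
    struct SparseSwitchPayload {
      uint16_t ident;      // 0x0200
      uint16_t size;       // Number of key/target pairs.
      // Followed in memory by: int32_t keys[size]; int32_t targets[size],
      // again relative to the switch opcode.
    };
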
mir_to_lir.h
  467: // branch over them.
  491: // So you see we need two labels and two branches. The first branch (called fromfast) is
  492: // the conditional branch to the slow path code. The second label (called cont) is used
  493: // as an unconditional branch target for getting back to the code after the slow path
  791: void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
  1125: * @brief Compare memory to immediate, and branch if condition true.
  1126: * @param cond The condition code that when true will branch to the target.
  1132: * @param target branch target (or null)
  1134: * @returns The branch instruction that was generated.
  1389: // The default implementation will create a chained compare-and-branch
  [all...]
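
The comment block around lines 491-493 is the clearest statement of the slow-path scheme all these files share: one conditional branch out of the fast path ("fromfast") whose target is the out-of-line code, and one label after the fast path ("cont") that the slow path branches back to. Schematically, with stand-in helpers rather than the real Mir2Lir API:

    struct LIR { LIR* target = nullptr; };

    static LIR slab[8]; static int used = 0;
    static LIR* OpCondBranch(int /*cond*/, LIR* t) { slab[used].target = t; return &slab[used++]; }
    static LIR* NewLabel() { return &slab[used++]; }
    static void AddSlowPath(LIR* /*fromfast*/, LIR* /*cont*/) { /* queued for later */ }

    void EmitGuardedFastPath() {
      LIR* fromfast = OpCondBranch(/*kCondEq*/ 0, nullptr);  // Rarely taken.
      // ... fast-path code ...
      LIR* cont = NewLabel();  // Execution rejoins here.
      // When the slow path is compiled later, fromfast->target is bound to its
      // entry label, and its last instruction branches unconditionally to cont.
      AddSlowPath(fromfast, cont);
    }
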
gen_invoke.cc
  48: void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
  71: AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
  966: // Generate conditional branch only, as the OR set a condition state (we are interested in a 'Z' flag).
  969: // Generate compare and branch.

/art/compiler/dex/quick/arm64/
call_arm64.cc
  90: // Key does match: branch to case label.
  94: // Add displacement to base branch address and go!
  137: // Get base branch address.
  142: // Add displacement to base branch address and go!
  362: StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
  363:     : LIRSlowPath(m2l, branch),
  386: LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
  387: AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
int_arm64.cc
  265: * Generate a register comparison to an immediate and branch. Caller
  266: * is responsible for setting branch target field.
  270: LIR* branch = nullptr;
  276: branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
  282: branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
  287: branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
  291: if (branch == nullptr) {
  293: branch = NewLIR2(kA64B2ct, arm_cond, 0);
  296: branch->target = target;
  297: return branch;
  310: LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  [all...]
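
The OpCmpImmBranch hits in int_arm64.cc choose the cheapest A64 encoding for a compare against an immediate: cbz/cbnz for equality tests against zero, a single-bit tbz/tbnz where one bit decides the condition (the NewLIR3 at line 287, which carries the bit number), and only otherwise a full cmp plus b.cond (the kA64B2ct at line 293). The mapping below is my reading of that selection and should be treated as a sketch; opcode names and helpers are stand-ins:

    struct LIR { LIR* target = nullptr; };
    using Reg = int;
    enum Cond { kCondEq, kCondNe, kCondLt, kCondGe, kCondOther };
    enum { kCbz, kCbnz, kTbz, kTbnz, kBcond };

    static LIR slab[8]; static int used = 0;
    static LIR* NewLIR2(int, int) { return &slab[used++]; }
    static LIR* NewLIR3(int, int, int) { return &slab[used++]; }
    static void OpCmpImmediate(Reg, int) { /* cmp reg, #imm */ }

    LIR* OpCmpImmBranchSketch(Cond cond, Reg reg, int value, bool is_wide) {
      const int sign_bit = is_wide ? 63 : 31;
      LIR* branch = nullptr;
      if (value == 0) {
        if (cond == kCondEq) branch = NewLIR2(kCbz, reg);                  // cbz
        else if (cond == kCondNe) branch = NewLIR2(kCbnz, reg);            // cbnz
        else if (cond == kCondLt) branch = NewLIR3(kTbnz, reg, sign_bit);  // sign set
        else if (cond == kCondGe) branch = NewLIR3(kTbz, reg, sign_bit);   // sign clear
      }
      if (branch == nullptr) {            // General case, as at line 291.
        OpCmpImmediate(reg, value);
        branch = NewLIR2(kBcond, cond);   // b.<cond>
      }
      return branch;  // Caller sets branch->target (lines 296-297).
    }
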
utility_arm64.cc
  546: LIR* branch = NewLIR2(kA64B2ct, ArmConditionEncoding(cc),
  548: branch->target = target;
  549: return branch;

/art/compiler/dex/quick/arm/
call_arm.cc
  53: * add rARM_PC, r_disp  ; This is the branch from which we compute displacement
  84: // Establish loop branch target
  202: // cmp-and-branch branches to eq where r2 will be used. Copy the
  450: StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
  451:     : LIRSlowPath(m2l, branch), restore_lr_(restore_lr),
  465: // Load the entrypoint directly into the pc instead of doing a load + branch. Assumes
  480: LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
  482: AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, true, spill_size));
  498: LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_SP, rs_r12, nullptr);
  499: AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, fals [all...]
int_arm.cc
  329: // Do special compare/branch against simple const operand if not already in registers.
  377: * Generate a register comparison to an immediate and branch. Caller
  378: * is responsible for setting branch target field.
  381: LIR* branch = nullptr;
  385: * compare-and-branch if zero is ideal if it will reach. However, because null checks
  386: * branch forward to a slow path, they will frequently not reach - and thus have to
  388: * pass). Here we estimate the branch distance for checks, and if large directly
  396: branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
  401: branch = NewLIR2(kThumb2Cbz, reg.GetReg(), 0);
  405: if (branch [all...]
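
The comment around lines 385-388 explains why Thumb2's compact cb(n)z is not always usable: it can only branch forward a short distance (0-126 bytes) and only tests a register against zero, and null-check branches to slow paths frequently overshoot that, forcing a later rewrite into cmp plus a conditional branch. A sketch of the estimate-then-choose logic the comment describes; the threshold handling and helpers here are illustrative, not ART's exact heuristic:

    struct LIR { LIR* target = nullptr; };
    using Reg = int;
    enum Cond { kCondEq, kCondNe, kCondOther };
    enum { kThumb2Cbz, kThumb2Cbnz, kThumbBCond };

    static LIR slab[8]; static int used = 0;
    static LIR* NewLIR2(int, int) { return &slab[used++]; }
    static void OpCmpImmediate(Reg, int) { /* cmp reg, #0 */ }

    LIR* GenCmpZeroBranchSketch(Cond cond, Reg reg, int estimated_forward_bytes) {
      // cb(n)z encodes only a 0..126 byte forward offset and only eq/ne vs zero.
      bool cbz_reaches = (cond == kCondEq || cond == kCondNe) &&
                         estimated_forward_bytes >= 0 &&
                         estimated_forward_bytes <= 126;
      if (cbz_reaches) {
        return NewLIR2(cond == kCondEq ? kThumb2Cbz : kThumb2Cbnz, reg);
      }
      OpCmpImmediate(reg, 0);             // Fall back to cmp + b.<cond>.
      return NewLIR2(kThumbBCond, cond);
    }
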
utility_arm.cc
  264: LIR* branch = NewLIR2(kThumbBCond, 0 /* offset to be patched */,
  266: branch->target = target;
  267: return branch;

/art/compiler/utils/arm/
assembler_thumb2.cc
  1350: // This is always unresolved as it must be a forward branch.
  1610: Branch::Size size = AddBranch(branch_type, pc, label->Position(), cond);  // Resolved branch.
  1612: // The branch is to a bound label which means that it's a backwards branch. We know the
  1614: // branch the size may change if it so happens that other branches change size that change
  1615: // the distance to the target and that distance puts this branch over the limit for 16 bits.
  1618: Emit16(0);  // Space for a 16 bit branch.
  1620: Emit32(0);  // Space for a 32 bit branch.
  1624: uint16_t branch_id = AddBranch(branch_type, pc, cond);  // Unresolved branch.
  1631: label->LinkTo(branch_id);  // Link to the branch I
  2256: Branch* branch = GetBranch(position);  // Get the branch at this id.
  [all...]
assembler_thumb2.h
  41: for (auto& branch : branches_) {
  42:   delete branch;
  439: bool force_32bit_branches_;  // Force the assembler to use 32 bit branch instructions.
  473: // depends on both the type of branch and the offset to which it is branching. When
  474: // generating code for branches we don't know the size before hand (if the branch is
  477: // we can determine the actual size of the branch. However, if we had guessed wrong before
  479: // instruction (assume that we never decrease the size of a branch).
  481: // To handle this, we keep a record of every branch in the program. The actual instruction
  482: // encoding for these is delayed until we know the final size of every branch. When we
  483: // bind a label to a branch (w [all...]
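
The header comment spells out a classic branch-relaxation scheme: record every branch with an optimistically small (16-bit) guess, delay final encoding until all sizes are known, and whenever a widening pushes code further apart, re-check the remaining branches, relying on the sizes-never-shrink rule for termination. A self-contained sketch of that fixed-point loop, as I understand the comment; byte ranges and layout bookkeeping are simplified:

    #include <cstdlib>
    #include <vector>

    struct Branch {
      int location;       // Byte address of the branch instruction.
      int target;         // Byte address it must reach.
      bool wide = false;  // Starts as a 16-bit guess; may grow to 32 bits.
    };

    static bool Fits16Bits(const Branch& b) {
      return std::abs(b.target - b.location) <= 126;  // Illustrative range only.
    }

    void RelaxBranches(std::vector<Branch>& branches) {
      bool changed = true;
      while (changed) {                     // Run to a fixed point.
        changed = false;
        for (Branch& b : branches) {
          if (b.wide || Fits16Bits(b)) continue;
          b.wide = true;                    // Sizes only grow: guarantees termination.
          changed = true;
          for (Branch& other : branches) {  // Widening inserts 2 bytes here;
            if (other.location > b.location) other.location += 2;  // shift later code
            if (other.target > b.location) other.target += 2;      // and targets past it.
          }
        }
      }
    }
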

/art/compiler/dex/
mir_optimization_test.cc
  455: BasicBlock* branch = cu_.mir_graph->GetBasicBlock(branch_bb);
  456: return target_bb != NullBasicBlockId && cu_.mir_graph->IsBackEdge(branch, target_bb);
  460: BasicBlock* branch = cu_.mir_graph->GetBasicBlock(branch_bb);
  461: return cu_.mir_graph->IsSuspendCheckEdge(branch, target_bb);

/art/runtime/arch/arm/
quick_entrypoints_arm.S
  169: cbnz r0, 1f  @ result non-zero branch over
  175: cbz r0, 1f  @ result zero branch over
  324: * thread and we branch to another stub to deliver it.

/art/runtime/arch/mips/
quick_entrypoints_mips.S
  458: * thread and we branch to another stub to deliver it.
  547: beq $t1, $t2, 1f  # branch if result type char == 'D'
  549: beq $t1, $t3, 1f  # branch if result type char == 'F'
  1092: # don't care if $v0 and/or $v1 are modified, when exception branch taken
  1200: # don't care if $v0 and/or $v1 are modified, when exception branch taken