Searched refs:m2l (Results 1 - 15 of 15) sorted by relevance

/art/compiler/dex/quick/
quick_cfi_test.cc
88 std::unique_ptr<Mir2Lir> m2l(QuickCompiler::GetCodeGenerator(&cu, nullptr));
89 m2l->frame_size_ = 64u;
90 m2l->CompilerInitializeRegAlloc();
91 for (const auto& info : m2l->reg_pool_->core_regs_) {
92 if (m2l->num_core_spills_ < 2 && !info->IsTemp() && !info->InUse()) {
93 m2l->core_spill_mask_ |= 1 << info->GetReg().GetRegNum();
94 m2l->num_core_spills_++;
97 for (const auto& info : m2l->reg_pool_->sp_regs_) {
98 if (m2l->num_fp_spills_ < 2 && !info->IsTemp() && !info->InUse()) {
99 m2l
[all...]
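
Note: the quick_cfi_test.cc hits above build a code generator, then walk reg_pool_->core_regs_ and sp_regs_, claiming the first two registers per pool that are neither temps nor in use and recording them in the spill mask. A minimal, self-contained sketch of that mask-building loop, using a hypothetical RegisterInfo stand-in rather than the real Mir2Lir types:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for the register records held in Mir2Lir's pools;
// not the real ART RegisterInfo class.
struct RegisterInfo {
  int reg_num;
  bool is_temp;
  bool in_use;
};

int main() {
  std::vector<RegisterInfo> core_regs = {
      {0, true, false}, {4, false, false}, {5, false, true},
      {6, false, false}, {7, false, false}};

  uint32_t core_spill_mask = 0;
  int num_core_spills = 0;
  // Same selection rule as the test: take at most two registers that are
  // neither temps nor currently in use and record them in the spill mask.
  for (const RegisterInfo& info : core_regs) {
    if (num_core_spills < 2 && !info.is_temp && !info.in_use) {
      core_spill_mask |= 1u << info.reg_num;
      ++num_core_spills;
    }
  }
  // Prints 0x50: registers 4 and 6 were the first two eligible ones.
  std::printf("core_spill_mask = 0x%x (%d spills)\n",
              static_cast<unsigned>(core_spill_mask), num_core_spills);
  return 0;
}
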
gen_common.cc
64 CallHelperImmMethodSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, argument
67 : LIRSlowPath(m2l, fromfast, cont), trampoline_(trampoline_in),
134 StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index, argument
136 : LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
199 DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in) argument
200 : LIRSlowPath(m2l, branch_in) {
217 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, RegStorage index_in, argument
219 : LIRSlowPath(m2l, branch_in),
242 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, int index_in, RegStorage length_in) argument
243 : LIRSlowPath(m2l, branch_i
272 NullCheckSlowPath(Mir2Lir* m2l, LIR* branch) argument
1396 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load) argument
2085 SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont) argument
[all...]
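
Note: every gen_common.cc hit is a slow-path class whose constructor forwards the owning Mir2Lir plus the fast-path branch (and optionally a continuation label) to LIRSlowPath. A schematic sketch of that subclass shape, with simplified placeholder types in place of the real Mir2Lir/LIR:

// Simplified placeholders, not the real ART types.
struct LIR {};
class Mir2Lir;

class LIRSlowPath {
 public:
  LIRSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont = nullptr)
      : m2l_(m2l), fromfast_(fromfast), cont_(cont) {}
  virtual ~LIRSlowPath() {}
  virtual void Compile() = 0;  // emits the out-of-line code

 protected:
  Mir2Lir* const m2l_;
  LIR* const fromfast_;  // branch taken from the fast path
  LIR* const cont_;      // where to resume, if any
};

// Same shape as the DivZeroCheckSlowPath hit above: keep only what the
// out-of-line code needs, then call back through m2l_ inside Compile().
class DivZeroCheckSlowPath : public LIRSlowPath {
 public:
  DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in)
      : LIRSlowPath(m2l, branch_in) {}
  void Compile() override {
    // The real version binds a target label and calls the divide-by-zero
    // throw entrypoint through m2l_; omitted in this sketch.
  }
};

int main() {
  LIR branch;
  DivZeroCheckSlowPath path(/*m2l=*/nullptr, &branch);  // sketch only
  path.Compile();
  return 0;
}
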
mir_to_lir.h
422 RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
499 LIRSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont = nullptr) argument
500 : m2l_(m2l), cu_(m2l->cu_),
501 current_dex_pc_(m2l->current_dalvik_offset_), current_mir_(m2l->current_mir_),
532 ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type) argument
533 : m2l_(m2l),
534 old_mem_ref_type_(m2l->mem_ref_type_) {
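
Note: the mir_to_lir.h hits show the LIRSlowPath base capturing cu_, the current dex pc, and the current MIR from m2l, and ScopedMemRefType acting as an RAII guard over m2l->mem_ref_type_. A minimal sketch of that save/override/restore idiom, with a hypothetical Generator struct standing in for Mir2Lir:

#include <cassert>

// Hypothetical stand-ins; the real guard stores a ResourceMask::ResourceBit
// held in Mir2Lir (mem_ref_type_).
enum class MemRefType { kHeapRef, kDalvikReg, kLiteral };

struct Generator {
  MemRefType mem_ref_type = MemRefType::kHeapRef;
};

class ScopedMemRefType {
 public:
  ScopedMemRefType(Generator* g, MemRefType new_type)
      : g_(g), old_type_(g->mem_ref_type) {
    g_->mem_ref_type = new_type;  // install the override
  }
  ~ScopedMemRefType() {
    g_->mem_ref_type = old_type_;  // restore on scope exit
  }

 private:
  Generator* const g_;
  const MemRefType old_type_;
};

int main() {
  Generator gen;
  {
    ScopedMemRefType scope(&gen, MemRefType::kDalvikReg);
    assert(gen.mem_ref_type == MemRefType::kDalvikReg);  // override active
  }
  assert(gen.mem_ref_type == MemRefType::kHeapRef);  // back to the old value
  return 0;
}
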
mir_to_lir.cc
29 SpecialSuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont) argument
30 : LIRSlowPath(m2l, branch, cont),
ralloc_util.cc
62 Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena, argument
76 dp_regs_(arena->Adapter()), next_dp_reg_(0), m2l_(m2l) {
gen_invoke.cc
51 IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in) argument
52 : LIRSlowPath(m2l, branch_in, resume_in), info_(info_in) {
/art/compiler/dex/quick/x86/
quick_assemble_x86_test.cc
96 X86Mir2Lir* m2l = static_cast<X86Mir2Lir*>(cu_->cg.get()); local
97 m2l->CompilerInitializeRegAlloc();
98 return m2l;
139 X86Mir2Lir* m2l = Prepare(target); local
149 lir.flags.size = m2l->GetInsnSize(&lir);
151 AssemblerStatus status = m2l->AssembleInstructions(&lir, 0);
156 std::vector<uint8_t> buffer(m2l->code_buffer_.begin(), m2l->code_buffer_.end());
199 X86Mir2Lir *m2l = Prepare(target); local
209 (m2l
[all...]
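
Note: the x86 assembler test drives the code generator directly: it sizes a LIR with GetInsnSize, runs AssembleInstructions, and diffs the bytes left in code_buffer_ against an expected encoding. A toy golden-bytes check in the same spirit, with a stand-in assembler instead of X86Mir2Lir (the opcode used here is ordinary x86 "add eax, imm8", not an entry from ART's encoding table):

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in assembler: encodes plain x86 "add r32, imm8" (83 /0 ib) into a
// byte buffer, standing in for the real AssembleInstructions() path.
struct ToyAssembler {
  std::vector<uint8_t> code_buffer;
  void AddRegImm8(uint8_t reg, int8_t imm) {
    code_buffer.push_back(0x83);                              // opcode
    code_buffer.push_back(static_cast<uint8_t>(0xC0 | reg));  // ModRM, /0
    code_buffer.push_back(static_cast<uint8_t>(imm));         // imm8
  }
};

int main() {
  ToyAssembler assembler;
  assembler.AddRegImm8(/*eax=*/0, 16);
  // Golden-bytes comparison, mirroring the structure of the x86 test:
  // assemble one instruction, then diff the buffer against the expectation.
  const std::vector<uint8_t> expected = {0x83, 0xC0, 0x10};
  assert(assembler.code_buffer == expected);
  return 0;
}
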
call_x86.cc
196 StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) argument
197 : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
codegen_x86.h
35 explicit InToRegStorageX86_64Mapper(Mir2Lir* m2l) argument
36 : m2l_(m2l), cur_core_reg_(0), cur_fp_reg_(0) {}
50 explicit InToRegStorageX86Mapper(Mir2Lir* m2l) argument
51 : InToRegStorageX86_64Mapper(m2l) { }
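
Note: the InToRegStorage*Mapper classes keep a back-pointer to the code generator plus running cursors (cur_core_reg_, cur_fp_reg_) over the argument registers. A simplified, hypothetical cursor-based mapper illustrating the idea; the register numbers and counts are placeholders, not ART's calling convention:

#include <cstdio>

// Hypothetical simplified mapper: hands out argument registers in order,
// keeping separate cursors for core and floating-point arguments and
// returning -1 once the register arguments run out (stack-passed).
class ArgToRegMapper {
 public:
  int NextCoreReg() {
    static const int kCoreArgRegs[] = {1, 2, 3, 5};  // placeholder numbers
    return cur_core_ < 4 ? kCoreArgRegs[cur_core_++] : -1;
  }
  int NextFpReg() {
    return cur_fp_ < 8 ? cur_fp_++ : -1;  // placeholder FP register count
  }

 private:
  int cur_core_ = 0;
  int cur_fp_ = 0;
};

int main() {
  ArgToRegMapper mapper;
  std::printf("arg0 -> core r%d\n", mapper.NextCoreReg());  // r1
  std::printf("arg1 -> fp f%d\n", mapper.NextFpReg());      // f0
  std::printf("arg2 -> core r%d\n", mapper.NextCoreReg());  // r2
  return 0;
}
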
int_x86.cc
1502 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, argument
1504 : LIRSlowPath(m2l, branch_in),
1550 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, argument
1552 : LIRSlowPath(m2l, branch_in),
/art/compiler/dex/quick/arm64/
int_arm64.cc
1476 static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) { argument
1483 m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
1484 m2l->cfi().RelOffset(DwarfCoreReg(reg1), offset << reg_log2_size);
1486 m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
1488 m2l->cfi().RelOffset(DwarfCoreReg(reg2), offset << reg_log2_size);
1489 m2l->cfi().RelOffset(DwarfCoreReg(reg1), (offset + 1) << reg_log2_size);
1495 static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) { argument
1502 m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
1504 m2l->cfi().RelOffset(DwarfFpReg(reg1), offset << reg_log2_size);
1506 m2l
1514 SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size) argument
1537 SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask) argument
1689 UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) argument
1708 UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) argument
[all...]
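
Note: SpillCoreRegs/SpillFPRegs walk a bit-mask of callee-save registers, emit a single store for a lone register and a store-pair otherwise, and record one CFI RelOffset per saved slot. A sketch of just that mask-walking logic, printing assembly-like text instead of building LIR; the pairing order and opcodes here are illustrative, not necessarily the exact order the arm64 backend uses:

#include <cstdint>
#include <cstdio>

// Walk a callee-save register mask low bit first, storing one register with
// "str" when it is the last one and pairs with "stp" otherwise, recording a
// CFI offset per 8-byte slot. Illustrative only: the real code emits LIR
// through m2l and records offsets via cfi().RelOffset().
static void SpillCoreRegs(uint32_t reg_mask, int slot) {
  while (reg_mask != 0) {
    int reg1 = __builtin_ctz(reg_mask);  // lowest set bit (GCC/Clang builtin)
    reg_mask &= reg_mask - 1;            // clear it
    if (reg_mask == 0) {
      std::printf("str x%d, [sp, #%d]\n", reg1, slot * 8);
      std::printf("  .cfi_rel_offset x%d, %d\n", reg1, slot * 8);
      slot += 1;
    } else {
      int reg2 = __builtin_ctz(reg_mask);
      reg_mask &= reg_mask - 1;
      std::printf("stp x%d, x%d, [sp, #%d]\n", reg1, reg2, slot * 8);
      std::printf("  .cfi_rel_offset x%d, %d\n", reg1, slot * 8);
      std::printf("  .cfi_rel_offset x%d, %d\n", reg2, (slot + 1) * 8);
      slot += 2;
    }
  }
}

int main() {
  // Spill x19, x20 and x30 starting at the bottom of the save area.
  SpillCoreRegs((1u << 19) | (1u << 20) | (1u << 30), /*slot=*/0);
  return 0;
}
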
call_arm64.cc
362 StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) argument
363 : LIRSlowPath(m2l, branch),
/art/compiler/dex/quick/mips/
codegen_mips.h
32 explicit InToRegStorageMipsMapper(Mir2Lir* m2l) : m2l_(m2l), cur_core_reg_(0) {} argument
45 explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {} argument
call_mips.cc
318 StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) argument
319 : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
/art/compiler/dex/quick/arm/
call_arm.cc
450 StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace) argument
451 : LIRSlowPath(m2l, branch), restore_lr_(restore_lr),
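
Note: each backend (x86, arm64, mips, arm) defines its own StackOverflowSlowPath carrying sp_displace (how much of the partially built frame to unwind before branching to the throw entrypoint); the arm variant additionally records whether LR must be restored. A schematic variant in the same simplified placeholder style as the LIRSlowPath sketch above, not the real ART classes:

#include <cstddef>

// Placeholder types for the sketch.
struct LIR {};
class Mir2Lir;

class LIRSlowPath {
 public:
  LIRSlowPath(Mir2Lir* m2l, LIR* fromfast) : m2l_(m2l), fromfast_(fromfast) {}
  virtual ~LIRSlowPath() {}
  virtual void Compile() = 0;

 protected:
  Mir2Lir* const m2l_;
  LIR* const fromfast_;
};

class StackOverflowSlowPath : public LIRSlowPath {
 public:
  StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr,
                        size_t sp_displace)
      : LIRSlowPath(m2l, branch),
        restore_lr_(restore_lr),
        sp_displace_(sp_displace) {}
  void Compile() override {
    // A real backend reloads LR if needed, pops sp_displace_ bytes of the
    // partially built frame, and branches to the throw-stack-overflow
    // entrypoint; omitted in this sketch.
  }

 private:
  const bool restore_lr_;     // arm only: LR was clobbered by the check
  const size_t sp_displace_;  // bytes already pushed before the check
};

int main() {
  LIR branch;
  StackOverflowSlowPath path(/*m2l=*/nullptr, &branch,
                             /*restore_lr=*/true, /*sp_displace=*/64);
  path.Compile();
  return 0;
}
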

Completed in 153 milliseconds