/art/compiler/dex/quick/arm/

utility_arm.cc
  669: // No short form - load from the literal pool.
  695: LIR* load;
  727: load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0);
  729: return load;
  751: load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  753: load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
  755: return load;
  856: LIR* load = NULL;
  869: load = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vldrd, r_base, displacement, r_dest);
  873: load ...
  971: LIR* load;
  [all...]
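The pair at lines 751/753 is the core of indexed loads on ARM: a three-operand LIR when the index register needs no scaling, a four-operand LIR when it must be shifted. A minimal sketch of that choice; the LIR struct, NewLIR3/NewLIR4, and the opcode names are simplified stand-ins for ART's real definitions, not its API.

#include <cstdio>
#include <deque>

struct LIR { int opcode; int operands[4]; };
static std::deque<LIR> code;  // deque: push_back never invalidates element pointers

LIR* NewLIR3(int op, int a, int b, int c) {
  code.push_back(LIR{op, {a, b, c, 0}});
  return &code.back();
}
LIR* NewLIR4(int op, int a, int b, int c, int d) {
  code.push_back(LIR{op, {a, b, c, d}});
  return &code.back();
}

// Hypothetical opcodes standing in for the Thumb2 load variants.
enum { kLdrRRR = 1, kLdrRRRshift = 2 };

LIR* LoadBaseIndexed(int r_dest, int r_base, int r_index, int scale) {
  return (scale == 0)
      ? NewLIR3(kLdrRRR, r_dest, r_base, r_index)              // ldr rd, [rb, ri]
      : NewLIR4(kLdrRRRshift, r_dest, r_base, r_index, scale); // ldr rd, [rb, ri, lsl #scale]
}

int main() {
  LIR* load = LoadBaseIndexed(0, 1, 2, 2);
  std::printf("opcode=%d scale=%d\n", load->opcode, load->operands[3]);
}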
/art/compiler/dex/quick/arm64/

utility_arm64.cc
  145: // No short form - load from the literal pool.
  407: // Compute how many movk, movz instructions are needed to load the value.
  415: // 1 instruction is enough to load the immediate.
  1026: LIR* load;
  1032: // register offset load (rather than doing the sign extension in a separate instruction).
  1097: load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  1100: load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), ...
  1104: return load;
  1199: LIR* load = NULL;
  1252: // Can use scaled load
  1279: LIR* load = LoadBaseDispBody(r_base, displacement, r_dest, size);
  [all...]
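The comments at lines 407 and 415 describe counting how many movz/movk instructions a 64-bit immediate needs. A minimal sketch of that count, assuming the simple rule "one movz for the first non-zero 16-bit chunk, one movk per additional non-zero chunk"; the real code also weighs other encodings (e.g. movn and the literal pool), which this sketch omits.

#include <cstdint>
#include <cstdio>

int CountInsnsToLoadImm64(uint64_t value) {
  if (value == 0) return 1;  // movz xd, #0
  int count = 0;
  for (int shift = 0; shift < 64; shift += 16) {
    if ((value >> shift) & 0xffff) ++count;  // one movz/movk per non-zero chunk
  }
  return count;
}

int main() {
  std::printf("%d\n", CountInsnsToLoadImm64(0x1234));                 // 1: movz only
  std::printf("%d\n", CountInsnsToLoadImm64(0x12340000567800ffULL));  // 3: movz + 2 movk
}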
/art/compiler/dex/quick/

gen_common.cc
  208: // Force an implicit null check by performing a memory operation (load) from the given
  212: LIR* load = Load32Disp(reg, 0, tmp);
  214: MarkSafepointPC(load);
  584: // Ensure load of status and store of value don't re-order.
  585: // TODO: Presumably the actual value store is control-dependent on the status load,
  669: // Ensure load of status and load of value don't re-order.
  730: // A load of the class will lead to an iget with offset 0.
  835: // We don't need access checks, load type from dex cache
  1065: LoadConstant(result_reg, 1);  // .eq case - load true
  1301: SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load)
  [all...]
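Lines 208-214 show the implicit-null-check idiom: emit a load from offset 0 of the object register and record its PC as a safepoint, so a fault at that address can be mapped back to a NullPointerException. A hedged sketch with stand-in types; Load32Disp and MarkSafepointPC are modeled here, not ART's real signatures.

#include <cstdio>
#include <deque>

struct LIR { const char* text; bool is_safepoint = false; };
static std::deque<LIR> code;

LIR* Load32Disp(int base_reg, int disp, int dest_reg) {
  (void)base_reg; (void)disp; (void)dest_reg;  // a real emitter would encode these
  code.push_back(LIR{"ldr tmp, [obj, #0]"});
  return &code.back();
}

// Record that a fault at this instruction's PC corresponds to a null check.
void MarkSafepointPC(LIR* lir) { lir->is_safepoint = true; }

void GenImplicitNullCheck(int obj_reg, int tmp_reg) {
  LIR* load = Load32Disp(obj_reg, 0, tmp_reg);  // faults iff obj_reg is null
  MarkSafepointPC(load);
}

int main() {
  GenImplicitNullCheck(/*obj_reg=*/1, /*tmp_reg=*/2);
  std::printf("%s safepoint=%d\n", code[0].text, code[0].is_safepoint);
}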
/art/compiler/dex/quick/mips/

utility_mips.cc
  466: LIR *load = NULL;
  519: load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
  521: load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
  528: load = NewLIR3(opcode, r_dest.GetLowReg(), LOWORD_OFFSET, r_tmp.GetReg());
  534: load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
  542: AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
  549: return load;
  555: // Do atomic 64-bit load.
  563: LIR* load;
  564: load ...
  [all...]
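The MIPS snippets split a 64-bit load into two 32-bit loads at displacement + LOWORD_OFFSET and displacement + HIWORD_OFFSET (line 521 shows the low-word half). A simplified sketch of that pairing, assuming little-endian word order; the types and helpers are stand-ins, though the argument order of NewLoad32 mirrors the NewLIR3 calls above.

#include <cstdio>
#include <deque>

constexpr int LOWORD_OFFSET = 0;
constexpr int HIWORD_OFFSET = 4;

struct LIR { int dest, base, disp; };
static std::deque<LIR> code;

LIR* NewLoad32(int dest, int disp, int base) {
  code.push_back(LIR{dest, base, disp});
  return &code.back();
}

// Emit a 64-bit load as two 32-bit loads; returns the first LIR so the
// caller can annotate it (cf. AnnotateDalvikRegAccess at line 542).
LIR* LoadBaseDispWide(int base, int disp, int dest_lo, int dest_hi) {
  LIR* load = NewLoad32(dest_lo, disp + LOWORD_OFFSET, base);
  NewLoad32(dest_hi, disp + HIWORD_OFFSET, base);
  return load;
}

int main() {
  LoadBaseDispWide(/*base=*/29, /*disp=*/16, /*dest_lo=*/2, /*dest_hi=*/3);
  for (const LIR& l : code) std::printf("lw r%d, %d(r%d)\n", l.dest, l.disp, l.base);
}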
/art/compiler/dex/quick/x86/

target_x86.cc
  769: // On x86, atomic 64-bit load/store requires an fp register.
  770: // Smaller aligned load/store is atomic for both core and fp registers.
  882: // First load the pointer in fs:[suspend-trigger] into eax
  1317: // below is ahead of the load above then this will not be true
  1804: // Okay, load it from the constant vector area.
  1823: LIR *load = NewLIR2(opcode, reg, rl_method.reg.GetReg());
  1824: load->flags.fixup = kFixupLoad;
  1825: load->target = data_target;
  2547: * and as part of the load sequence, it must be replaced with
  2711: // For 64-bit load w...
  [all...]
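Lines 1823-1825 show the literal-area pattern: the load is emitted with a placeholder operand, tagged kFixupLoad, and pointed at the data LIR so the assembler can patch the real displacement once layout is known. A minimal sketch of that two-phase fixup; the structs, the 4-bytes-per-LIR layout, and the patching rule are simplified assumptions.

#include <cstdio>
#include <deque>

enum FixupKind { kFixupNone, kFixupLoad };

struct LIR {
  int offset = 0;         // assigned during layout
  int operand = 0;        // displacement, patched for kFixupLoad
  FixupKind fixup = kFixupNone;
  LIR* target = nullptr;  // the literal this load refers to
};

static std::deque<LIR> code;

int main() {
  // Emit a load whose displacement is unknown, then the literal it reads.
  code.push_back(LIR{});
  LIR* load = &code.back();
  load->fixup = kFixupLoad;
  code.push_back(LIR{});
  load->target = &code.back();

  // Layout pass: assign byte offsets (4 bytes per LIR, purely illustrative).
  int pc = 0;
  for (LIR& lir : code) { lir.offset = pc; pc += 4; }

  // Fixup pass: rewrite the displacement as target - load.
  for (LIR& lir : code) {
    if (lir.fixup == kFixupLoad) lir.operand = lir.target->offset - lir.offset;
  }
  std::printf("patched displacement = %d\n", load->operand);
}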

utility_x86.cc
  572: // We will load the value from the literal area.
  636: LIR *load = NULL;
  692: load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
  696: load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
  700: load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
  707: AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
  716: load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
  722: // We can't use either register for the first load.
  724: load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
  731: load ...
  771: LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
  [all...]
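Lines 716-724 show the register-pair hazard on x86: when loading a 64-bit value through [base + index*scale], the first half must not clobber a register the second half's address still needs, so it goes through a temp first (line 722). A sketch of that conflict check with a hypothetical EmitLoad helper; the exact hazard condition in ART is more involved than this.

#include <cstdio>

// Hypothetical emit helper: prints "mov dest, [base + index*scale + disp]".
void EmitLoad(int dest, int base, int index, int scale, int disp) {
  std::printf("mov r%d, [r%d + r%d*%d + %d]\n", dest, base, index, scale, disp);
}

void LoadWideIndexed(int dest_lo, int dest_hi, int base, int index,
                     int scale, int disp, int temp) {
  if (dest_lo == base || dest_lo == index) {
    // First load would clobber an address register: go through the temp.
    EmitLoad(temp, base, index, scale, disp);         // low word into temp
    EmitLoad(dest_hi, base, index, scale, disp + 4);  // high word
    std::printf("mov r%d, r%d\n", dest_lo, temp);     // move low word into place
  } else {
    EmitLoad(dest_lo, base, index, scale, disp);
    EmitLoad(dest_hi, base, index, scale, disp + 4);
  }
}

int main() {
  LoadWideIndexed(/*dest_lo=*/1, /*dest_hi=*/2, /*base=*/1, /*index=*/3,
                  /*scale=*/2, /*disp=*/0, /*temp=*/5);
}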
/art/compiler/optimizing/

code_generator.cc
  185: HLoadLocal* load = input->AsLoadLocal();
  186: if (load != nullptr) {
  187:   loc = GetStackLocation(load);

code_generator_arm.cc
  301: Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
  302:   switch (load->GetType()) {
  304:     return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
  309:     return Location::StackSlot(GetStackSlot(load->GetLocal()));
  313:     LOG(FATAL) << "Unimplemented type " << load->GetType();
  320:     LOG(FATAL) << "Unexpected type " << load->GetType();
  689: void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
  690:   load->SetLocations(nullptr);
  693: void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {

code_generator_x86.cc
  276: Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const {
  277:   switch (load->GetType()) {
  279:     return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
  284:     return Location::StackSlot(GetStackSlot(load->GetLocal()));
  288:     LOG(FATAL) << "Unimplemented type " << load->GetType();
  295:     LOG(FATAL) << "Unexpected type " << load->GetType();
  562: void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {

code_generator_x86_64.cc
  248: Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
  249:   switch (load->GetType()) {
  251:     return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
  256:     return Location::StackSlot(GetStackSlot(load->GetLocal()));
  260:     LOG(FATAL) << "Unimplemented type " << load->GetType();
  267:     LOG(FATAL) << "Unexpected type " << load->GetType();
  441: void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
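All three backends above (and the caller at code_generator.cc:187) implement GetStackLocation the same way: 64-bit types get a double stack slot, everything else a single slot, with LOG(FATAL) for the unimplemented and unexpected cases. A condensed sketch of that switch with simplified Location and type enums standing in for ART's Primitive types.

#include <cstdio>

enum class Type { kInt, kFloat, kLong, kDouble, kVoid };

struct Location {
  enum Kind { kStackSlot, kDoubleStackSlot } kind;
  int slot;
};

Location GetStackLocation(Type type, int stack_slot) {
  switch (type) {
    case Type::kLong:
    case Type::kDouble:
      return Location{Location::kDoubleStackSlot, stack_slot};  // two words
    case Type::kInt:
    case Type::kFloat:
      return Location{Location::kStackSlot, stack_slot};        // one word
    default:
      std::fprintf(stderr, "Unexpected type\n");                // LOG(FATAL) in ART
      return Location{Location::kStackSlot, -1};
  }
}

int main() {
  Location loc = GetStackLocation(Type::kDouble, 8);
  std::printf("kind=%d slot=%d\n", loc.kind, loc.slot);
}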

ssa_builder.cc
  132: void SsaBuilder::VisitLoadLocal(HLoadLocal* load) {
  133:   load->ReplaceWith(current_locals_->Get(load->GetLocal()->GetRegNumber()));
  134:   load->GetBlock()->RemoveInstruction(load);
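ssa_builder.cc:132-134 eliminates HLoadLocal during SSA construction: the load is replaced by whatever value the local currently holds, then removed from its block. A toy sketch of that rewrite over a simplified instruction graph; the classes are stand-ins for ART's HInstruction hierarchy.

#include <cstdio>
#include <vector>

struct Instruction {
  const char* name;
  Instruction* input = nullptr;  // single input, for brevity
};

// current_locals[reg] is the value most recently stored to local #reg.
std::vector<Instruction*> current_locals;

// Replace every use of `load` with `value`, then drop `load` from the block.
void ReplaceAndRemove(std::vector<Instruction*>& block, Instruction* load,
                      Instruction* value) {
  for (Instruction* insn : block) {
    if (insn->input == load) insn->input = value;
  }
  std::erase(block, load);  // C++20
}

int main() {
  Instruction constant{"const 42"};
  Instruction load{"load_local v0"};
  Instruction add{"add", &load};
  std::vector<Instruction*> block{&load, &add};
  current_locals = {&constant};

  ReplaceAndRemove(block, &load, current_locals[0]);
  std::printf("add input: %s\n", add.input->name);  // "const 42"
}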
/art/compiler/utils/arm/

assembler_arm32.cc
  543: EmitMemOp(Condition cond, bool load, bool byte, Register rd, const Address& ad)
  564:   (load ? L : 0) |
  573:   (load ? L : 0) |
  598: EmitMultiMemOp(Condition cond, BlockAddressMode am, bool load, Register base, RegList regs)
  608:   (load ? L : 0) |
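assembler_arm32.cc keys load versus store on a single L bit in the instruction word, hence the repeated "(load ? L : 0)" terms. A sketch of an ARM single-data-transfer encoding built the same way; the bit positions follow the ARM ARM for LDR/STR (immediate), but the helper itself is illustrative, not ART's EmitMemOp.

#include <cstdint>
#include <cstdio>

constexpr uint32_t L = 1u << 20;  // 1 = load (LDR), 0 = store (STR)
constexpr uint32_t P = 1u << 24;  // pre-indexed addressing
constexpr uint32_t U = 1u << 23;  // add (rather than subtract) the offset

uint32_t EmitMemOp(uint32_t cond, bool load, uint32_t rd, uint32_t rn,
                   uint32_t imm12) {
  return (cond << 28) | (1u << 26) | P | U |
         (load ? L : 0) |
         (rn << 16) | (rd << 12) | (imm12 & 0xfff);
}

int main() {
  const uint32_t kAl = 0xe;  // "always" condition
  std::printf("ldr r0, [r1, #4] -> %08x\n", EmitMemOp(kAl, true, 0, 1, 4));   // e5910004
  std::printf("str r0, [r1, #4] -> %08x\n", EmitMemOp(kAl, false, 0, 1, 4));  // e5810004
}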

assembler_thumb2.cc
  1244: EmitLoadStore(Condition cond, bool load, bool byte, bool half, bool is_signed, Register rd, const Address& ad)
  1295:   (load ? B20 : 0) |
  1319:   (load ? B11 : 0);
  1329: // the size of the load/store.
  1356: if (must_be_32bit || offset < 0 || offset >= (1 << 10) || !load) {
  1363: int32_t encoding = 0x1f << 27 | 0xf << 16 | B22 | (load ? B20 : 0) |
  1368: // 16 bit literal load.
  1371: int32_t encoding = B14 | (load ? B11 : 0) | static_cast<uint32_t>(rd) << 8 | offset >> 2;
  1383: int32_t encoding = 0x1f << 27 | (load ? B20 : 0) | static_cast<uint32_t>(rd) << 12 |
  1393: int32_t encoding = B14 | B12 | (load ...
  1407: EmitMultiMemOp(Condition cond, BlockAddressMode am, bool load, Register base, RegList regs)
  [all...]
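Line 1356 is the interesting decision: Thumb-2 falls back to a 32-bit encoding whenever the operands do not fit the 16-bit form, i.e. a negative or out-of-range offset, or a store where only a load form exists. A sketch of that selection rule for PC-relative literal accesses; the range check mirrors line 1356, but the function is a simplification of EmitLoadStore.

#include <cstdio>

bool MustUse32BitLiteralAccess(bool load, int offset, bool force_32bit) {
  // The 16-bit LDR (literal) form takes a word-aligned offset in [0, 1020]
  // and has no store counterpart, so anything else needs 32 bits.
  return force_32bit || offset < 0 || offset >= (1 << 10) || !load;
}

int main() {
  std::printf("%d\n", MustUse32BitLiteralAccess(true, 512, false));   // 0: 16-bit fits
  std::printf("%d\n", MustUse32BitLiteralAccess(true, 2048, false));  // 1: offset too far
  std::printf("%d\n", MustUse32BitLiteralAccess(false, 512, false));  // 1: store form
}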
/art/disassembler/

disassembler_x86.cc
  204: bool load = false;  // loads from memory (ie rm is on the right)
  221: case r8_rm8: opcode << #opname; load = true; has_modrm = true; byte_operand = true; break; \
  222: case r32_rm32: opcode << #opname; load = true; has_modrm = true; break; \
  274: load = true;
  282: case 0x69: opcode << "imul"; load = true; has_modrm = true; immediate_bytes = 4; break;
  284: case 0x6B: opcode << "imul"; load = true; has_modrm = true; immediate_bytes = 1; break;
  302: case 0x8A: opcode << "mov"; load = true; has_modrm = true; byte_operand = true; break;
  303: case 0x8B: opcode << "mov"; load = true; has_modrm = true; break;
  323: load = *instr == 0x10;
  324: store = !load;
  [all...]
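The disassembler tracks a per-opcode `load` flag meaning "the r/m operand is the source": for 0x8A/0x8B the register is the destination, while the mirrored 0x88/0x89 forms store to memory (lines 323-324 show the compact variant, deriving the direction straight from the opcode byte). A sketch of that direction decode; the table is trimmed to a few mov opcodes and the structs are illustrative.

#include <cstdint>
#include <cstdio>

struct Decoded { const char* mnemonic; bool load; };  // load: rm is the source

Decoded DecodeDirection(uint8_t opcode) {
  switch (opcode) {
    case 0x88: return {"mov", false};  // mov r/m8, r8    (store)
    case 0x89: return {"mov", false};  // mov r/m32, r32  (store)
    case 0x8A: return {"mov", true};   // mov r8, r/m8    (load)
    case 0x8B: return {"mov", true};   // mov r32, r/m32  (load)
    default:   return {"??", false};
  }
}

int main() {
  for (uint8_t op : {0x88, 0x8B}) {
    Decoded d = DecodeDirection(op);
    std::printf("%02x: %s (%s)\n", op, d.mnemonic, d.load ? "load" : "store");
  }
}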