code_generator.cc revision 38207af82afb6f99c687f64b15601ed20d82220a
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#include "code_generator_arm.h"
#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
#include "vmap_table.h"

namespace art {

// Return whether a location is consistent with a type.
static bool CheckType(Primitive::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return Primitive::IsIntegralType(type) || (type == Primitive::kPrimNot);
  } else if (location.IsRegisterPair()) {
    return type == Primitive::kPrimLong;
  } else if (location.IsFpuRegisterPair()) {
    return type == Primitive::kPrimDouble;
  } else if (location.IsStackSlot()) {
    return (Primitive::IsIntegralType(type) && type != Primitive::kPrimLong)
        || (type == Primitive::kPrimFloat)
        || (type == Primitive::kPrimNot);
  } else if (location.IsDoubleStackSlot()) {
    return (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return Primitive::IsIntegralType(type) && (type != Primitive::kPrimLong);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == Primitive::kPrimNot;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == Primitive::kPrimLong;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == Primitive::kPrimFloat;
    } else {
      return location.GetConstant()->IsDoubleConstant()
          && (type == Primitive::kPrimDouble);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}

// Check that a location summary is consistent with an instruction.
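// Always returns true so that callers can wrap it in a DCHECK, e.g.
// DCHECK(CheckTypeConsistency(instruction)), and the checks compile away in release builds.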
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    DCHECK(CheckType(instruction->InputAt(i)->GetType(), locations->InAt(i)))
        << instruction->InputAt(i)->GetType()
        << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      Primitive::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, environment->GetLocationAt(i)))
          << type << " " << environment->GetLocationAt(i);
    } else {
      DCHECK(environment->GetLocationAt(i).IsInvalid())
          << environment->GetLocationAt(i);
    }
  }
  return true;
}

size_t CodeGenerator::GetCacheOffset(uint32_t index) {
  return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}

size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
  auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
  return mirror::Array::DataOffset(pointer_size).Uint32Value() + pointer_size * index;
}

void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
  Initialize();
  if (!is_leaf) {
    MarkNotLeaf();
  }
  const bool is_64_bit = Is64BitInstructionSet(GetInstructionSet());
  InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
                               + GetGraph()->GetTemporariesVRegSlots()
                               + 1 /* filler */,
                           0, /* the baseline compiler does not have live registers at slow path */
                           0, /* the baseline compiler does not have live registers at slow path */
                           GetGraph()->GetMaximumNumberOfOutVRegs()
                               + (is_64_bit ? 2 : 1) /* current method */,
                           GetGraph()->GetBlocks());
  CompileInternal(allocator, /* is_baseline */ true);
}

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ(block_order_->Get(current_block_index_), current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
    HBasicBlock* block = block_order_->Get(i);
    if (!block->IsSingleGoto()) {
      return block;
    }
  }
  return nullptr;
}

HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleGoto()) {
    block = block->GetSuccessors().Get(0);
  }
  return block;
}

void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
  is_baseline_ = is_baseline;
  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = block_order_->Get(current_block_index_);
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleGoto()) continue;
    Bind(block);
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (is_baseline) {
        InitLocationsBaseline(current);
      }
      DCHECK(CheckTypeConsistency(current));
      current->Accept(instruction_visitor);
    }
  }

  // Generate the slow paths.
  for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
    slow_paths_.Get(i)->EmitNativeCode(this);
  }

  // Finalize instructions in the assembler.
  Finalize(allocator);
}

void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
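  // Here we only run the backend-specific Initialize() before emitting code.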
  DCHECK(block_order_ != nullptr);
  Initialize();
  CompileInternal(allocator, /* is_baseline */ false);
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (!array[i]) {
      array[i] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
}

size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
  for (size_t i = 0; i < length - 1; i += 2) {
    if (!array[i] && !array[i + 1]) {
      array[i] = true;
      array[i + 1] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
}

void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_number_of_live_core_registers,
                                             size_t maximum_number_of_live_fp_registers,
                                             size_t number_of_out_slots,
                                             const GrowableArray<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(block_order_->Get(0) == GetGraph()->GetEntryBlock());
  DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), block_order_->Get(1)));
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_number_of_live_core_registers, 0u);
    DCHECK_EQ(maximum_number_of_live_fp_registers, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        number_of_spill_slots * kVRegSize
        + number_of_out_slots * kVRegSize
        + maximum_number_of_live_core_registers * GetWordSize()
        + maximum_number_of_live_fp_registers * GetFloatingPointSpillSlotSize()
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}

Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  // The type of the previous instruction tells us if we need a single or double stack slot.
  Primitive::Type type = temp->GetType();
  int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
  // Use the temporary region (right below the dex registers).
  int32_t slot = GetFrameSize() - FrameEntrySpillSize()
                                - kVRegSize  // filler
                                - (number_of_locals * kVRegSize)
                                - ((temp_size + temp->GetIndex()) * kVRegSize);
  return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}

int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
  uint16_t reg_number = local->GetRegNumber();
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  if (reg_number >= number_of_locals) {
    // Local is a parameter of the method. It is stored in the caller's frame.
    // TODO: Share this logic with StackVisitor::GetVRegOffsetFromQuickCode.
    return GetFrameSize() + InstructionSetPointerSize(GetInstructionSet())  // ART method
                          + (reg_number - number_of_locals) * kVRegSize;
  } else {
    // Local is a temporary in this method. It is stored in this method's frame.
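    // The offset is relative to the stack pointer: skip the frame-entry spill area and the
    // filler slot, then index into the locals region by register number.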
    return GetFrameSize() - FrameEntrySpillSize()
                          - kVRegSize  // filler.
                          - (number_of_locals * kVRegSize)
                          + (reg_number * kVRegSize);
  }
}

void CodeGenerator::CreateCommonInvokeLocationSummary(
    HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
  LocationSummary* locations = new (allocator) LocationSummary(invoke, LocationSummary::kCall);

  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
  }

  locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));

  if (invoke->IsInvokeStaticOrDirect()) {
    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
    if (call->IsStringInit()) {
      locations->AddTemp(visitor->GetMethodLocation());
    } else if (call->IsRecursive()) {
      locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
    } else {
      locations->AddTemp(visitor->GetMethodLocation());
      locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
    }
  } else {
    locations->AddTemp(visitor->GetMethodLocation());
  }
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKS below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) return;

  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    blocked_core_registers_[i] = false;
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    blocked_fpu_registers_[i] = false;
  }

  for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
    blocked_register_pairs_[i] = false;
  }

  // Mark all fixed input, temp and output registers as used.
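  // Blocking them now keeps AllocateFreeRegister() below from handing them out a second time.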
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    BlockIfInRegister(locations->InAt(i));
  }

  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    BlockIfInRegister(loc);
  }
  Location result_location = locations->Out();
  if (locations->OutputCanOverlapWithInputs()) {
    BlockIfInRegister(result_location, /* is_out */ true);
  }

  SetupBlockedRegisters(/* is_baseline */ true);

  // Allocate all unallocated input locations.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    Location loc = locations->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (loc.IsUnallocated()) {
      if ((loc.GetPolicy() == Location::kRequiresRegister)
          || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
        loc = AllocateFreeRegister(input->GetType());
      } else {
        DCHECK_EQ(loc.GetPolicy(), Location::kAny);
        HLoadLocal* load = input->AsLoadLocal();
        if (load != nullptr) {
          loc = GetStackLocation(load);
        } else {
          loc = AllocateFreeRegister(input->GetType());
        }
      }
      locations->SetInAt(i, loc);
    }
  }

  // Allocate all unallocated temp locations.
  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    if (loc.IsUnallocated()) {
      switch (loc.GetPolicy()) {
        case Location::kRequiresRegister:
          // Allocate a core register (large enough to fit a 32-bit integer).
          loc = AllocateFreeRegister(Primitive::kPrimInt);
          break;

        case Location::kRequiresFpuRegister:
          // Allocate an FPU register (large enough to fit a 64-bit double).
          loc = AllocateFreeRegister(Primitive::kPrimDouble);
          break;

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << loc.GetPolicy();
      }
      locations->SetTempAt(i, loc);
    }
  }
  if (result_location.IsUnallocated()) {
    switch (result_location.GetPolicy()) {
      case Location::kAny:
      case Location::kRequiresRegister:
      case Location::kRequiresFpuRegister:
        result_location = AllocateFreeRegister(instruction->GetType());
        break;
      case Location::kSameAsFirstInput:
        result_location = locations->InAt(0);
        break;
    }
    locations->UpdateOut(result_location);
  }
}

void CodeGenerator::InitLocationsBaseline(HInstruction* instruction) {
  AllocateLocations(instruction);
  if (instruction->GetLocations() == nullptr) {
    if (instruction->IsTemporary()) {
      HInstruction* previous = instruction->GetPrevious();
      Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
      Move(previous, temp_location, instruction);
    }
    return;
  }
  AllocateRegistersLocally(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    Location location = instruction->GetLocations()->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (location.IsValid()) {
      // Move the input to the desired location.
      if (input->GetNext()->IsTemporary()) {
        // If the input was stored in a temporary, use that temporary to
        // perform the move.
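        // (Its stack slot was filled when the HTemporary itself went through
        // InitLocationsBaseline above.)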
        Move(input->GetNext(), location, instruction);
      } else {
        Move(input, location, instruction);
      }
    }
  }
}

void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr && locations->CanCall()) {
      MarkNotLeaf();
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

CodeGenerator* CodeGenerator::Create(HGraph* graph,
                                     InstructionSet instruction_set,
                                     const InstructionSetFeatures& isa_features,
                                     const CompilerOptions& compiler_options) {
  switch (instruction_set) {
    case kArm:
    case kThumb2: {
      return new arm::CodeGeneratorARM(graph,
                                       *isa_features.AsArmInstructionSetFeatures(),
                                       compiler_options);
    }
    case kArm64: {
      return new arm64::CodeGeneratorARM64(graph,
                                           *isa_features.AsArm64InstructionSetFeatures(),
                                           compiler_options);
    }
    case kMips:
      return nullptr;
    case kX86: {
      return new x86::CodeGeneratorX86(graph,
                                       *isa_features.AsX86InstructionSetFeatures(),
                                       compiler_options);
    }
    case kX86_64: {
      return new x86_64::CodeGeneratorX86_64(graph,
                                             *isa_features.AsX86_64InstructionSetFeatures(),
                                             compiler_options);
    }
    default:
      return nullptr;
  }
}

void CodeGenerator::BuildNativeGCMap(
    std::vector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
  const std::vector<uint8_t>& gc_map_raw =
      dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);

  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    uint32_t native_offset = pc_infos_.Get(i).native_pc;
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }

  GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    uint32_t native_offset = pc_info.native_pc;
    uint32_t dex_pc = pc_info.dex_pc;
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    builder.AddEntry(native_offset, references);
  }
}

void CodeGenerator::BuildSourceMap(DefaultSrcMap* src_map) const {
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    uint32_t pc2dex_offset = pc_info.native_pc;
    int32_t pc2dex_dalvik_offset = pc_info.dex_pc;
    src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
  }
}

void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = pc_infos_.Size();
  uint32_t pc2dex_offset = 0u;
  int32_t pc2dex_dalvik_offset = 0;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  int32_t dex2pc_dalvik_offset = 0;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
    pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
  }

  // Walk over the blocks and find which ones correspond to catch block entries.
  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      ++dex2pc_entries;
      dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  data->resize(data_size);

  uint8_t* data_ptr = &(*data)[0];
  uint8_t* write_pos = data_ptr;

  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    DCHECK(pc2dex_offset <= pc_info.native_pc);
    write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
    write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
  }

  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);

  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(data_ptr);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (size_t i = 0; i < pc2dex_entries; i++) {
      struct PcInfo pc_info = pc_infos_.Get(i);
      CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
      CHECK_EQ(pc_info.dex_pc, it.DexPc());
      ++it;
    }
    for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
      HBasicBlock* block = graph_->GetBlocks().Get(i);
      if (block->IsCatchBlock()) {
        CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
        CHECK_EQ(block->GetDexPc(), it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}

void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
  Leb128EncodingVector vmap_encoder;
  // We currently don't use callee-saved registers.
  size_t size = 0 + 1 /* marker */ + 0;
  vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
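  // With no callee saves, the encoded table is just the entry count (1) and the adjusted-FP marker.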
  vmap_encoder.PushBackUnsigned(size);
  vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);

  *data = vmap_encoder.GetData();
}

void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
  uint32_t size = stack_map_stream_.PrepareForFillIn();
  data->resize(size);
  MemoryRegion region(data->data(), size);
  stack_map_stream_.FillIn(region);
}

void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 SlowPathCode* slow_path) {
  if (instruction != nullptr) {
    // The code generated for some type conversions may call the
    // runtime, thus normally requiring a subsequent call to this
    // method. However, the method verifier does not produce PC
    // information for certain instructions, which are considered "atomic"
    // (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions. As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      Primitive::Type type = instruction->AsRem()->GetResultType();
      if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) {
        return;
      }
    }
  }

  uint32_t outer_dex_pc = dex_pc;
  uint32_t outer_environment_size = 0;
  uint32_t inlining_depth = 0;
  if (instruction != nullptr) {
    for (HEnvironment* environment = instruction->GetEnvironment();
         environment != nullptr;
         environment = environment->GetParent()) {
      outer_dex_pc = environment->GetDexPc();
      outer_environment_size = environment->Size();
      if (environment != instruction->GetEnvironment()) {
        inlining_depth++;
      }
    }
  }

  // Collect PC infos for the mapping table.
  struct PcInfo pc_info;
  pc_info.dex_pc = outer_dex_pc;
  pc_info.native_pc = GetAssembler()->CodeSize();
  pc_infos_.Add(pc_info);

  if (instruction == nullptr) {
    // For stack overflow checks.
    stack_map_stream_.BeginStackMapEntry(pc_info.dex_pc, pc_info.native_pc, 0, 0, 0, 0);
    stack_map_stream_.EndStackMapEntry();
    return;
  }
  LocationSummary* locations = instruction->GetLocations();

  uint32_t register_mask = locations->GetRegisterMask();
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the caller-save from the mask, since
    // they will be overwritten by the callee.
    register_mask &= core_callee_save_mask_;
  }
  // The register mask must be a subset of callee-save registers.
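  // In other words, intersecting it with core_callee_save_mask_ must leave it unchanged.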
  DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  stack_map_stream_.BeginStackMapEntry(pc_info.dex_pc,
                                       pc_info.native_pc,
                                       register_mask,
                                       locations->GetStackMask(),
                                       outer_environment_size,
                                       inlining_depth);

  EmitEnvironment(instruction->GetEnvironment(), slow_path);
  stack_map_stream_.EndStackMapEntry();
}

void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
  if (environment == nullptr) return;

  if (environment->GetParent() != nullptr) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(), slow_path);
    stack_map_stream_.BeginInlineInfoEntry(environment->GetMethodIdx(),
                                           environment->GetDexPc(),
                                           environment->GetInvokeType(),
                                           environment->Size());
  }

  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister : {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == Primitive::kPrimLong) {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
          if (current->GetType() == Primitive::kPrimLong) {
            stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister : {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == Primitive::kPrimDouble) {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
          if (current->GetType() == Primitive::kPrimDouble) {
            stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair : {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair : {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }

  if (environment->GetParent() != nullptr) {
    stack_map_stream_.EndInlineInfoEntry();
  }
}

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();

  return (first_next_not_move != nullptr)
      && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  // If we are from a static path don't record the pc as we can't throw NPE.
  // NB: having the checks here makes the code much less verbose in the arch
  // specific code generators.
  if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
    return;
  }

  if (!compiler_options_.GetImplicitNullChecks()) {
    return;
  }

  if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
    return;
  }

  // Find the first previous instruction which is not a move.
  HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();

  // If the instruction is a null check it means that `instr` is the first user
  // and needs to record the pc.
  if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
    HNullCheck* null_check = first_prev_not_move->AsNullCheck();
    // TODO: The parallel moves modify the environment. Their changes need to be reverted
    // otherwise the stack maps at the throw point will not be correct.
    RecordPcInfo(null_check, null_check->GetDexPc());
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());

  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    HInstruction* current = it.Current();
    LiveInterval* interval = current->GetLiveInterval();
    // We only need to clear bits of loop phis containing objects and allocated in register.
    // Loop phis allocated on stack already have the object in the stack.
    if (current->GetType() == Primitive::kPrimNot
        && interval->HasRegister()
        && interval->HasSpillSlot()) {
      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
    }
  }
}

void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      Primitive::Type type1,
                                      Location from2,
                                      Location to2,
                                      Primitive::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetArena());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

void SlowPathCode::RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc) {
  codegen->RecordPcInfo(instruction, dex_pc, this);
}

void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        // If the register holds an object, update the stack mask.
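        // The stack mask is emitted into the stack map (see RecordPcInfo), so the GC can
        // find the reference in its spill slot while the slow path runs.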
        if (locations->RegisterContainsObject(i)) {
          locations->SetStackBit(stack_offset / kVRegSize);
        }
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
        saved_core_stack_offsets_[i] = stack_offset;
        stack_offset += codegen->SaveCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
        saved_fpu_stack_offsets_[i] = stack_offset;
        stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

}  // namespace art