code_generator.cc revision d97dc40d186aec46bfd318b6a2026a98241d7e9c
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#include "code_generator_arm.h"
#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
#include "vmap_table.h"

namespace art {

size_t CodeGenerator::GetCacheOffset(uint32_t index) {
  return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}

void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
  const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
  DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
  DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
  Initialize();

  DCHECK_EQ(frame_size_, kUninitializedFrameSize);
  if (!is_leaf) {
    MarkNotLeaf();
  }
  ComputeFrameSize(GetGraph()->GetNumberOfLocalVRegs()
                       + GetGraph()->GetTemporariesVRegSlots()
                       + 1 /* filler */,
                   0, /* the baseline compiler does not have live core registers at slow path */
                   0, /* the baseline compiler does not have live FP registers at slow path */
                   GetGraph()->GetMaximumNumberOfOutVRegs()
                       + 1 /* current method */);
  GenerateFrameEntry();

  HGraphVisitor* location_builder = GetLocationBuilder();
  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
    HBasicBlock* block = blocks.Get(i);
    Bind(block);
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      current->Accept(location_builder);
      InitLocations(current);
      current->Accept(instruction_visitor);
    }
  }
  GenerateSlowPaths();
  Finalize(allocator);
}
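
// Note: CompileBaseline above derives the frame size directly from the vreg
// counts of the graph, whereas CompileOptimized below relies on the register
// allocator having already computed it (hence the DCHECK_NE on frame_size_)
// and only has to emit code for each instruction.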

void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
  // The frame size has already been computed during register allocation.
  DCHECK_NE(frame_size_, kUninitializedFrameSize);
  const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
  DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
  DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
  Initialize();

  GenerateFrameEntry();
  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
    HBasicBlock* block = blocks.Get(i);
    Bind(block);
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      current->Accept(instruction_visitor);
    }
  }
  GenerateSlowPaths();
  Finalize(allocator);
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

void CodeGenerator::GenerateSlowPaths() {
  for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
    slow_paths_.Get(i)->EmitNativeCode(this);
  }
}

size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (!array[i]) {
      array[i] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
  return -1;
}

size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
  for (size_t i = 0; i < length - 1; i += 2) {
    if (!array[i] && !array[i + 1]) {
      array[i] = true;
      array[i + 1] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
  return -1;
}

void CodeGenerator::ComputeFrameSize(size_t number_of_spill_slots,
                                     size_t maximum_number_of_live_core_registers,
                                     size_t maximum_number_of_live_fp_registers,
                                     size_t number_of_out_slots) {
  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;

  SetFrameSize(RoundUp(
      number_of_spill_slots * kVRegSize
      + number_of_out_slots * kVRegSize
      + maximum_number_of_live_core_registers * GetWordSize()
      + maximum_number_of_live_fp_registers * GetFloatingPointSpillSlotSize()
      + FrameEntrySpillSize(),
      kStackAlignment));
}
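
// A worked example of the computation above, using hypothetical values: on a
// 32-bit target with kVRegSize == 4, GetWordSize() == 4, a 4-byte
// FrameEntrySpillSize() (only the return address saved), 3 spill slots, 2 out
// slots, no live registers at slow paths, and a 16-byte kStackAlignment, the
// frame size is
//   RoundUp(3 * 4 + 2 * 4 + 0 + 0 + 4, 16) = RoundUp(24, 16) = 32 bytes.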

Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  // The type of the previous instruction tells us if we need a single or double stack slot.
  Primitive::Type type = temp->GetType();
  int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
  // Use the temporary region (right below the dex registers).
  int32_t slot = GetFrameSize() - FrameEntrySpillSize()
                                - kVRegSize  // filler
                                - (number_of_locals * kVRegSize)
                                - ((temp_size + temp->GetIndex()) * kVRegSize);
  return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}

int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
  uint16_t reg_number = local->GetRegNumber();
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  if (reg_number >= number_of_locals) {
    // Local is a parameter of the method. It is stored in the caller's frame.
    return GetFrameSize() + kVRegSize  // ART method
                          + (reg_number - number_of_locals) * kVRegSize;
  } else {
    // Local is a regular (non-parameter) vreg of the method. It is stored in
    // this method's frame.
    return GetFrameSize() - FrameEntrySpillSize()
                          - kVRegSize  // filler.
                          - (number_of_locals * kVRegSize)
                          + (reg_number * kVRegSize);
  }
}
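
// Rough sketch of the region the two accessors above address (offsets relative
// to SP after the frame entry, higher offsets first; alignment padding
// ignored):
//
//   GetFrameSize()                     -> caller's frame: ART method slot, then parameters
//   - FrameEntrySpillSize()            -> entry spills (callee saves, incl. return address)
//   - kVRegSize                        -> filler slot
//   - number_of_locals * kVRegSize     -> dex register locals (GetStackSlot)
//   below that                         -> HTemporary slots (GetTemporaryLocation)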

void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) return;

  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    blocked_core_registers_[i] = false;
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    blocked_fpu_registers_[i] = false;
  }

  for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
    blocked_register_pairs_[i] = false;
  }

  // Mark all fixed input, temp and output registers as used.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    Location loc = locations->InAt(i);
    // The DCHECKs below check that a register is not specified twice in
    // the summary.
    if (loc.IsRegister()) {
      DCHECK(!blocked_core_registers_[loc.reg()]);
      blocked_core_registers_[loc.reg()] = true;
    } else if (loc.IsFpuRegister()) {
      DCHECK(!blocked_fpu_registers_[loc.reg()]);
      blocked_fpu_registers_[loc.reg()] = true;
    } else if (loc.IsFpuRegisterPair()) {
      DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()]);
      blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()] = true;
      DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()]);
      blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()] = true;
    } else if (loc.IsRegisterPair()) {
      DCHECK(!blocked_core_registers_[loc.AsRegisterPairLow<int>()]);
      blocked_core_registers_[loc.AsRegisterPairLow<int>()] = true;
      DCHECK(!blocked_core_registers_[loc.AsRegisterPairHigh<int>()]);
      blocked_core_registers_[loc.AsRegisterPairHigh<int>()] = true;
    }
  }

  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    // The DCHECKs below check that a register is not specified twice in
    // the summary.
    if (loc.IsRegister()) {
      DCHECK(!blocked_core_registers_[loc.reg()]);
      blocked_core_registers_[loc.reg()] = true;
    } else if (loc.IsFpuRegister()) {
      DCHECK(!blocked_fpu_registers_[loc.reg()]);
      blocked_fpu_registers_[loc.reg()] = true;
    } else {
      DCHECK(loc.GetPolicy() == Location::kRequiresRegister
             || loc.GetPolicy() == Location::kRequiresFpuRegister);
    }
  }

  static constexpr bool kBaseline = true;
  SetupBlockedRegisters(kBaseline);

  // Allocate all unallocated input locations.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    Location loc = locations->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (loc.IsUnallocated()) {
      if ((loc.GetPolicy() == Location::kRequiresRegister)
          || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
        loc = AllocateFreeRegister(input->GetType());
      } else {
        DCHECK_EQ(loc.GetPolicy(), Location::kAny);
        HLoadLocal* load = input->AsLoadLocal();
        if (load != nullptr) {
          loc = GetStackLocation(load);
        } else {
          loc = AllocateFreeRegister(input->GetType());
        }
      }
      locations->SetInAt(i, loc);
    }
  }

  // Allocate all unallocated temp locations.
  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    if (loc.IsUnallocated()) {
      switch (loc.GetPolicy()) {
        case Location::kRequiresRegister:
          // Allocate a core register (large enough to fit a 32-bit integer).
          loc = AllocateFreeRegister(Primitive::kPrimInt);
          break;

        case Location::kRequiresFpuRegister:
          // Allocate an FPU register (large enough to fit a 64-bit double).
          loc = AllocateFreeRegister(Primitive::kPrimDouble);
          break;

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << loc.GetPolicy();
      }
      locations->SetTempAt(i, loc);
    }
  }
  Location result_location = locations->Out();
  if (result_location.IsUnallocated()) {
    switch (result_location.GetPolicy()) {
      case Location::kAny:
      case Location::kRequiresRegister:
      case Location::kRequiresFpuRegister:
        result_location = AllocateFreeRegister(instruction->GetType());
        break;
      case Location::kSameAsFirstInput:
        result_location = locations->InAt(0);
        break;
    }
    locations->SetOut(result_location);
  }
}
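
// Note: the blocked-register state above is rebuilt from scratch on every
// call, so baseline register assignments only last for the duration of a
// single instruction; values that live longer flow through the stack slots
// of HLoadLocal/HStoreLocal instead.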

void CodeGenerator::InitLocations(HInstruction* instruction) {
  if (instruction->GetLocations() == nullptr) {
    if (instruction->IsTemporary()) {
      HInstruction* previous = instruction->GetPrevious();
      Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
      Move(previous, temp_location, instruction);
    }
    return;
  }
  AllocateRegistersLocally(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    Location location = instruction->GetLocations()->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (location.IsValid()) {
      // Move the input to the desired location.
      if (input->GetNext()->IsTemporary()) {
        // If the input was stored in a temporary, use that temporary to
        // perform the move.
        Move(input->GetNext(), location, instruction);
      } else {
        Move(input, location, instruction);
      }
    }
  }
}

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  // We currently iterate over the blocks in insertion order.
  return current->GetBlockId() + 1 == next->GetBlockId();
}

CodeGenerator* CodeGenerator::Create(HGraph* graph,
                                     InstructionSet instruction_set,
                                     const InstructionSetFeatures& isa_features,
                                     const CompilerOptions& compiler_options) {
  switch (instruction_set) {
    case kArm:
    case kThumb2: {
      return new arm::CodeGeneratorARM(graph,
                                       *isa_features.AsArmInstructionSetFeatures(),
                                       compiler_options);
    }
    case kArm64: {
      return new arm64::CodeGeneratorARM64(graph, compiler_options);
    }
    case kMips:
      return nullptr;
    case kX86: {
      return new x86::CodeGeneratorX86(graph, compiler_options);
    }
    case kX86_64: {
      return new x86_64::CodeGeneratorX86_64(graph, compiler_options);
    }
    default:
      return nullptr;
  }
}

void CodeGenerator::BuildNativeGCMap(
    std::vector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
  const std::vector<uint8_t>& gc_map_raw =
      dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);

  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    uint32_t native_offset = pc_infos_.Get(i).native_pc;
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }

  GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    uint32_t native_offset = pc_info.native_pc;
    uint32_t dex_pc = pc_info.dex_pc;
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    builder.AddEntry(native_offset, references);
  }
}
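
// The mapping table emitted below is delta-encoded: a ULEB128 header (total
// entry count, then pc2dex entry count) is followed by the pc2dex entries and
// then the dex2pc entries, each stored as a ULEB128 native pc delta plus an
// SLEB128 dex pc delta. For example (hypothetical values), native pcs
// {4, 8, 20} are encoded as the deltas {4, 4, 12}.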

void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, DefaultSrcMap* src_map) const {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = pc_infos_.Size();
  uint32_t pc2dex_offset = 0u;
  int32_t pc2dex_dalvik_offset = 0;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  int32_t dex2pc_dalvik_offset = 0;

  if (src_map != nullptr) {
    src_map->reserve(pc2dex_entries);
  }

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
    pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
    if (src_map != nullptr) {
      src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
    }
  }

  // Walk over the blocks and find which ones correspond to catch block entries.
  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      ++dex2pc_entries;
      dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  data->resize(data_size);

  uint8_t* data_ptr = &(*data)[0];
  uint8_t* write_pos = data_ptr;

  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    DCHECK(pc2dex_offset <= pc_info.native_pc);
    write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
    write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
  }

  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);

  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(data_ptr);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (size_t i = 0; i < pc2dex_entries; i++) {
      struct PcInfo pc_info = pc_infos_.Get(i);
      CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
      CHECK_EQ(pc_info.dex_pc, it.DexPc());
      ++it;
    }
    for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
      HBasicBlock* block = graph_->GetBlocks().Get(i);
      if (block->IsCatchBlock()) {
        CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
        CHECK_EQ(block->GetDexPc(), it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}
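
// A note on BuildVMapTable below: with no callee-saved registers promoted to
// the vmap, the encoded table degenerates to its size prefix (1) followed by
// the marker entry; Reserve() sizes the buffer on the assumption that each
// ULEB128 value fits in a single byte.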

void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
  Leb128EncodingVector vmap_encoder;
  // We currently don't use callee-saved registers.
  size_t size = 0 + 1 /* marker */ + 0;
  vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
  vmap_encoder.PushBackUnsigned(size);
  vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);

  *data = vmap_encoder.GetData();
}

void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
  uint32_t size = stack_map_stream_.ComputeNeededSize();
  data->resize(size);
  MemoryRegion region(data->data(), size);
  stack_map_stream_.FillIn(region);
}
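
// RecordPcInfo below feeds both metadata streams: the PcInfo list consumed by
// BuildMappingTable and BuildNativeGCMap above, and the stack map stream
// consumed by BuildStackMaps.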

void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) {
  if (instruction != nullptr) {
    // The code generated for some type conversions may call the
    // runtime, thus normally requiring a subsequent call to this
    // method. However, the method verifier does not produce PC
    // information for certain instructions, which are considered "atomic"
    // (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions. As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      Primitive::Type type = instruction->AsRem()->GetResultType();
      if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) {
        return;
      }
    }
  }

  // Collect PC infos for the mapping table.
  struct PcInfo pc_info;
  pc_info.dex_pc = dex_pc;
  pc_info.native_pc = GetAssembler()->CodeSize();
  pc_infos_.Add(pc_info);

  // Populate stack map information.

  if (instruction == nullptr) {
    // For stack overflow checks.
    stack_map_stream_.AddStackMapEntry(dex_pc, pc_info.native_pc, 0, 0, 0, 0);
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  HEnvironment* environment = instruction->GetEnvironment();

  size_t environment_size = instruction->EnvironmentSize();

  size_t inlining_depth = 0;
  uint32_t register_mask = locations->GetRegisterMask();
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the caller-save from the mask, since
    // they will be overwritten by the callee.
    register_mask &= core_callee_save_mask_;
  }
  // The register mask must be a subset of callee-save registers.
  DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  stack_map_stream_.AddStackMapEntry(
      dex_pc, pc_info.native_pc, register_mask,
      locations->GetStackMask(), environment_size, inlining_depth);

  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0; i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kNone, 0);
      continue;
    }

    Location location = locations->GetEnvironmentAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK(current == location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, value);
        } else {
          DCHECK(current->IsFloatConstant());
          int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack, location.GetStackIndex());
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack,
                                              location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister: {
        int id = location.reg();
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
        if (current->GetType() == Primitive::kPrimLong) {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
          ++i;
          DCHECK_LT(i, environment_size);
        }
        break;
      }

      case Location::kFpuRegister: {
        int id = location.reg();
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, id);
        if (current->GetType() == Primitive::kPrimDouble) {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, id);
          ++i;
          DCHECK_LT(i, environment_size);
        }
        break;
      }

      case Location::kFpuRegisterPair: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, location.low());
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, location.high());
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, location.low());
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, location.high());
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }
}
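
// Background for the two helpers below: an implicit null check folds the null
// test into the first memory access on the object, and the runtime converts a
// fault at that access into a NullPointerException using the pc recorded via
// RecordPcInfo.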

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
  return (first_next_not_move != nullptr) && first_next_not_move->CanDoImplicitNullCheck();
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  // If we come from a static path, do not record the pc, as we cannot throw an NPE.
  // NB: having the checks here makes the code much less verbose in the arch
  // specific code generators.
  if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
    return;
  }

  if (!compiler_options_.GetImplicitNullChecks()) {
    return;
  }

  if (!instr->CanDoImplicitNullCheck()) {
    return;
  }

  // Find the first previous instruction which is not a move.
  HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();

  // If that instruction is a null check, it means that `instr` is its first
  // user and needs to record the pc.
  if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
    HNullCheck* null_check = first_prev_not_move->AsNullCheck();
    // TODO: The parallel moves modify the environment. Their changes need to be reverted
    // otherwise the stack maps at the throw point will not be correct.
    RecordPcInfo(null_check, null_check->GetDexPc());
  }
}

void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = first_register_slot_in_slow_path_;
  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        // If the register holds an object, update the stack mask.
        if (locations->RegisterContainsObject(i)) {
          locations->SetStackBit(stack_offset / kVRegSize);
        }
        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
        stack_offset += SaveCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
        stack_offset += SaveFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = first_register_slot_in_slow_path_;
  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
        stack_offset += RestoreCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
        stack_offset += RestoreFloatingPointRegister(stack_offset, i);
      }
    }
  }
}
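
// Note: SaveLiveRegisters and RestoreLiveRegisters must visit registers in the
// same order, since both recompute the stack offsets incrementally from
// first_register_slot_in_slow_path_.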

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());

  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    HInstruction* current = it.Current();
    LiveInterval* interval = current->GetLiveInterval();
    // We only need to clear the stack-mask bits of loop phis that hold objects
    // and are allocated in a register; loop phis allocated on the stack already
    // have the object stored there.
    if (current->GetType() == Primitive::kPrimNot
        && interval->HasRegister()
        && interval->HasSpillSlot()) {
      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
    }
  }
}

void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
  HParallelMove parallel_move(GetGraph()->GetArena());
  parallel_move.AddMove(from1, to1, nullptr);
  parallel_move.AddMove(from2, to2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

}  // namespace art