code_generator.cc revision 0ada95d8de4b04b5f201b4b7e9c3c2fd2cc321ae
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#include "code_generator_arm.h"
#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
#include "vmap_table.h"

namespace art {

size_t CodeGenerator::GetCacheOffset(uint32_t index) {
  return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}

void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
  const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
  DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
  DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
  Initialize();

  DCHECK_EQ(frame_size_, kUninitializedFrameSize);
  if (!is_leaf) {
    MarkNotLeaf();
  }
  ComputeFrameSize(GetGraph()->GetNumberOfLocalVRegs()
                     + GetGraph()->GetTemporariesVRegSlots()
                     + 1 /* filler */,
                   0, /* the baseline compiler does not have live registers at slow path */
                   GetGraph()->GetMaximumNumberOfOutVRegs()
                     + 1 /* current method */);
  GenerateFrameEntry();

  HGraphVisitor* location_builder = GetLocationBuilder();
  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
    HBasicBlock* block = blocks.Get(i);
    Bind(block);
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      current->Accept(location_builder);
      InitLocations(current);
      current->Accept(instruction_visitor);
    }
  }
  GenerateSlowPaths();
  Finalize(allocator);
}
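
// Note: CompileOptimized() below mirrors CompileBaseline() above, but skips
// the per-instruction location-building pass. When the optimizing register
// allocator has run, every instruction already carries a filled-in
// LocationSummary and the frame size is fixed, so we only bind blocks and
// emit code.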

void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
  // The frame size has already been computed during register allocation.
  DCHECK_NE(frame_size_, kUninitializedFrameSize);
  const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
  DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
  DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
  Initialize();

  GenerateFrameEntry();
  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
    HBasicBlock* block = blocks.Get(i);
    Bind(block);
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      current->Accept(instruction_visitor);
    }
  }
  GenerateSlowPaths();
  Finalize(allocator);
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

void CodeGenerator::GenerateSlowPaths() {
  for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
    slow_paths_.Get(i)->EmitNativeCode(this);
  }
}

size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (!array[i]) {
      array[i] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
  return -1;
}

size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
  for (size_t i = 0; i < length - 1; i += 2) {
    if (!array[i] && !array[i + 1]) {
      array[i] = true;
      array[i + 1] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
  return -1;
}

void CodeGenerator::ComputeFrameSize(size_t number_of_spill_slots,
                                     size_t maximum_number_of_live_registers,
                                     size_t number_of_out_slots) {
  first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;

  SetFrameSize(RoundUp(
      number_of_spill_slots * kVRegSize
      + number_of_out_slots * kVRegSize
      + maximum_number_of_live_registers * GetWordSize()
      + FrameEntrySpillSize(),
      kStackAlignment));
}

Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  // The type of the previous instruction tells us if we need a single or double stack slot.
  Primitive::Type type = temp->GetType();
  int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
  // Use the temporary region (right below the dex registers).
  int32_t slot = GetFrameSize() - FrameEntrySpillSize()
                                - kVRegSize  // filler
                                - (number_of_locals * kVRegSize)
                                - ((temp_size + temp->GetIndex()) * kVRegSize);
  return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}
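
// Rough sketch of the frame layout implied by ComputeFrameSize(),
// GetTemporaryLocation() above and GetStackSlot() below (alignment padding
// omitted; the slow-path area is empty in baseline mode, and the optimizing
// register allocator computes its own spill slots):
//
//   +-------------------------------+  <- GetFrameSize()
//   | frame entry spills            |  FrameEntrySpillSize()
//   +-------------------------------+
//   | live registers at slow paths  |  starts at first_register_slot_in_slow_path_
//   +-------------------------------+
//   | filler                        |  kVRegSize
//   +-------------------------------+
//   | dex register locals           |  number_of_locals * kVRegSize
//   +-------------------------------+
//   | temporaries                   |  GetTemporariesVRegSlots() * kVRegSize
//   +-------------------------------+
//   | outgoing args + ART method    |  number_of_out_slots * kVRegSize
//   +-------------------------------+  <- SP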

int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
  uint16_t reg_number = local->GetRegNumber();
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  if (reg_number >= number_of_locals) {
    // Local is a parameter of the method. It is stored in the caller's frame.
    return GetFrameSize() + kVRegSize  // ART method
                          + (reg_number - number_of_locals) * kVRegSize;
  } else {
    // Local is a temporary in this method. It is stored in this method's frame.
    return GetFrameSize() - FrameEntrySpillSize()
                          - kVRegSize  // filler.
                          - (number_of_locals * kVRegSize)
                          + (reg_number * kVRegSize);
  }
}

void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) return;

  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    blocked_core_registers_[i] = false;
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    blocked_fpu_registers_[i] = false;
  }

  for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
    blocked_register_pairs_[i] = false;
  }

  // Mark all fixed input, temp and output registers as used.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    Location loc = locations->InAt(i);
    // The DCHECKs below check that a register is not specified twice in
    // the summary.
    if (loc.IsRegister()) {
      DCHECK(!blocked_core_registers_[loc.reg()]);
      blocked_core_registers_[loc.reg()] = true;
    } else if (loc.IsFpuRegister()) {
      DCHECK(!blocked_fpu_registers_[loc.reg()]);
      blocked_fpu_registers_[loc.reg()] = true;
    } else if (loc.IsFpuRegisterPair()) {
      DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()]);
      blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()] = true;
      DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()]);
      blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()] = true;
    } else if (loc.IsRegisterPair()) {
      DCHECK(!blocked_core_registers_[loc.AsRegisterPairLow<int>()]);
      blocked_core_registers_[loc.AsRegisterPairLow<int>()] = true;
      DCHECK(!blocked_core_registers_[loc.AsRegisterPairHigh<int>()]);
      blocked_core_registers_[loc.AsRegisterPairHigh<int>()] = true;
    }
  }

  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    // The DCHECKs below check that a register is not specified twice in
    // the summary.
    if (loc.IsRegister()) {
      DCHECK(!blocked_core_registers_[loc.reg()]);
      blocked_core_registers_[loc.reg()] = true;
    } else if (loc.IsFpuRegister()) {
      DCHECK(!blocked_fpu_registers_[loc.reg()]);
      blocked_fpu_registers_[loc.reg()] = true;
    } else {
      DCHECK(loc.GetPolicy() == Location::kRequiresRegister
             || loc.GetPolicy() == Location::kRequiresFpuRegister);
    }
  }

  SetupBlockedRegisters();

  // Allocate all unallocated input locations.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    Location loc = locations->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (loc.IsUnallocated()) {
      if ((loc.GetPolicy() == Location::kRequiresRegister)
          || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
        loc = AllocateFreeRegister(input->GetType());
      } else {
        DCHECK_EQ(loc.GetPolicy(), Location::kAny);
        HLoadLocal* load = input->AsLoadLocal();
        if (load != nullptr) {
          loc = GetStackLocation(load);
        } else {
          loc = AllocateFreeRegister(input->GetType());
        }
      }
      locations->SetInAt(i, loc);
    }
  }

  // Allocate all unallocated temp locations.
  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    if (loc.IsUnallocated()) {
      switch (loc.GetPolicy()) {
        case Location::kRequiresRegister:
          // Allocate a core register (large enough to fit a 32-bit integer).
          loc = AllocateFreeRegister(Primitive::kPrimInt);
          break;

        case Location::kRequiresFpuRegister:
          // Allocate a floating-point register (large enough to fit a 64-bit double).
          loc = AllocateFreeRegister(Primitive::kPrimDouble);
          break;

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << loc.GetPolicy();
      }
      locations->SetTempAt(i, loc);
    }
  }
  Location result_location = locations->Out();
  if (result_location.IsUnallocated()) {
    switch (result_location.GetPolicy()) {
      case Location::kAny:
      case Location::kRequiresRegister:
      case Location::kRequiresFpuRegister:
        result_location = AllocateFreeRegister(instruction->GetType());
        break;
      case Location::kSameAsFirstInput:
        result_location = locations->InAt(0);
        break;
    }
    locations->SetOut(result_location);
  }
}
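
// Worked example of the local allocation above (hypothetical, for
// illustration only): an HAdd whose summary is
// { in: Any, in: Any, out: SameAsFirstInput } first has its fixed registers
// blocked (none here), then each kAny input is resolved: an HLoadLocal input
// is mapped to its stack slot via GetStackLocation(), any other input gets a
// free register from AllocateFreeRegister(); finally the output reuses
// InAt(0).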

void CodeGenerator::InitLocations(HInstruction* instruction) {
  if (instruction->GetLocations() == nullptr) {
    if (instruction->IsTemporary()) {
      HInstruction* previous = instruction->GetPrevious();
      Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
      Move(previous, temp_location, instruction);
    }
    return;
  }
  AllocateRegistersLocally(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    Location location = instruction->GetLocations()->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (location.IsValid()) {
      // Move the input to the desired location.
      if (input->GetNext()->IsTemporary()) {
        // If the input was stored in a temporary, use that temporary to
        // perform the move.
        Move(input->GetNext(), location, instruction);
      } else {
        Move(input, location, instruction);
      }
    }
  }
}

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  // We currently iterate over the blocks in insertion order.
  return current->GetBlockId() + 1 == next->GetBlockId();
}
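
// Typical use of the factory below (hypothetical driver code, shown only for
// illustration; the names are not taken from this file):
//
//   CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, kThumb2);
//   if (codegen == nullptr) {
//     return false;  // Unsupported instruction set, e.g. kMips here.
//   }
//   codegen->CompileBaseline(&code_allocator, /* is_leaf */ false);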

CodeGenerator* CodeGenerator::Create(ArenaAllocator* allocator,
                                     HGraph* graph,
                                     InstructionSet instruction_set) {
  switch (instruction_set) {
    case kArm:
    case kThumb2: {
      return new (allocator) arm::CodeGeneratorARM(graph);
    }
    case kArm64: {
      return new (allocator) arm64::CodeGeneratorARM64(graph);
    }
    case kMips:
      return nullptr;
    case kX86: {
      return new (allocator) x86::CodeGeneratorX86(graph);
    }
    case kX86_64: {
      return new (allocator) x86_64::CodeGeneratorX86_64(graph);
    }
    default:
      return nullptr;
  }
}

void CodeGenerator::BuildNativeGCMap(
    std::vector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
  const std::vector<uint8_t>& gc_map_raw =
      dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);

  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    uint32_t native_offset = pc_infos_.Get(i).native_pc;
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }

  GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    uint32_t native_offset = pc_info.native_pc;
    uint32_t dex_pc = pc_info.dex_pc;
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    builder.AddEntry(native_offset, references);
  }
}
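
// Layout of the table emitted by BuildMappingTable() below, as implied by
// the code (all values LEB128-encoded):
//
//   total_entries   (unsigned)
//   pc2dex_entries  (unsigned)
//   pc2dex pairs:   delta(native_pc) unsigned, delta(dex_pc) signed
//   dex2pc pairs (catch-block entries only), same delta encoding
//
// Worked example (hypothetical numbers): native pcs {4, 12, 20} with dex
// pcs {0, 1, 3} are stored as the delta pairs (4, 0), (8, 1), (8, 2); each
// delta here fits in a single LEB128 byte, being below 128.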

void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_map) const {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = pc_infos_.Size();
  uint32_t pc2dex_offset = 0u;
  int32_t pc2dex_dalvik_offset = 0;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  int32_t dex2pc_dalvik_offset = 0;

  if (src_map != nullptr) {
    src_map->reserve(pc2dex_entries);
  }

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
    pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
    if (src_map != nullptr) {
      src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
    }
  }

  // Walk over the blocks and find which ones correspond to catch block entries.
  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      ++dex2pc_entries;
      dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  data->resize(data_size);

  uint8_t* data_ptr = &(*data)[0];
  uint8_t* write_pos = data_ptr;

  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    DCHECK(pc2dex_offset <= pc_info.native_pc);
    write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
    write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
  }

  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);

  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(data_ptr);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (size_t i = 0; i < pc2dex_entries; i++) {
      struct PcInfo pc_info = pc_infos_.Get(i);
      CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
      CHECK_EQ(pc_info.dex_pc, it.DexPc());
      ++it;
    }
    for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
      HBasicBlock* block = graph_->GetBlocks().Get(i);
      if (block->IsCatchBlock()) {
        CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
        CHECK_EQ(block->GetDexPc(), it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}
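
// BuildVMapTable() below currently emits a minimal table: no callee-saved
// core or FP registers are recorded, so the payload is a ULEB128 size of 1
// followed by the single VmapTable::kAdjustedFpMarker entry.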

void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
  Leb128EncodingVector vmap_encoder;
  // We currently don't use callee-saved registers.
  size_t size = 0 + 1 /* marker */ + 0;
  vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
  vmap_encoder.PushBackUnsigned(size);
  vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);

  *data = vmap_encoder.GetData();
}

void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
  uint32_t size = stack_map_stream_.ComputeNeededSize();
  data->resize(size);
  MemoryRegion region(data->data(), size);
  stack_map_stream_.FillIn(region);
}
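
// RecordPcInfo() below records one safepoint: a (dex_pc, native_pc) pair for
// the mapping table, plus a stack map entry carrying the register mask, the
// stack mask and one DexRegisterMap entry per vreg in the environment.
// 64-bit values (long/double) occupy two consecutive entries.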

void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) {
  if (instruction != nullptr) {
    // The code generated for some type conversions may call the
    // runtime, thus normally requiring a subsequent call to this
    // method. However, the method verifier does not produce PC
    // information for certain instructions, which are considered "atomic"
    // (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions. As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      Primitive::Type type = instruction->AsRem()->GetResultType();
      if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) {
        return;
      }
    }
  }

  // Collect PC infos for the mapping table.
  struct PcInfo pc_info;
  pc_info.dex_pc = dex_pc;
  pc_info.native_pc = GetAssembler()->CodeSize();
  pc_infos_.Add(pc_info);

  // Populate stack map information.
  if (instruction == nullptr) {
    // For stack overflow checks.
    stack_map_stream_.AddStackMapEntry(dex_pc, pc_info.native_pc, 0, 0, 0, 0);
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  HEnvironment* environment = instruction->GetEnvironment();

  size_t environment_size = instruction->EnvironmentSize();

  size_t register_mask = 0;
  size_t inlining_depth = 0;
  stack_map_stream_.AddStackMapEntry(
      dex_pc, pc_info.native_pc, register_mask,
      locations->GetStackMask(), environment_size, inlining_depth);

  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0; i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kNone, 0);
      continue;
    }

    Location location = locations->GetEnvironmentAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK(current == location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, value);
        } else {
          DCHECK(current->IsFloatConstant());
          int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack, location.GetStackIndex());
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInStack,
                                              location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister : {
        int id = location.reg();
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
        if (current->GetType() == Primitive::kPrimLong) {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
          ++i;
          DCHECK_LT(i, environment_size);
        }
        break;
      }

      case Location::kFpuRegister : {
        int id = location.reg();
        stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, id);
        if (current->GetType() == Primitive::kPrimDouble) {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, id);
          ++i;
          DCHECK_LT(i, environment_size);
        }
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }
}
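
// SaveLiveRegisters()/RestoreLiveRegisters() below spill the registers that
// are live across a slow path into the stack area starting at
// first_register_slot_in_slow_path_ (see ComputeFrameSize() above). Core
// registers holding objects are also flagged in the stack mask so the GC
// can find them while the slow path runs.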

void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = first_register_slot_in_slow_path_;
  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    if (register_set->ContainsCoreRegister(i)) {
      // If the register holds an object, update the stack mask.
      if (locations->RegisterContainsObject(i)) {
        locations->SetStackBit(stack_offset / kVRegSize);
      }
      DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
      stack_offset += SaveCoreRegister(stack_offset, i);
    }
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (register_set->ContainsFloatingPointRegister(i)) {
      DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
      stack_offset += SaveFloatingPointRegister(stack_offset, i);
    }
  }
}

void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = first_register_slot_in_slow_path_;
  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    if (register_set->ContainsCoreRegister(i)) {
      DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
      stack_offset += RestoreCoreRegister(stack_offset, i);
    }
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (register_set->ContainsFloatingPointRegister(i)) {
      DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
      stack_offset += RestoreFloatingPointRegister(stack_offset, i);
    }
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());

  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    HInstruction* current = it.Current();
    LiveInterval* interval = current->GetLiveInterval();
    // We only need to clear the stack bits of loop phis that contain objects
    // and are allocated in a register.
    // Loop phis allocated on stack already have the object in the stack.
    if (current->GetType() == Primitive::kPrimNot
        && interval->HasRegister()
        && interval->HasSpillSlot()) {
      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
    }
  }
}

void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
  MoveOperands move1(from1, to1, nullptr);
  MoveOperands move2(from2, to2, nullptr);
  HParallelMove parallel_move(GetGraph()->GetArena());
  parallel_move.AddMove(&move1);
  parallel_move.AddMove(&move2);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

}  // namespace art