code_generator.cc revision 6058455d486219994921b63a2d774dc9908415a2
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "code_generator_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "code_generator_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "code_generator_x86_64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
#include "code_generator_mips64.h"
#endif

#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
#include "graph_visualizer.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "parallel_move_resolver.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
#include "vmap_table.h"

namespace art {

// Return whether a location is consistent with a type.
static bool CheckType(Primitive::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return Primitive::IsIntegralType(type) || (type == Primitive::kPrimNot);
  } else if (location.IsRegisterPair()) {
    return type == Primitive::kPrimLong;
  } else if (location.IsFpuRegisterPair()) {
    return type == Primitive::kPrimDouble;
  } else if (location.IsStackSlot()) {
    return (Primitive::IsIntegralType(type) && type != Primitive::kPrimLong)
           || (type == Primitive::kPrimFloat)
           || (type == Primitive::kPrimNot);
  } else if (location.IsDoubleStackSlot()) {
    return (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return Primitive::IsIntegralType(type) && (type != Primitive::kPrimLong);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == Primitive::kPrimNot;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == Primitive::kPrimLong;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == Primitive::kPrimFloat;
    } else {
      return location.GetConstant()->IsDoubleConstant()
          && (type == Primitive::kPrimDouble);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}

// Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    DCHECK(CheckType(instruction->InputAt(i)->GetType(), locations->InAt(i)))
      << instruction->InputAt(i)->GetType()
      << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      Primitive::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, environment->GetLocationAt(i)))
        << type << " " << environment->GetLocationAt(i);
    } else {
      DCHECK(environment->GetLocationAt(i).IsInvalid())
        << environment->GetLocationAt(i);
    }
  }
  return true;
}

size_t CodeGenerator::GetCacheOffset(uint32_t index) {
  return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}

size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
  auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
  return mirror::Array::DataOffset(pointer_size).Uint32Value() + pointer_size * index;
}

void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
  Initialize();
  if (!is_leaf) {
    MarkNotLeaf();
  }
  const bool is_64_bit = Is64BitInstructionSet(GetInstructionSet());
  InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
                             + GetGraph()->GetTemporariesVRegSlots()
                             + 1 /* filler */,
                           0, /* the baseline compiler does not have live registers at slow path */
                           0, /* the baseline compiler does not have live registers at slow path */
                           GetGraph()->GetMaximumNumberOfOutVRegs()
                             + (is_64_bit ? 2 : 1) /* current method */,
                           GetGraph()->GetBlocks());
  CompileInternal(allocator, /* is_baseline */ true);
}

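// Returns whether `next` (after skipping trivial single-jump blocks) is the block
// that will be emitted immediately after `current` in the block order.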
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ(block_order_->Get(current_block_index_), current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
    HBasicBlock* block = block_order_->Get(i);
    if (!block->IsSingleJump()) {
      return block;
    }
  }
  return nullptr;
}

HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleJump()) {
    block = block->GetSuccessor(0);
  }
  return block;
}

class DisassemblyScope {
 public:
  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
      : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      start_offset_ = codegen_.GetAssembler().CodeSize();
    }
  }

  ~DisassemblyScope() {
    // We avoid building this data when we know it will not be used.
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      codegen_.GetDisassemblyInformation()->AddInstructionInterval(
          instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
    }
  }

 private:
  const CodeGenerator& codegen_;
  HInstruction* instruction_;
  size_t start_offset_;
};


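// Emits the native code of every slow path recorded while generating the main
// block code, tracking per-slow-path code intervals when disassembly is enabled.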
void CodeGenerator::GenerateSlowPaths() {
  size_t code_start = 0;
  for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
    if (disasm_info_ != nullptr) {
      code_start = GetAssembler()->CodeSize();
    }
    slow_paths_.Get(i)->EmitNativeCode(this);
    if (disasm_info_ != nullptr) {
      disasm_info_->AddSlowPathInterval(slow_paths_.Get(i), code_start, GetAssembler()->CodeSize());
    }
  }
}

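// Code generation loop shared by the baseline and optimizing paths: emit the
// frame entry, then each non-trivial block in `block_order_`, then the slow paths.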
void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
  is_baseline_ = is_baseline;
  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);

  size_t frame_start = GetAssembler()->CodeSize();
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  if (disasm_info_ != nullptr) {
    disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
  }

  for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = block_order_->Get(current_block_index_);
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleJump()) continue;
    Bind(block);
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      DisassemblyScope disassembly_scope(current, *this);
      if (is_baseline) {
        InitLocationsBaseline(current);
      }
      DCHECK(CheckTypeConsistency(current));
      uintptr_t native_pc_begin = GetAssembler()->CodeSize();
      current->Accept(instruction_visitor);
      uintptr_t native_pc_end = GetAssembler()->CodeSize();
      RecordNativeDebugInfo(current->GetDexPc(), native_pc_begin, native_pc_end);
    }
  }

  GenerateSlowPaths();

  // Finalize instructions in the assembler.
  Finalize(allocator);
}

void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();
  CompileInternal(allocator, /* is_baseline */ false);
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

void CodeGenerator::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
  // No linker patches by default.
}

size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (!array[i]) {
      array[i] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
}

size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
  for (size_t i = 0; i < length - 1; i += 2) {
    if (!array[i] && !array[i + 1]) {
      array[i] = true;
      array[i + 1] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
}

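// Records the block order and computes the final frame size. Leaf methods with no
// spill slots, no callee saves and no need for the current method get an empty
// frame (or just the return PC on architectures where calls push it).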
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_number_of_live_core_registers,
                                             size_t maximum_number_of_live_fp_registers,
                                             size_t number_of_out_slots,
                                             const GrowableArray<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(block_order_->Get(0) == GetGraph()->GetEntryBlock());
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_number_of_live_core_registers, 0u);
    DCHECK_EQ(maximum_number_of_live_fp_registers, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        number_of_spill_slots * kVRegSize
        + number_of_out_slots * kVRegSize
        + maximum_number_of_live_core_registers * GetWordSize()
        + maximum_number_of_live_fp_registers * GetFloatingPointSpillSlotSize()
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}

Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  // The type of the previous instruction tells us if we need a single or double stack slot.
  Primitive::Type type = temp->GetType();
  int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
  // Use the temporary region (right below the dex registers).
  int32_t slot = GetFrameSize() - FrameEntrySpillSize()
                                - kVRegSize  // filler
                                - (number_of_locals * kVRegSize)
                                - ((temp_size + temp->GetIndex()) * kVRegSize);
  return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}

int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
  uint16_t reg_number = local->GetRegNumber();
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  if (reg_number >= number_of_locals) {
    // Local is a parameter of the method. It is stored in the caller's frame.
    // TODO: Share this logic with StackVisitor::GetVRegOffsetFromQuickCode.
    return GetFrameSize() + InstructionSetPointerSize(GetInstructionSet())  // ART method
                          + (reg_number - number_of_locals) * kVRegSize;
  } else {
    // Local is a temporary in this method. It is stored in this method's frame.
    return GetFrameSize() - FrameEntrySpillSize()
                          - kVRegSize  // filler.
                          - (number_of_locals * kVRegSize)
                          + (reg_number * kVRegSize);
  }
}

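// Builds the location summary common to all invokes: one calling-convention
// location per argument, the return location, and a temp and/or input for the
// current method, depending on the invoke kind.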
void CodeGenerator::CreateCommonInvokeLocationSummary(
    HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
  LocationSummary* locations = new (allocator) LocationSummary(invoke, LocationSummary::kCall);

  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
  }

  locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));

  if (invoke->IsInvokeStaticOrDirect()) {
    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
    if (call->IsStringInit()) {
      locations->AddTemp(visitor->GetMethodLocation());
    } else if (call->IsRecursive()) {
      locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
    } else {
      locations->AddTemp(visitor->GetMethodLocation());
      locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
    }
  } else {
    locations->AddTemp(visitor->GetMethodLocation());
  }
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKS below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

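// Baseline (local) register allocation for one instruction: block every location
// that is already fixed, then greedily pick free registers for the remaining
// unallocated inputs, temps and output.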
void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) return;

  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    blocked_core_registers_[i] = false;
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    blocked_fpu_registers_[i] = false;
  }

  for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
    blocked_register_pairs_[i] = false;
  }

  // Mark all fixed input, temp and output registers as used.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    BlockIfInRegister(locations->InAt(i));
  }

  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    BlockIfInRegister(loc);
  }
  Location result_location = locations->Out();
  if (locations->OutputCanOverlapWithInputs()) {
    BlockIfInRegister(result_location, /* is_out */ true);
  }

  SetupBlockedRegisters(/* is_baseline */ true);

  // Allocate all unallocated input locations.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    Location loc = locations->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (loc.IsUnallocated()) {
      if ((loc.GetPolicy() == Location::kRequiresRegister)
          || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
        loc = AllocateFreeRegister(input->GetType());
      } else {
        DCHECK_EQ(loc.GetPolicy(), Location::kAny);
        HLoadLocal* load = input->AsLoadLocal();
        if (load != nullptr) {
          loc = GetStackLocation(load);
        } else {
          loc = AllocateFreeRegister(input->GetType());
        }
      }
      locations->SetInAt(i, loc);
    }
  }

  // Allocate all unallocated temp locations.
  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    if (loc.IsUnallocated()) {
      switch (loc.GetPolicy()) {
        case Location::kRequiresRegister:
          // Allocate a core register (large enough to fit a 32-bit integer).
          loc = AllocateFreeRegister(Primitive::kPrimInt);
          break;

        case Location::kRequiresFpuRegister:
          // Allocate an FP register (large enough to fit a 64-bit double).
          loc = AllocateFreeRegister(Primitive::kPrimDouble);
          break;

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << loc.GetPolicy();
      }
      locations->SetTempAt(i, loc);
    }
  }
  if (result_location.IsUnallocated()) {
    switch (result_location.GetPolicy()) {
      case Location::kAny:
      case Location::kRequiresRegister:
      case Location::kRequiresFpuRegister:
        result_location = AllocateFreeRegister(instruction->GetType());
        break;
      case Location::kSameAsFirstInput:
        result_location = locations->InAt(0);
        break;
    }
    locations->UpdateOut(result_location);
  }
}

void CodeGenerator::InitLocationsBaseline(HInstruction* instruction) {
  AllocateLocations(instruction);
  if (instruction->GetLocations() == nullptr) {
    if (instruction->IsTemporary()) {
      HInstruction* previous = instruction->GetPrevious();
      Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
      Move(previous, temp_location, instruction);
    }
    return;
  }
  AllocateRegistersLocally(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    Location location = instruction->GetLocations()->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (location.IsValid()) {
      // Move the input to the desired location.
      if (input->GetNext()->IsTemporary()) {
        // If the input was stored in a temporary, use that temporary to
        // perform the move.
        Move(input->GetNext(), location, instruction);
      } else {
        Move(input, location, instruction);
      }
    }
  }
}

void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr && locations->CanCall()) {
      MarkNotLeaf();
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

CodeGenerator* CodeGenerator::Create(HGraph* graph,
                                     InstructionSet instruction_set,
                                     const InstructionSetFeatures& isa_features,
                                     const CompilerOptions& compiler_options) {
  switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
    case kArm:
    case kThumb2: {
      return new arm::CodeGeneratorARM(graph,
          *isa_features.AsArmInstructionSetFeatures(),
          compiler_options);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      return new arm64::CodeGeneratorARM64(graph,
          *isa_features.AsArm64InstructionSetFeatures(),
          compiler_options);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    case kMips:
      UNUSED(compiler_options);
      UNUSED(graph);
      UNUSED(isa_features);
      return nullptr;
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
    case kMips64: {
      return new mips64::CodeGeneratorMIPS64(graph,
          *isa_features.AsMips64InstructionSetFeatures(),
          compiler_options);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86: {
      return new x86::CodeGeneratorX86(graph,
           *isa_features.AsX86InstructionSetFeatures(),
           compiler_options);
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case kX86_64: {
      return new x86_64::CodeGeneratorX86_64(graph,
          *isa_features.AsX86_64InstructionSetFeatures(),
          compiler_options);
    }
#endif
    default:
      return nullptr;
  }
}

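// Builds the native GC map by pairing each stack map's native pc offset with the
// reference bitmap that the verifier recorded for its dex pc.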
void CodeGenerator::BuildNativeGCMap(
    ArenaVector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
  const std::vector<uint8_t>& gc_map_raw =
      dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);

  uint32_t max_native_offset = stack_map_stream_.ComputeMaxNativePcOffset();

  size_t num_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
  GcMapBuilder builder(data, num_stack_maps, max_native_offset, dex_gc_map.RegWidth());
  for (size_t i = 0; i != num_stack_maps; ++i) {
    const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
    uint32_t native_offset = stack_map_entry.native_pc_offset;
    uint32_t dex_pc = stack_map_entry.dex_pc;
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    builder.AddEntry(native_offset, references);
  }
}

void CodeGenerator::BuildMappingTable(ArenaVector<uint8_t>* data) const {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = stack_map_stream_.GetNumberOfStackMaps();
  uint32_t pc2dex_offset = 0u;
  int32_t pc2dex_dalvik_offset = 0;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  int32_t dex2pc_dalvik_offset = 0;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
    pc2dex_data_size += UnsignedLeb128Size(stack_map_entry.native_pc_offset - pc2dex_offset);
    pc2dex_data_size += SignedLeb128Size(stack_map_entry.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = stack_map_entry.native_pc_offset;
    pc2dex_dalvik_offset = stack_map_entry.dex_pc;
  }

  // Walk over the blocks and find which ones correspond to catch block entries.
  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      ++dex2pc_entries;
      dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  data->resize(data_size);

  uint8_t* data_ptr = &(*data)[0];
  uint8_t* write_pos = data_ptr;

  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
    DCHECK(pc2dex_offset <= stack_map_entry.native_pc_offset);
    write_pos = EncodeUnsignedLeb128(write_pos, stack_map_entry.native_pc_offset - pc2dex_offset);
    write_pos = EncodeSignedLeb128(write_pos, stack_map_entry.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = stack_map_entry.native_pc_offset;
    pc2dex_dalvik_offset = stack_map_entry.dex_pc;
  }

  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }


  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);

  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(data_ptr);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (size_t i = 0; i < pc2dex_entries; i++) {
      const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
      CHECK_EQ(stack_map_entry.native_pc_offset, it.NativePcOffset());
      CHECK_EQ(stack_map_entry.dex_pc, it.DexPc());
      ++it;
    }
    for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
      HBasicBlock* block = graph_->GetBlocks().Get(i);
      if (block->IsCatchBlock()) {
        CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
        CHECK_EQ(block->GetDexPc(), it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}

void CodeGenerator::BuildVMapTable(ArenaVector<uint8_t>* data) const {
  Leb128Encoder<ArenaAllocatorAdapter<uint8_t>> vmap_encoder(data);
  // We currently don't use callee-saved registers.
  size_t size = 0 + 1 /* marker */ + 0;
  vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
  vmap_encoder.PushBackUnsigned(size);
  vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
}

void CodeGenerator::BuildStackMaps(ArenaVector<uint8_t>* data) {
  uint32_t size = stack_map_stream_.PrepareForFillIn();
  data->resize(size);
  MemoryRegion region(data->data(), size);
  stack_map_stream_.FillIn(region);
}

void CodeGenerator::RecordNativeDebugInfo(uint32_t dex_pc,
                                          uintptr_t native_pc_begin,
                                          uintptr_t native_pc_end) {
  if (src_map_ != nullptr && dex_pc != kNoDexPc && native_pc_begin != native_pc_end) {
    src_map_->push_back(SrcMapElem({static_cast<uint32_t>(native_pc_begin),
                                    static_cast<int32_t>(dex_pc)}));
  }
}

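// Records a stack map for `instruction` at the current native pc, including the
// dex register locations of its environment and of any inlined parent environments.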
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 SlowPathCode* slow_path) {
  if (instruction != nullptr) {
    // The code generated for some type conversions and comparisons
    // may call the runtime, thus normally requiring a subsequent
    // call to this method. However, the method verifier does not
    // produce PC information for certain instructions, which are
    // considered "atomic" (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions.  As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion() || instruction->IsCompare()) {
      return;
    }
    if (instruction->IsRem()) {
      Primitive::Type type = instruction->AsRem()->GetResultType();
      if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) {
        return;
      }
    }
  }

  uint32_t outer_dex_pc = dex_pc;
  uint32_t outer_environment_size = 0;
  uint32_t inlining_depth = 0;
  if (instruction != nullptr) {
    for (HEnvironment* environment = instruction->GetEnvironment();
         environment != nullptr;
         environment = environment->GetParent()) {
      outer_dex_pc = environment->GetDexPc();
      outer_environment_size = environment->Size();
      if (environment != instruction->GetEnvironment()) {
        inlining_depth++;
      }
    }
  }

  // Collect PC infos for the mapping table.
  uint32_t native_pc = GetAssembler()->CodeSize();

  if (instruction == nullptr) {
    // For stack overflow checks.
    stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
    stack_map_stream_.EndStackMapEntry();
    return;
  }
  LocationSummary* locations = instruction->GetLocations();

  uint32_t register_mask = locations->GetRegisterMask();
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the caller-save from the mask, since
    // they will be overwritten by the callee.
    register_mask &= core_callee_save_mask_;
  }
  // The register mask must be a subset of callee-save registers.
  DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
                                       native_pc,
                                       register_mask,
                                       locations->GetStackMask(),
                                       outer_environment_size,
                                       inlining_depth);

  EmitEnvironment(instruction->GetEnvironment(), slow_path);
  stack_map_stream_.EndStackMapEntry();
}

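// Emits dex register entries for `environment`, recursing into parent (inlined)
// environments first. Registers that `slow_path` has saved are reported at their
// spill location on the stack instead of in the register.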
void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
  if (environment == nullptr) return;

  if (environment->GetParent() != nullptr) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(), slow_path);
    stack_map_stream_.BeginInlineInfoEntry(environment->GetMethodIdx(),
                                           environment->GetDexPc(),
                                           environment->GetInvokeType(),
                                           environment->Size());
  }

  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        stack_map_stream_.AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister : {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == Primitive::kPrimLong) {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
          if (current->GetType() == Primitive::kPrimLong) {
            stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister : {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == Primitive::kPrimDouble) {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
          if (current->GetType() == Primitive::kPrimDouble) {
            stack_map_stream_.AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair : {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair : {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }

  if (environment->GetParent() != nullptr) {
    stack_map_stream_.EndInlineInfoEntry();
  }
}

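// A null check can be folded into the instruction that follows it (disregarding
// moves) when that instruction can perform an implicit null check on the same input.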
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();

  return (first_next_not_move != nullptr)
      && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  // If we are from a static path, don't record the pc as we can't throw an NPE.
  // NB: having the checks here makes the code much less verbose in the arch
  // specific code generators.
  if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
    return;
  }

  if (!compiler_options_.GetImplicitNullChecks()) {
    return;
  }

  if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
    return;
  }

  // Find the first previous instruction which is not a move.
  HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();

  // If the instruction is a null check it means that `instr` is the first user
  // and needs to record the pc.
  if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
    HNullCheck* null_check = first_prev_not_move->AsNullCheck();
    // TODO: The parallel moves modify the environment. Their changes need to be reverted
    // otherwise the stack maps at the throw point will not be correct.
    RecordPcInfo(null_check, null_check->GetDexPc());
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());

  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    HInstruction* current = it.Current();
    LiveInterval* interval = current->GetLiveInterval();
    // We only need to clear the stack-map bits for loop phis that hold an object
    // and were allocated a register. Loop phis allocated on the stack already
    // have the object in their stack slot.
    if (current->GetType() == Primitive::kPrimNot
        && interval->HasRegister()
        && interval->HasSpillSlot()) {
      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
    }
  }
}

void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      Primitive::Type type1,
                                      Location from2,
                                      Location to2,
                                      Primitive::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetArena());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path) {
  // Ensure that the call kind indication given to the register allocator is
  // coherent with the runtime call generated, and that the GC side effect is
  // set when required.
  if (slow_path == nullptr) {
    DCHECK(instruction->GetLocations()->WillCall()) << instruction->DebugName();
    DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
        << instruction->DebugName() << instruction->GetSideEffects().ToString();
  } else {
    DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal())
        << instruction->DebugName() << slow_path->GetDescription();
    DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
           // Control flow would not come back into the code if a fatal slow
           // path is taken, so we do not care if it triggers GC.
           slow_path->IsFatal() ||
           // HDeoptimize is a special case: we know we are not coming back from
           // it into the code.
           instruction->IsDeoptimize())
        << instruction->DebugName() << instruction->GetSideEffects().ToString()
        << slow_path->GetDescription();
  }

  // Check the coherency of leaf information.
  DCHECK(instruction->IsSuspendCheck()
         || ((slow_path != nullptr) && slow_path->IsFatal())
         || instruction->GetLocations()->CanCall()
         || !IsLeafMethod())
      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}

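// Spills the live caller-save registers at the slow-path entry, recording their
// stack offsets so that EmitEnvironment can report the spilled locations, and
// updating the stack mask for core registers that hold objects.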
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        // If the register holds an object, update the stack mask.
        if (locations->RegisterContainsObject(i)) {
          locations->SetStackBit(stack_offset / kVRegSize);
        }
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
        saved_core_stack_offsets_[i] = stack_offset;
        stack_offset += codegen->SaveCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
        saved_fpu_stack_offsets_[i] = stack_offset;
        stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

}  // namespace art