code_generator.cc revision 88c13cddc3a4184908662b0f3de796565d348c76
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#include "code_generator_arm.h"
#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
#include "vmap_table.h"

namespace art {

// Return whether a location is consistent with a type.
static bool CheckType(Primitive::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return Primitive::IsIntegralType(type) || (type == Primitive::kPrimNot);
  } else if (location.IsRegisterPair()) {
    return type == Primitive::kPrimLong;
  } else if (location.IsFpuRegisterPair()) {
    return type == Primitive::kPrimDouble;
  } else if (location.IsStackSlot()) {
    return (Primitive::IsIntegralType(type) && type != Primitive::kPrimLong)
           || (type == Primitive::kPrimFloat)
           || (type == Primitive::kPrimNot);
  } else if (location.IsDoubleStackSlot()) {
    return (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return Primitive::IsIntegralType(type) && (type != Primitive::kPrimLong);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == Primitive::kPrimNot;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == Primitive::kPrimLong;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == Primitive::kPrimFloat;
    } else {
      return location.GetConstant()->IsDoubleConstant()
          && (type == Primitive::kPrimDouble);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}

// Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    DCHECK(CheckType(instruction->InputAt(i)->GetType(), locations->InAt(i)))
      << instruction->InputAt(i)->GetType()
      << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      Primitive::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, locations->GetEnvironmentAt(i)))
        << type << " " << locations->GetEnvironmentAt(i);
    } else {
      DCHECK(locations->GetEnvironmentAt(i).IsInvalid())
        << locations->GetEnvironmentAt(i);
    }
  }
  return true;
}

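// Returns the byte offset of element `index` in an ObjectArray<mirror::Object>,
// as used when addressing dex cache arrays.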
size_t CodeGenerator::GetCacheOffset(uint32_t index) {
  return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}

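// Baseline compilation entry point: the frame is sized from the dex locals,
// temporaries and outgoing arguments only, since registers are allocated locally
// per instruction and nothing is kept live across slow paths.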
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
  Initialize();
  if (!is_leaf) {
    MarkNotLeaf();
  }
  InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
                             + GetGraph()->GetTemporariesVRegSlots()
                             + 1 /* filler */,
                           0, /* the baseline compiler does not have live registers at slow path */
                           0, /* the baseline compiler does not have live registers at slow path */
                           GetGraph()->GetMaximumNumberOfOutVRegs()
                             + 1 /* current method */,
                           GetGraph()->GetBlocks());
  CompileInternal(allocator, /* is_baseline */ true);
}

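// Returns whether `next` is the block emitted right after `current` in the code
// generation order, ignoring blocks that consist of a single goto.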
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ(block_order_->Get(current_block_index_), current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
    HBasicBlock* block = block_order_->Get(i);
    if (!block->IsSingleGoto()) {
      return block;
    }
  }
  return nullptr;
}

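// Skips over blocks containing only a single goto and returns the first block
// that will actually emit code.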
HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleGoto()) {
    block = block->GetSuccessors().Get(0);
  }
  return block;
}

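// Code generation loop shared by the baseline and optimizing paths: emits the
// frame entry, then every block in `block_order_`, and finally the slow paths.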
void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = block_order_->Get(current_block_index_);
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleGoto()) continue;
    Bind(block);
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (is_baseline) {
        InitLocationsBaseline(current);
      }
      DCHECK(CheckTypeConsistency(current));
      current->Accept(instruction_visitor);
    }
  }

  // Generate the slow paths.
  for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
    slow_paths_.Get(i)->EmitNativeCode(this);
  }

  // Finalize instructions in the assembler.
  Finalize(allocator);
}

void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();
  CompileInternal(allocator, /* is_baseline */ false);
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

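// Marks and returns the first free entry in `array`; used by the baseline
// register allocator to hand out registers.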
size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (!array[i]) {
      array[i] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
}

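// Marks and returns the first even-aligned pair of free entries in `array`,
// as needed for register pairs.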
size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
  for (size_t i = 0; i < length - 1; i += 2) {
    if (!array[i] && !array[i + 1]) {
      array[i] = true;
      array[i + 1] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
}

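// Computes the frame size and the first stack offset available to slow paths,
// given the number of spill slots, outgoing argument slots and registers live
// at slow-path calls.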
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_number_of_live_core_registers,
                                             size_t maximum_number_of_live_fp_registers,
                                             size_t number_of_out_slots,
                                             const GrowableArray<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(block_order_->Get(0) == GetGraph()->GetEntryBlock());
  DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), block_order_->Get(1)));
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_number_of_live_core_registers, 0u);
    DCHECK_EQ(maximum_number_of_live_fp_registers, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        number_of_spill_slots * kVRegSize
        + number_of_out_slots * kVRegSize
        + maximum_number_of_live_core_registers * GetWordSize()
        + maximum_number_of_live_fp_registers * GetFloatingPointSpillSlotSize()
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}

Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  // The type of the previous instruction tells us if we need a single or double stack slot.
  Primitive::Type type = temp->GetType();
  int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
  // Use the temporary region (right below the dex registers).
  int32_t slot = GetFrameSize() - FrameEntrySpillSize()
                                - kVRegSize  // filler
                                - (number_of_locals * kVRegSize)
                                - ((temp_size + temp->GetIndex()) * kVRegSize);
  return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}

int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
  uint16_t reg_number = local->GetRegNumber();
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  if (reg_number >= number_of_locals) {
    // Local is a parameter of the method. It is stored in the caller's frame.
    return GetFrameSize() + kVRegSize  // ART method
                          + (reg_number - number_of_locals) * kVRegSize;
  } else {
    // Local is a temporary in this method. It is stored in this method's frame.
    return GetFrameSize() - FrameEntrySpillSize()
                          - kVRegSize  // filler.
                          - (number_of_locals * kVRegSize)
                          + (reg_number * kVRegSize);
  }
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKS below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

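// Baseline (local) register allocation: block every fixed register mentioned in
// the location summary, then hand out free registers or stack slots for the
// remaining unallocated inputs, temps and output.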
void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) return;

  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    blocked_core_registers_[i] = false;
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    blocked_fpu_registers_[i] = false;
  }

  for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
    blocked_register_pairs_[i] = false;
  }

  // Mark all fixed input, temp and output registers as used.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    BlockIfInRegister(locations->InAt(i));
  }

  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    BlockIfInRegister(loc);
  }
  Location result_location = locations->Out();
  if (locations->OutputCanOverlapWithInputs()) {
    BlockIfInRegister(result_location, /* is_out */ true);
  }

  SetupBlockedRegisters(/* is_baseline */ true);

  // Allocate all unallocated input locations.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    Location loc = locations->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (loc.IsUnallocated()) {
      if ((loc.GetPolicy() == Location::kRequiresRegister)
          || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
        loc = AllocateFreeRegister(input->GetType());
      } else {
        DCHECK_EQ(loc.GetPolicy(), Location::kAny);
        HLoadLocal* load = input->AsLoadLocal();
        if (load != nullptr) {
          loc = GetStackLocation(load);
        } else {
          loc = AllocateFreeRegister(input->GetType());
        }
      }
      locations->SetInAt(i, loc);
    }
  }

  // Allocate all unallocated temp locations.
  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    if (loc.IsUnallocated()) {
      switch (loc.GetPolicy()) {
        case Location::kRequiresRegister:
          // Allocate a core register (large enough to fit a 32-bit integer).
          loc = AllocateFreeRegister(Primitive::kPrimInt);
          break;

        case Location::kRequiresFpuRegister:
          // Allocate an FPU register (large enough to fit a 64-bit double).
          loc = AllocateFreeRegister(Primitive::kPrimDouble);
          break;

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << loc.GetPolicy();
      }
      locations->SetTempAt(i, loc);
    }
  }
  if (result_location.IsUnallocated()) {
    switch (result_location.GetPolicy()) {
      case Location::kAny:
      case Location::kRequiresRegister:
      case Location::kRequiresFpuRegister:
        result_location = AllocateFreeRegister(instruction->GetType());
        break;
      case Location::kSameAsFirstInput:
        result_location = locations->InAt(0);
        break;
    }
    locations->UpdateOut(result_location);
  }
}

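// Builds the location summary for `instruction`, allocates its registers locally
// and moves the inputs into their expected locations (baseline only).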
void CodeGenerator::InitLocationsBaseline(HInstruction* instruction) {
  AllocateLocations(instruction);
  if (instruction->GetLocations() == nullptr) {
    if (instruction->IsTemporary()) {
      HInstruction* previous = instruction->GetPrevious();
      Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
      Move(previous, temp_location, instruction);
    }
    return;
  }
  AllocateRegistersLocally(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    Location location = instruction->GetLocations()->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (location.IsValid()) {
      // Move the input to the desired location.
      if (input->GetNext()->IsTemporary()) {
        // If the input was stored in a temporary, use that temporary to
        // perform the move.
        Move(input->GetNext(), location, instruction);
      } else {
        Move(input, location, instruction);
      }
    }
  }
}

void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr && locations->CanCall()) {
      MarkNotLeaf();
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

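// Factory method returning the architecture-specific code generator, or nullptr
// if the instruction set is not supported.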
CodeGenerator* CodeGenerator::Create(HGraph* graph,
                                     InstructionSet instruction_set,
                                     const InstructionSetFeatures& isa_features,
                                     const CompilerOptions& compiler_options) {
  switch (instruction_set) {
    case kArm:
    case kThumb2: {
      return new arm::CodeGeneratorARM(graph,
          *isa_features.AsArmInstructionSetFeatures(),
          compiler_options);
    }
    case kArm64: {
      return new arm64::CodeGeneratorARM64(graph,
          *isa_features.AsArm64InstructionSetFeatures(),
          compiler_options);
    }
    case kMips:
      return nullptr;
    case kX86: {
      return new x86::CodeGeneratorX86(graph,
          *isa_features.AsX86InstructionSetFeatures(),
          compiler_options);
    }
    case kX86_64: {
      return new x86_64::CodeGeneratorX86_64(graph,
          *isa_features.AsX86_64InstructionSetFeatures(),
          compiler_options);
    }
    default:
      return nullptr;
  }
}

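// Translates the verifier's dex-pc-indexed reference bitmaps into a GC map
// indexed by native pc.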
void CodeGenerator::BuildNativeGCMap(
    std::vector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
  const std::vector<uint8_t>& gc_map_raw =
      dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);

  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    uint32_t native_offset = pc_infos_.Get(i).native_pc;
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }

  GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    uint32_t native_offset = pc_info.native_pc;
    uint32_t dex_pc = pc_info.dex_pc;
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    builder.AddEntry(native_offset, references);
  }
}

void CodeGenerator::BuildSourceMap(DefaultSrcMap* src_map) const {
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    uint32_t pc2dex_offset = pc_info.native_pc;
    int32_t pc2dex_dalvik_offset = pc_info.dex_pc;
    src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
  }
}

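// Encodes the pc-to-dex mappings (from the recorded PcInfos) and the dex-to-pc
// mappings (from catch block entries) as LEB128 deltas, then verifies the
// encoded table in debug builds.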
void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = pc_infos_.Size();
  uint32_t pc2dex_offset = 0u;
  int32_t pc2dex_dalvik_offset = 0;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  int32_t dex2pc_dalvik_offset = 0;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
    pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
  }

  // Walk over the blocks and find which ones correspond to catch block entries.
  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      ++dex2pc_entries;
      dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  data->resize(data_size);

  uint8_t* data_ptr = &(*data)[0];
  uint8_t* write_pos = data_ptr;

  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    DCHECK(pc2dex_offset <= pc_info.native_pc);
    write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
    write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
  }

  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);

  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(data_ptr);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (size_t i = 0; i < pc2dex_entries; i++) {
      struct PcInfo pc_info = pc_infos_.Get(i);
      CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
      CHECK_EQ(pc_info.dex_pc, it.DexPc());
      ++it;
    }
    for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
      HBasicBlock* block = graph_->GetBlocks().Get(i);
      if (block->IsCatchBlock()) {
        CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
        CHECK_EQ(block->GetDexPc(), it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}

void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
  Leb128EncodingVector vmap_encoder;
  // We currently don't use callee-saved registers.
  size_t size = 0 + 1 /* marker */ + 0;
  vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
  vmap_encoder.PushBackUnsigned(size);
  vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);

  *data = vmap_encoder.GetData();
}

void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
  uint32_t size = stack_map_stream_.ComputeNeededSize();
  data->resize(size);
  MemoryRegion region(data->data(), size);
  stack_map_stream_.FillIn(region);
}

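// Records a mapping table entry and a stack map for `instruction` at `dex_pc`,
// including the location of every dex register in its environment.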
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 SlowPathCode* slow_path) {
  if (instruction != nullptr) {
    // The code generated for some type conversions may call the
    // runtime, thus normally requiring a subsequent call to this
    // method.  However, the method verifier does not produce PC
    // information for certain instructions, which are considered "atomic"
    // (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions.  As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      Primitive::Type type = instruction->AsRem()->GetResultType();
      if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) {
        return;
      }
    }
  }

  // Collect PC infos for the mapping table.
  struct PcInfo pc_info;
  pc_info.dex_pc = dex_pc;
  pc_info.native_pc = GetAssembler()->CodeSize();
  pc_infos_.Add(pc_info);

  uint32_t inlining_depth = 0;

  if (instruction == nullptr) {
    // For stack overflow checks.
    stack_map_stream_.AddStackMapEntry(dex_pc, pc_info.native_pc, 0, 0, 0, inlining_depth);
    return;
  }
  LocationSummary* locations = instruction->GetLocations();
  HEnvironment* environment = instruction->GetEnvironment();
  size_t environment_size = instruction->EnvironmentSize();

  uint32_t register_mask = locations->GetRegisterMask();
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the caller-save from the mask, since
    // they will be overwritten by the callee.
    register_mask &= core_callee_save_mask_;
  }
  // The register mask must be a subset of callee-save registers.
  DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  stack_map_stream_.AddStackMapEntry(dex_pc,
                                     pc_info.native_pc,
                                     register_mask,
                                     locations->GetStackMask(),
                                     environment_size,
                                     inlining_depth);

  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0; i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    Location location = locations->GetEnvironmentAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(
              i, DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(
              ++i, DexRegisterLocation::Kind::kConstant, High32Bits(value));
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(
              i, DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream_.AddDexRegisterEntry(
              ++i, DexRegisterLocation::Kind::kConstant, High32Bits(value));
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(
            i, DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream_.AddDexRegisterEntry(
            i, DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        stack_map_stream_.AddDexRegisterEntry(
            ++i, DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister : {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == Primitive::kPrimLong) {
            stack_map_stream_.AddDexRegisterEntry(
                ++i, DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInRegister, id);
          if (current->GetType() == Primitive::kPrimLong) {
            stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInRegister, id);
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister : {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == Primitive::kPrimDouble) {
            stack_map_stream_.AddDexRegisterEntry(
                ++i, DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInFpuRegister, id);
          if (current->GetType() == Primitive::kPrimDouble) {
            stack_map_stream_.AddDexRegisterEntry(
                ++i, DexRegisterLocation::Kind::kInFpuRegister, id);
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair : {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(
              ++i, DexRegisterLocation::Kind::kInFpuRegister, high);
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair : {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream_.AddDexRegisterEntry(
              ++i, DexRegisterLocation::Kind::kInRegister, high);
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }
}

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
  return (first_next_not_move != nullptr) && first_next_not_move->CanDoImplicitNullCheck();
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  // If we come from a static path, don't record the pc, as we can't throw an NPE.
  // NB: having the checks here makes the code much less verbose in the arch
  // specific code generators.
  if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
    return;
  }

  if (!compiler_options_.GetImplicitNullChecks()) {
    return;
  }

  if (!instr->CanDoImplicitNullCheck()) {
    return;
  }

  // Find the first previous instruction which is not a move.
  HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();

  // If the instruction is a null check it means that `instr` is the first user
  // and needs to record the pc.
  if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
    HNullCheck* null_check = first_prev_not_move->AsNullCheck();
    // TODO: The parallel moves modify the environment. Their changes need to be reverted
    // otherwise the stack maps at the throw point will not be correct.
    RecordPcInfo(null_check, null_check->GetDexPc());
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());

  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    HInstruction* current = it.Current();
    LiveInterval* interval = current->GetLiveInterval();
    // We only need to clear bits of loop phis containing objects and allocated in register.
    // Loop phis allocated on stack already have the object in the stack.
    if (current->GetType() == Primitive::kPrimNot
        && interval->HasRegister()
        && interval->HasSpillSlot()) {
      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
    }
  }
}

void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      Primitive::Type type1,
                                      Location from2,
                                      Location to2,
                                      Primitive::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetArena());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

void SlowPathCode::RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc) {
  codegen->RecordPcInfo(instruction, dex_pc, this);
}

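// Spills the caller-save registers that are live across this slow path and
// records their stack offsets, updating the stack mask for registers that hold
// objects.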
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        // If the register holds an object, update the stack mask.
        if (locations->RegisterContainsObject(i)) {
          locations->SetStackBit(stack_offset / kVRegSize);
        }
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
        saved_core_stack_offsets_[i] = stack_offset;
        stack_offset += codegen->SaveCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
        saved_fpu_stack_offsets_[i] = stack_offset;
        stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
  for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!codegen->IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!codegen->IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
        stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

}  // namespace art