code_generator.cc revision fead4e4f397455aa31905b2982d4d861126ab89d
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#include "code_generator_arm.h"
#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
#include "vmap_table.h"

namespace art {

size_t CodeGenerator::GetCacheOffset(uint32_t index) {
  return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}

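// Returns true if `block` contains nothing but a single HGoto and is not a loop back edge
// (back edges carry the suspend check). Such blocks are skipped during code generation:
// their predecessors branch directly to their successor.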
static bool IsSingleGoto(HBasicBlock* block) {
  HLoopInformation* loop_info = block->GetLoopInformation();
  // TODO: Remove the null check b/19084197.
  return (block->GetFirstInstruction() != nullptr)
      && (block->GetFirstInstruction() == block->GetLastInstruction())
      && block->GetLastInstruction()->IsGoto()
      // Back edges generate the suspend check.
      && (loop_info == nullptr || !loop_info->IsBackEdge(block));
}

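// Baseline compilation entry point: every dex register gets a stack slot and registers are
// allocated locally, one instruction at a time, so slow paths never have live registers to save.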
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
  Initialize();
  if (!is_leaf) {
    MarkNotLeaf();
  }
  InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
                             + GetGraph()->GetTemporariesVRegSlots()
                             + 1 /* filler */,
                           0, /* the baseline compiler does not have live registers at slow path */
                           0, /* the baseline compiler does not have live registers at slow path */
                           GetGraph()->GetMaximumNumberOfOutVRegs()
                             + 1 /* current method */,
                           GetGraph()->GetBlocks());
  CompileInternal(allocator, /* is_baseline */ true);
}

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ(block_order_->Get(current_block_index_), current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
    HBasicBlock* block = block_order_->Get(i);
    if (!IsSingleGoto(block)) {
      return block;
    }
  }
  return nullptr;
}

HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (IsSingleGoto(block)) {
    block = block->GetSuccessors().Get(0);
  }
  return block;
}

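// Emission loop shared by the baseline and optimizing paths: generates the frame entry, then
// every non-trivial block in `block_order_`, and finally the slow paths.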
void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);
  GenerateFrameEntry();
  for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = block_order_->Get(current_block_index_);
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (IsSingleGoto(block)) continue;
    Bind(block);
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (is_baseline) {
        InitLocationsBaseline(current);
      }
      current->Accept(instruction_visitor);
    }
  }

  // Generate the slow paths.
  for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
    slow_paths_.Get(i)->EmitNativeCode(this);
  }

  // Finalize instructions in the assembler.
  Finalize(allocator);
}

void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();
  CompileInternal(allocator, /* is_baseline */ false);
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (!array[i]) {
      array[i] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
  return -1;
}

size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
  for (size_t i = 0; i < length - 1; i += 2) {
    if (!array[i] && !array[i + 1]) {
      array[i] = true;
      array[i + 1] = true;
      return i;
    }
  }
  LOG(FATAL) << "Could not find a register in baseline register allocator";
  UNREACHABLE();
  return -1;
}

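// Computes the spill mask, the start of the slow-path register save area, and the frame size.
// The frame is elided entirely when the method is a leaf with no spill slots, no allocated
// callee-save registers, and no need for the current method.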
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_number_of_live_core_registers,
                                             size_t maximum_number_of_live_fp_registers,
                                             size_t number_of_out_slots,
                                             const GrowableArray<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(block_order_->Get(0) == GetGraph()->GetEntryBlock());
  DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), block_order_->Get(1)));
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_number_of_live_core_registers, 0u);
    DCHECK_EQ(maximum_number_of_live_fp_registers, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        number_of_spill_slots * kVRegSize
        + number_of_out_slots * kVRegSize
        + maximum_number_of_live_core_registers * GetWordSize()
        + maximum_number_of_live_fp_registers * GetFloatingPointSpillSlotSize()
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}

Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  // The type of the previous instruction tells us if we need a single or double stack slot.
  Primitive::Type type = temp->GetType();
  int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
  // Use the temporary region (right below the dex registers).
  int32_t slot = GetFrameSize() - FrameEntrySpillSize()
                                - kVRegSize  // filler
                                - (number_of_locals * kVRegSize)
                                - ((temp_size + temp->GetIndex()) * kVRegSize);
  return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}

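// Returns the frame offset of a dex register: parameters are read out of the caller's frame
// (above the ART method slot), while other locals live in this frame below the entry spills
// and the filler slot.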
int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
  uint16_t reg_number = local->GetRegNumber();
  uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
  if (reg_number >= number_of_locals) {
    // Local is a parameter of the method. It is stored in the caller's frame.
    return GetFrameSize() + kVRegSize  // ART method
                          + (reg_number - number_of_locals) * kVRegSize;
  } else {
    // Local is a temporary in this method. It is stored in this method's frame.
    return GetFrameSize() - FrameEntrySpillSize()
                          - kVRegSize  // filler.
                          - (number_of_locals * kVRegSize)
                          + (reg_number * kVRegSize);
  }
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKS below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

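// Baseline per-instruction register allocation: resets the blocked-register arrays, blocks the
// registers already fixed in the location summary, then assigns free registers (or a stack
// location for a kAny input coming from an HLoadLocal) to the remaining inputs, temps and output.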
void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) return;

  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    blocked_core_registers_[i] = false;
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    blocked_fpu_registers_[i] = false;
  }

  for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
    blocked_register_pairs_[i] = false;
  }

  // Mark all fixed input, temp and output registers as used.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    BlockIfInRegister(locations->InAt(i));
  }

  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    BlockIfInRegister(loc);
  }
  Location result_location = locations->Out();
  if (locations->OutputCanOverlapWithInputs()) {
    BlockIfInRegister(result_location, /* is_out */ true);
  }

  SetupBlockedRegisters(/* is_baseline */ true);

  // Allocate all unallocated input locations.
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    Location loc = locations->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (loc.IsUnallocated()) {
      if ((loc.GetPolicy() == Location::kRequiresRegister)
          || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
        loc = AllocateFreeRegister(input->GetType());
      } else {
        DCHECK_EQ(loc.GetPolicy(), Location::kAny);
        HLoadLocal* load = input->AsLoadLocal();
        if (load != nullptr) {
          loc = GetStackLocation(load);
        } else {
          loc = AllocateFreeRegister(input->GetType());
        }
      }
      locations->SetInAt(i, loc);
    }
  }

  // Allocate all unallocated temp locations.
  for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
    Location loc = locations->GetTemp(i);
    if (loc.IsUnallocated()) {
      switch (loc.GetPolicy()) {
        case Location::kRequiresRegister:
          // Allocate a core register (large enough to fit a 32-bit integer).
          loc = AllocateFreeRegister(Primitive::kPrimInt);
          break;

        case Location::kRequiresFpuRegister:
          // Allocate a floating-point register (large enough to fit a 64-bit double).
          loc = AllocateFreeRegister(Primitive::kPrimDouble);
          break;

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << loc.GetPolicy();
      }
      locations->SetTempAt(i, loc);
    }
  }
  if (result_location.IsUnallocated()) {
    switch (result_location.GetPolicy()) {
      case Location::kAny:
      case Location::kRequiresRegister:
      case Location::kRequiresFpuRegister:
        result_location = AllocateFreeRegister(instruction->GetType());
        break;
      case Location::kSameAsFirstInput:
        result_location = locations->InAt(0);
        break;
    }
    locations->UpdateOut(result_location);
  }
}

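// Baseline handling of one instruction: builds its location summary, allocates registers
// locally, then moves each input into its required location (through the following HTemporary
// when the input was stored in one).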
void CodeGenerator::InitLocationsBaseline(HInstruction* instruction) {
  AllocateLocations(instruction);
  if (instruction->GetLocations() == nullptr) {
    if (instruction->IsTemporary()) {
      HInstruction* previous = instruction->GetPrevious();
      Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
      Move(previous, temp_location, instruction);
    }
    return;
  }
  AllocateRegistersLocally(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    Location location = instruction->GetLocations()->InAt(i);
    HInstruction* input = instruction->InputAt(i);
    if (location.IsValid()) {
      // Move the input to the desired location.
      if (input->GetNext()->IsTemporary()) {
        // If the input was stored in a temporary, use that temporary to
        // perform the move.
        Move(input->GetNext(), location, instruction);
      } else {
        Move(input, location, instruction);
      }
    }
  }
}

void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  instruction->Accept(GetLocationBuilder());
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr && locations->CanCall()) {
      MarkNotLeaf();
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

CodeGenerator* CodeGenerator::Create(HGraph* graph,
                                     InstructionSet instruction_set,
                                     const InstructionSetFeatures& isa_features,
                                     const CompilerOptions& compiler_options) {
  switch (instruction_set) {
    case kArm:
    case kThumb2: {
      return new arm::CodeGeneratorARM(graph,
          *isa_features.AsArmInstructionSetFeatures(),
          compiler_options);
    }
    case kArm64: {
      return new arm64::CodeGeneratorARM64(graph,
          *isa_features.AsArm64InstructionSetFeatures(),
          compiler_options);
    }
    case kMips:
      return nullptr;
    case kX86: {
      return new x86::CodeGeneratorX86(graph, compiler_options);
    }
    case kX86_64: {
      return new x86_64::CodeGeneratorX86_64(graph, compiler_options);
    }
    default:
      return nullptr;
  }
}

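// Builds the native GC map by pairing every recorded native PC with the dex register
// reference bitmap that the verifier produced for the corresponding dex pc.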
void CodeGenerator::BuildNativeGCMap(
    std::vector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
  const std::vector<uint8_t>& gc_map_raw =
      dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);

  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    uint32_t native_offset = pc_infos_.Get(i).native_pc;
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }

  GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
  for (size_t i = 0; i < pc_infos_.Size(); i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    uint32_t native_offset = pc_info.native_pc;
    uint32_t dex_pc = pc_info.dex_pc;
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    builder.AddEntry(native_offset, references);
  }
}

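// Builds the pc-to-dex / dex-to-pc mapping table: a ULEB128 header (total entries, pc2dex
// entries) followed by delta-encoded pc2dex entries for every recorded PC and dex2pc entries
// for every catch block entry. Optionally fills `src_map` with the same pc2dex pairs.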
void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, DefaultSrcMap* src_map) const {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = pc_infos_.Size();
  uint32_t pc2dex_offset = 0u;
  int32_t pc2dex_dalvik_offset = 0;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  int32_t dex2pc_dalvik_offset = 0;

  if (src_map != nullptr) {
    src_map->reserve(pc2dex_entries);
  }

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
    pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
    if (src_map != nullptr) {
      src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
    }
  }

  // Walk over the blocks and find which ones correspond to catch block entries.
  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      ++dex2pc_entries;
      dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  data->resize(data_size);

  uint8_t* data_ptr = &(*data)[0];
  uint8_t* write_pos = data_ptr;

  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;

  for (size_t i = 0; i < pc2dex_entries; i++) {
    struct PcInfo pc_info = pc_infos_.Get(i);
    DCHECK(pc2dex_offset <= pc_info.native_pc);
    write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
    write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
    pc2dex_offset = pc_info.native_pc;
    pc2dex_dalvik_offset = pc_info.dex_pc;
  }

  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
    HBasicBlock* block = graph_->GetBlocks().Get(i);
    if (block->IsCatchBlock()) {
      intptr_t native_pc = GetAddressOf(block);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
      dex2pc_offset = native_pc;
      dex2pc_dalvik_offset = block->GetDexPc();
    }
  }

  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);

  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(data_ptr);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (size_t i = 0; i < pc2dex_entries; i++) {
      struct PcInfo pc_info = pc_infos_.Get(i);
      CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
      CHECK_EQ(pc_info.dex_pc, it.DexPc());
      ++it;
    }
    for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
      HBasicBlock* block = graph_->GetBlocks().Get(i);
      if (block->IsCatchBlock()) {
        CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
        CHECK_EQ(block->GetDexPc(), it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}

void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
  Leb128EncodingVector vmap_encoder;
  // We currently don't use callee-saved registers.
  size_t size = 0 + 1 /* marker */ + 0;
  vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
  vmap_encoder.PushBackUnsigned(size);
  vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);

  *data = vmap_encoder.GetData();
}

void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
  uint32_t size = stack_map_stream_.ComputeNeededSize();
  data->resize(size);
  MemoryRegion region(data->data(), size);
  stack_map_stream_.FillIn(region);
}

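// Records the current native PC for `instruction` (or for a stack overflow check when
// `instruction` is null): adds a mapping table entry and a stack map environment.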
void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) {
  if (instruction != nullptr) {
    // The code generated for some type conversions may call the
    // runtime, thus normally requiring a subsequent call to this
    // method.  However, the method verifier does not produce PC
    // information for certain instructions, which are considered "atomic"
    // (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions.  As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      Primitive::Type type = instruction->AsRem()->GetResultType();
      if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) {
        return;
      }
    }
  }

  // Collect PC infos for the mapping table.
  struct PcInfo pc_info;
  pc_info.dex_pc = dex_pc;
  pc_info.native_pc = GetAssembler()->CodeSize();
  pc_infos_.Add(pc_info);

  uint32_t inlining_depth = 0;
  if (instruction == nullptr) {
    // For stack overflow checks.
    stack_map_stream_.RecordEnvironment(
       /* environment */ nullptr,
       /* environment_size */ 0,
       /* locations */ nullptr,
       dex_pc,
       pc_info.native_pc,
       /* register_mask */ 0,
       inlining_depth);
  } else {
    LocationSummary* locations = instruction->GetLocations();
    HEnvironment* environment = instruction->GetEnvironment();
    size_t environment_size = instruction->EnvironmentSize();

    uint32_t register_mask = locations->GetRegisterMask();
    if (locations->OnlyCallsOnSlowPath()) {
      // In case of slow path, we currently set the location of caller-save registers
      // to register (instead of their stack location when pushed before the slow-path
      // call). Therefore register_mask contains both callee-save and caller-save
      // registers that hold objects. We must remove the caller-save from the mask, since
      // they will be overwritten by the callee.
      register_mask &= core_callee_save_mask_;
    }
    // The register mask must be a subset of callee-save registers.
    DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);

    // Populate stack map information.
    stack_map_stream_.RecordEnvironment(environment,
                                        environment_size,
                                        locations,
                                        dex_pc,
                                        pc_info.native_pc,
                                        register_mask,
                                        inlining_depth);
  }
}

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
  return (first_next_not_move != nullptr) && first_next_not_move->CanDoImplicitNullCheck();
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  // If we are from a static path don't record the pc as we can't throw NPE.
  // NB: having the checks here makes the code much less verbose in the arch
  // specific code generators.
  if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
    return;
  }

  if (!compiler_options_.GetImplicitNullChecks()) {
    return;
  }

  if (!instr->CanDoImplicitNullCheck()) {
    return;
  }

  // Find the first previous instruction which is not a move.
  HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();

  // If the instruction is a null check it means that `instr` is the first user
  // and needs to record the pc.
  if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
    HNullCheck* null_check = first_prev_not_move->AsNullCheck();
    // TODO: The parallel moves modify the environment. Their changes need to be reverted
    // otherwise the stack maps at the throw point will not be correct.
    RecordPcInfo(null_check, null_check->GetDexPc());
  }
}

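// Spills the live caller-save registers to the slow-path save area, setting stack mask bits
// for core registers that hold objects. Callee-save registers are skipped.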
void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = first_register_slot_in_slow_path_;
  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        // If the register holds an object, update the stack mask.
        if (locations->RegisterContainsObject(i)) {
          locations->SetStackBit(stack_offset / kVRegSize);
        }
        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
        stack_offset += SaveCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
        stack_offset += SaveFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
  RegisterSet* register_set = locations->GetLiveRegisters();
  size_t stack_offset = first_register_slot_in_slow_path_;
  for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
    if (!IsCoreCalleeSaveRegister(i)) {
      if (register_set->ContainsCoreRegister(i)) {
        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
        stack_offset += RestoreCoreRegister(stack_offset, i);
      }
    }
  }

  for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
    if (!IsFloatingPointCalleeSaveRegister(i)) {
      if (register_set->ContainsFloatingPointRegister(i)) {
        DCHECK_LT(stack_offset, GetFrameSize() - FrameEntrySpillSize());
        stack_offset += RestoreFloatingPointRegister(stack_offset, i);
      }
    }
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());

  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    HInstruction* current = it.Current();
    LiveInterval* interval = current->GetLiveInterval();
    // We only need to clear bits of loop phis containing objects and allocated in register.
    // Loop phis allocated on stack already have the object in the stack.
    if (current->GetType() == Primitive::kPrimNot
        && interval->HasRegister()
        && interval->HasSpillSlot()) {
      locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
    }
  }
}

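// Emits the two moves as a single parallel move so the move resolver can order them and handle
// a potential overlap between the locations.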
void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
  HParallelMove parallel_move(GetGraph()->GetArena());
  parallel_move.AddMove(from1, to1, nullptr);
  parallel_move.AddMove(from2, to2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

}  // namespace art