/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "type_inference.h"

#include "base/bit_vector-inl.h"
#include "compiler_ir.h"
#include "dataflow_iterator-inl.h"
#include "dex_file-inl.h"
#include "dex_flags.h"
#include "driver/dex_compilation_unit.h"
#include "mir_field_info.h"
#include "mir_graph.h"
#include "mir_method_info.h"
#include "utils.h"

namespace art {

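// Build an array type of the given depth: the component's wide/type bits are shifted
// into the dedicated array-type bit range, while the array reference itself is always
// a narrow, low-word reference.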
inline TypeInference::Type TypeInference::Type::ArrayType(uint32_t array_depth, Type nested_type) {
  DCHECK_NE(array_depth, 0u);
  return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (array_depth << kBitArrayDepthStart) |
              ((nested_type.raw_bits_ & kMaskWideAndType) << kArrayTypeShift));
}

inline TypeInference::Type TypeInference::Type::ArrayTypeFromComponent(Type component_type) {
  if (component_type.ArrayDepth() == 0u) {
    return ArrayType(1u, component_type);
  }
  if (UNLIKELY(component_type.ArrayDepth() == kMaxArrayDepth)) {
    return component_type;
  }
  return Type(component_type.raw_bits_ + (1u << kBitArrayDepthStart));  // array_depth + 1u.
}

TypeInference::Type TypeInference::Type::ShortyType(char shorty) {
  switch (shorty) {
    case 'L':
      return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
    case 'D':
      return Type(kFlagLowWord | kFlagWide | kFlagFp);
    case 'J':
      return Type(kFlagLowWord | kFlagWide | kFlagCore);
    case 'F':
      return Type(kFlagLowWord | kFlagNarrow | kFlagFp);
    default:
      DCHECK(shorty == 'I' || shorty == 'S' || shorty == 'C' || shorty == 'B' || shorty == 'Z');
      return Type(kFlagLowWord | kFlagNarrow | kFlagCore);
  }
}

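// Construct a type from a dex type descriptor. 'V' yields Unknown() (no value),
// array descriptors are parsed up to kMaxArrayDepth dimensions (deeper arrays are
// clamped with a warning) and everything else maps through ShortyType().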
TypeInference::Type TypeInference::Type::DexType(const DexFile* dex_file, uint32_t type_idx) {
  const char* desc = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx));
  if (UNLIKELY(desc[0] == 'V')) {
    return Unknown();
  } else if (UNLIKELY(desc[0] == '[')) {
    size_t array_depth = 0u;
    while (*desc == '[') {
      ++array_depth;
      ++desc;
    }
    if (UNLIKELY(array_depth > kMaxArrayDepth)) {
      LOG(WARNING) << "Array depth exceeds " << kMaxArrayDepth << ": " << array_depth
          << " in dex file " << dex_file->GetLocation() << " type index " << type_idx;
      array_depth = kMaxArrayDepth;
    }
    return ArrayType(array_depth, ShortyType(desc[0]));
  } else {
    return ShortyType(desc[0]);
  }
}

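// Resolve a merge of array types with different depths. Since we cannot represent
// both, collapse to a depth-1 array with all component type bits (core, ref and fp)
// set as a conflict marker, recording a size conflict if either side had wide
// elements at depth 1.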
bool TypeInference::Type::MergeArrayConflict(Type src_type) {
  DCHECK(Ref());
  DCHECK_NE(ArrayDepth(), src_type.ArrayDepth());
  DCHECK_GE(std::min(ArrayDepth(), src_type.ArrayDepth()), 1u);
  bool size_conflict =
      (ArrayDepth() == 1u && (raw_bits_ & kFlagArrayWide) != 0u) ||
      (src_type.ArrayDepth() == 1u && (src_type.raw_bits_ & kFlagArrayWide) != 0u);
  // Mark all three array type bits so that merging any other type bits will not change this type.
  return Copy(Type((raw_bits_ & kMaskNonArray) |
                   (1u << kBitArrayDepthStart) | kFlagArrayCore | kFlagArrayRef | kFlagArrayFp |
                   kFlagArrayNarrow | (size_conflict ? kFlagArrayWide : 0u)));
}

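// Merge src_type into this type, trusting the source's array information: arrays of
// equal depth merge their component types, a [? or [L here is overwritten by a deeper
// source array type, and irreconcilable depths mark the component as conflicting.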
bool TypeInference::Type::MergeStrong(Type src_type) {
  bool changed = MergeNonArrayFlags(src_type);
  if (src_type.ArrayDepth() != 0u) {
    if (ArrayDepth() == 0u) {
      DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
      DCHECK_NE(src_type.raw_bits_ & kFlagRef, 0u);
      raw_bits_ |= src_type.raw_bits_ & (~kMaskNonArray | kFlagRef);
      changed = true;
    } else if (ArrayDepth() == src_type.ArrayDepth()) {
      changed |= MergeBits(src_type, kMaskArrayWideAndType);
    } else if (src_type.ArrayDepth() == 1u &&
        (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
         ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
      // Source type is [L or [? but current type is at least [[, preserve it.
    } else if (ArrayDepth() == 1u &&
        (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
         ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
      // Overwrite [? or [L with the source array type which is at least [[.
      raw_bits_ = (raw_bits_ & kMaskNonArray) | (src_type.raw_bits_ & ~kMaskNonArray);
      changed = true;
    } else {
      // Mark the array value type with conflict - both ref and fp.
      changed |= MergeArrayConflict(src_type);
    }
  }
  return changed;
}

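// Merge src_type into this type conservatively: array information is merged only from
// non-null sources, and a deeper source array never overwrites a [? or [L here; the
// [? is merely upgraded to [L. Used, for example, when merging phi inputs.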
bool TypeInference::Type::MergeWeak(Type src_type) {
  bool changed = MergeNonArrayFlags(src_type);
  if (src_type.ArrayDepth() != 0u && src_type.NonNull()) {
    DCHECK_NE(src_type.ArrayDepth(), 0u);
    if (ArrayDepth() == 0u) {
      DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
      // Preserve current type.
    } else if (ArrayDepth() == src_type.ArrayDepth()) {
      changed |= MergeBits(src_type, kMaskArrayWideAndType);
    } else if (src_type.ArrayDepth() == 1u &&
        (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
         ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
      // Source type is [L or [? but current type is at least [[, preserve it.
    } else if (ArrayDepth() == 1u &&
        (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
         ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
      // We have [? or [L. If it's [?, upgrade to [L as the source array type is at least [[.
      changed |= MergeBits(ObjectArrayType(), kMaskArrayWideAndType);
    } else {
      // Mark the array value type with conflict - both ref and fp.
      changed |= MergeArrayConflict(src_type);
    }
  }
  return changed;
}

TypeInference::CheckCastData::CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
    : mir_graph_(mir_graph),
      alloc_(alloc),
      num_blocks_(mir_graph->GetNumBlocks()),
      num_sregs_(mir_graph->GetNumSSARegs()),
      check_cast_map_(std::less<MIR*>(), alloc->Adapter()),
      split_sreg_data_(std::less<int32_t>(), alloc->Adapter()) {
}

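// Record a check-cast and allocate an extra "modified" SSA reg representing the value
// with the casted type from this point on, effectively splitting the original SSA reg.
// The per-block starting values of the split reg are resolved later in AddPseudoPhis().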
void TypeInference::CheckCastData::AddCheckCast(MIR* check_cast, Type type) {
  DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
  type.CheckPureRef();
  int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
  num_sregs_ += 1;
  check_cast_map_.Put(check_cast, CheckCastMapValue{extra_s_reg, type});  // NOLINT
  int32_t s_reg = check_cast->ssa_rep->uses[0];
  auto lb = split_sreg_data_.lower_bound(s_reg);
  if (lb == split_sreg_data_.end() || split_sreg_data_.key_comp()(s_reg, lb->first)) {
    SplitSRegData split_s_reg_data = {
        0,
        alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
        alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
        new (alloc_) ArenaBitVector(alloc_, num_blocks_, false)
    };
    std::fill_n(split_s_reg_data.starting_mod_s_reg, num_blocks_, INVALID_SREG);
    std::fill_n(split_s_reg_data.ending_mod_s_reg, num_blocks_, INVALID_SREG);
    split_s_reg_data.def_phi_blocks_->ClearAllBits();
    BasicBlock* def_bb = FindDefBlock(check_cast);
    split_s_reg_data.ending_mod_s_reg[def_bb->id] = s_reg;
    split_s_reg_data.def_phi_blocks_->SetBit(def_bb->id);
    lb = split_sreg_data_.PutBefore(lb, s_reg, split_s_reg_data);
  }
  lb->second.ending_mod_s_reg[check_cast->bb] = extra_s_reg;
  lb->second.def_phi_blocks_->SetBit(check_cast->bb);
}

void TypeInference::CheckCastData::AddPseudoPhis() {
  // Look for pseudo-phis where a split SSA reg merges with a differently typed version
  // and initialize all starting_mod_s_reg.
  DCHECK(!split_sreg_data_.empty());
  ArenaBitVector* phi_blocks = new (alloc_) ArenaBitVector(alloc_, num_blocks_, false);

  for (auto& entry : split_sreg_data_) {
    SplitSRegData& data = entry.second;

    // Find pseudo-phi nodes: compute the iterated dominance frontier of the def blocks.
    phi_blocks->ClearAllBits();
    ArenaBitVector* input_blocks = data.def_phi_blocks_;
    do {
      for (uint32_t idx : input_blocks->Indexes()) {
        BasicBlock* def_bb = mir_graph_->GetBasicBlock(idx);
        if (def_bb->dom_frontier != nullptr) {
          phi_blocks->Union(def_bb->dom_frontier);
        }
      }
    } while (input_blocks->Union(phi_blocks));

    // Find live pseudo-phis. Make sure they're merging the same SSA reg.
    data.def_phi_blocks_->ClearAllBits();
    int32_t s_reg = entry.first;
    int v_reg = mir_graph_->SRegToVReg(s_reg);
    for (uint32_t phi_bb_id : phi_blocks->Indexes()) {
      BasicBlock* phi_bb = mir_graph_->GetBasicBlock(phi_bb_id);
      DCHECK(phi_bb != nullptr);
      DCHECK(phi_bb->data_flow_info != nullptr);
      DCHECK(phi_bb->data_flow_info->live_in_v != nullptr);
      if (IsSRegLiveAtStart(phi_bb, v_reg, s_reg)) {
        int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
        num_sregs_ += 1;
        data.starting_mod_s_reg[phi_bb_id] = extra_s_reg;
        data.def_phi_blocks_->SetBit(phi_bb_id);
      }
    }

    // SSA rename for s_reg.
    TopologicalSortIterator iter(mir_graph_);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      if (bb->data_flow_info == nullptr || bb->block_type == kEntryBlock) {
        continue;
      }
      BasicBlockId bb_id = bb->id;
      if (data.def_phi_blocks_->IsBitSet(bb_id)) {
        DCHECK_NE(data.starting_mod_s_reg[bb_id], INVALID_SREG);
      } else {
        DCHECK_EQ(data.starting_mod_s_reg[bb_id], INVALID_SREG);
        if (IsSRegLiveAtStart(bb, v_reg, s_reg)) {
          // The earliest predecessor must have been processed already.
          BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
          int32_t mod_s_reg = data.ending_mod_s_reg[pred_bb->id];
          data.starting_mod_s_reg[bb_id] = (mod_s_reg != INVALID_SREG) ? mod_s_reg : s_reg;
        } else if (data.ending_mod_s_reg[bb_id] != INVALID_SREG) {
          // Start the original defining block with s_reg.
          data.starting_mod_s_reg[bb_id] = s_reg;
        }
      }
      if (data.ending_mod_s_reg[bb_id] == INVALID_SREG) {
        // If the block doesn't define the modified SSA reg, it propagates the starting type.
        data.ending_mod_s_reg[bb_id] = data.starting_mod_s_reg[bb_id];
      }
    }
  }
}

void TypeInference::CheckCastData::InitializeCheckCastSRegs(Type* sregs) const {
  for (const auto& entry : check_cast_map_) {
    DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
    sregs[entry.second.modified_s_reg] = entry.second.type.AsNonNull();
  }
}

void TypeInference::CheckCastData::MergeCheckCastConflicts(Type* sregs) const {
  for (const auto& entry : check_cast_map_) {
    DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
    sregs[entry.first->ssa_rep->uses[0]].MergeNonArrayFlags(
        sregs[entry.second.modified_s_reg].AsNull());
  }
}

void TypeInference::CheckCastData::MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const {
  for (auto& entry : split_sreg_data_) {
    for (uint32_t bb_id : entry.second.def_phi_blocks_->Indexes()) {
      bb_df_attrs[bb_id] |= DF_NULL_TRANSFER_N;
    }
  }
}

void TypeInference::CheckCastData::Start(BasicBlock* bb) {
  for (auto& entry : split_sreg_data_) {
    entry.second.current_mod_s_reg = entry.second.starting_mod_s_reg[bb->id];
  }
}

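// Merge the types flowing into this block's pseudo-phis: weakly merge the ending types
// of all predecessors, then propagate the result both down into the current modified
// reg and (stripped of the "non-null" flag) back up into the predecessors.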
bool TypeInference::CheckCastData::ProcessPseudoPhis(BasicBlock* bb, Type* sregs) {
  bool changed = false;
  for (auto& entry : split_sreg_data_) {
    DCHECK_EQ(entry.second.current_mod_s_reg, entry.second.starting_mod_s_reg[bb->id]);
    if (entry.second.def_phi_blocks_->IsBitSet(bb->id)) {
      int32_t* ending_mod_s_reg = entry.second.ending_mod_s_reg;
      Type merged_type = sregs[entry.second.current_mod_s_reg];
      for (BasicBlockId pred_id : bb->predecessors) {
        DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
        merged_type.MergeWeak(sregs[ending_mod_s_reg[pred_id]]);
      }
      if (UNLIKELY(!merged_type.IsDefined())) {
        // This can happen during an initial merge of a loop head if the original def is
        // actually an untyped null. (All other definitions are typed using the check-cast.)
      } else if (merged_type.Wide()) {
        // Ignore the pseudo-phi, just remember that there's a size mismatch.
        sregs[entry.second.current_mod_s_reg].MarkSizeConflict();
      } else {
        DCHECK(merged_type.Narrow() && merged_type.LowWord() && !merged_type.HighWord());
        // Propagate both down (fully) and up (without the "non-null" flag).
        changed |= sregs[entry.second.current_mod_s_reg].Copy(merged_type);
        merged_type = merged_type.AsNull();
        for (BasicBlockId pred_id : bb->predecessors) {
          DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
          sregs[ending_mod_s_reg[pred_id]].MergeStrong(merged_type);
        }
      }
    }
  }
  return changed;
}

void TypeInference::CheckCastData::ProcessCheckCast(MIR* mir) {
  auto mir_it = check_cast_map_.find(mir);
  DCHECK(mir_it != check_cast_map_.end());
  auto sreg_it = split_sreg_data_.find(mir->ssa_rep->uses[0]);
  DCHECK(sreg_it != split_sreg_data_.end());
  sreg_it->second.current_mod_s_reg = mir_it->second.modified_s_reg;
}

TypeInference::SplitSRegData* TypeInference::CheckCastData::GetSplitSRegData(int32_t s_reg) {
  auto it = split_sreg_data_.find(s_reg);
  return (it == split_sreg_data_.end()) ? nullptr : &it->second;
}

BasicBlock* TypeInference::CheckCastData::FindDefBlock(MIR* check_cast) {
  // Find the initial definition of the SSA reg used by the check-cast.
  DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
  int32_t s_reg = check_cast->ssa_rep->uses[0];
  if (mir_graph_->IsInVReg(s_reg)) {
    return mir_graph_->GetEntryBlock();
  }
  int v_reg = mir_graph_->SRegToVReg(s_reg);
  BasicBlock* bb = mir_graph_->GetBasicBlock(check_cast->bb);
  DCHECK(bb != nullptr);
  while (true) {
    // Find the earliest predecessor in the topological sort order to ensure we don't
    // go in a loop.
    BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
    DCHECK(pred_bb != nullptr);
    DCHECK(pred_bb->data_flow_info != nullptr);
    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
    if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
      // The s_reg was not valid at the end of pred_bb, so it must have been defined in bb.
      return bb;
    }
    bb = pred_bb;
  }
}

BasicBlock* TypeInference::CheckCastData::FindTopologicallyEarliestPredecessor(BasicBlock* bb) {
  DCHECK(!bb->predecessors.empty());
  const auto& indexes = mir_graph_->GetTopologicalSortOrderIndexes();
  DCHECK_LT(bb->id, indexes.size());
  size_t best_idx = indexes[bb->id];
  BasicBlockId best_id = NullBasicBlockId;
  for (BasicBlockId pred_id : bb->predecessors) {
    DCHECK_LT(pred_id, indexes.size());
    if (best_idx > indexes[pred_id]) {
      best_idx = indexes[pred_id];
      best_id = pred_id;
    }
  }
  // There must be at least one predecessor earlier than the bb.
  DCHECK_LT(best_idx, indexes[bb->id]);
  return mir_graph_->GetBasicBlock(best_id);
}

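// Check that v_reg is live at the start of bb and that s_reg is still the SSA reg
// assigned to it at the exit of every predecessor, i.e. that a pseudo-phi here would
// merge versions of the same SSA reg rather than an unrelated definition.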
bool TypeInference::CheckCastData::IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg) {
  DCHECK_EQ(v_reg, mir_graph_->SRegToVReg(s_reg));
  DCHECK(bb != nullptr);
  DCHECK(bb->data_flow_info != nullptr);
  DCHECK(bb->data_flow_info->live_in_v != nullptr);
  if (!bb->data_flow_info->live_in_v->IsBitSet(v_reg)) {
    return false;
  }
  for (BasicBlockId pred_id : bb->predecessors) {
    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
    DCHECK(pred_bb != nullptr);
    DCHECK(pred_bb->data_flow_info != nullptr);
    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
    if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
      return false;
    }
  }
  return true;
}

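// A sketch of the intended driver loop; this is an assumption for illustration, as the
// actual pass wiring lives in the MIR pass framework rather than in this file
// (RepeatingPreOrderDfsIterator comes from dataflow_iterator-inl.h):
//
//   TypeInference inference(mir_graph, allocator);
//   bool changed = false;
//   RepeatingPreOrderDfsIterator iter(mir_graph);
//   for (BasicBlock* bb = iter.Next(changed); bb != nullptr; bb = iter.Next(changed)) {
//     changed = inference.Apply(bb);
//   }
//   inference.Finish();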
TypeInference::TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
    : mir_graph_(mir_graph),
      cu_(mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()),
      check_cast_data_(!mir_graph->HasCheckCast() ? nullptr :
          InitializeCheckCastData(mir_graph, alloc)),
      num_sregs_(
          check_cast_data_ != nullptr ? check_cast_data_->NumSRegs() : mir_graph->GetNumSSARegs()),
      ifields_(mir_graph->GetIFieldLoweringInfoCount() == 0u ? nullptr :
          PrepareIFieldTypes(cu_->dex_file, mir_graph, alloc)),
      sfields_(mir_graph->GetSFieldLoweringInfoCount() == 0u ? nullptr :
          PrepareSFieldTypes(cu_->dex_file, mir_graph, alloc)),
      signatures_(mir_graph->GetMethodLoweringInfoCount() == 0u ? nullptr :
          PrepareSignatures(cu_->dex_file, mir_graph, alloc)),
      current_method_signature_(
          Signature(cu_->dex_file, cu_->method_idx, (cu_->access_flags & kAccStatic) != 0, alloc)),
      sregs_(alloc->AllocArray<Type>(num_sregs_, kArenaAllocMisc)),
      bb_df_attrs_(alloc->AllocArray<uint64_t>(mir_graph->GetNumBlocks(), kArenaAllocDFInfo)) {
  InitializeSRegs();
}

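// Propagate types through one basic block. Returns true if any SSA reg's type changed
// so that the caller can iterate to a fixed point. Phis, moves, array accesses,
// check-casts and two-register compares (IF_cc) all transfer type information here.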
bool TypeInference::Apply(BasicBlock* bb) {
  bool changed = false;
  uint64_t bb_df_attrs = bb_df_attrs_[bb->id];
  if (bb_df_attrs != 0u) {
    if (UNLIKELY(check_cast_data_ != nullptr)) {
      check_cast_data_->Start(bb);
      if (bb_df_attrs & DF_NULL_TRANSFER_N) {
        changed |= check_cast_data_->ProcessPseudoPhis(bb, sregs_);
      }
    }
    MIR* mir = bb->first_mir_insn;
    MIR* main_mirs_end = ((bb_df_attrs & DF_SAME_TYPE_AB) != 0u) ? bb->last_mir_insn : nullptr;
    for (; mir != main_mirs_end && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi;
        mir = mir->next) {
      // Special-case handling for Phi comes first because we have 2 Phis instead of a wide one.
      // At least one input must have been previously processed. Look for the first
      // occurrence of a high_word or low_word flag to determine the type.
      size_t num_uses = mir->ssa_rep->num_uses;
      const int32_t* uses = mir->ssa_rep->uses;
      const int32_t* defs = mir->ssa_rep->defs;
      DCHECK_EQ(bb->predecessors.size(), num_uses);
      Type merged_type = sregs_[defs[0]];
      for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
        int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
        merged_type.MergeWeak(sregs_[input_mod_s_reg]);
      }
      if (UNLIKELY(!merged_type.IsDefined())) {
        // No change.
      } else if (merged_type.HighWord()) {
        // Ignore the high word phi, just remember if there's a size mismatch.
        if (UNLIKELY(merged_type.LowWord())) {
          sregs_[defs[0]].MarkSizeConflict();
        }
      } else {
        // Propagate both down (fully) and up (without the "non-null" flag).
        changed |= sregs_[defs[0]].Copy(merged_type);
        merged_type = merged_type.AsNull();
        for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
          int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
          changed |= UpdateSRegFromLowWordType(input_mod_s_reg, merged_type);
        }
      }
    }

    // Propagate types with MOVEs and AGETs, process CHECK_CASTs for modified SSA reg tracking.
    for (; mir != main_mirs_end; mir = mir->next) {
      uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
      size_t num_uses = mir->ssa_rep->num_uses;
      const int32_t* uses = mir->ssa_rep->uses;
      const int32_t* defs = mir->ssa_rep->defs;

      // Special handling for moves. Propagate type both ways.
      if ((attrs & DF_IS_MOVE) != 0) {
        int32_t used_mod_s_reg = ModifiedSReg(uses[0]);
        int32_t defd_mod_s_reg = defs[0];

        // The "non-null" flag is propagated only downwards from actual definitions and it's
        // not initially marked for moves, so the used sreg must be marked before the defined
        // sreg. The only exception is an inlined move where we know the type from the
        // original invoke.
        DCHECK(sregs_[used_mod_s_reg].NonNull() || !sregs_[defd_mod_s_reg].NonNull() ||
               (mir->optimization_flags & MIR_CALLEE) != 0);
        changed |= UpdateSRegFromLowWordType(used_mod_s_reg, sregs_[defd_mod_s_reg].AsNull());

        // The value is the same, so either both registers are null or neither is.
        // In either case we can safely propagate the array type down.
        changed |= UpdateSRegFromLowWordType(defd_mod_s_reg, sregs_[used_mod_s_reg]);
        if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[used_mod_s_reg].Ref())) {
          // Mark type conflict: move instead of move-object.
          sregs_[used_mod_s_reg].MarkTypeConflict();
        }
        continue;
      }

      // Handle AGET/APUT.
      if ((attrs & DF_HAS_RANGE_CHKS) != 0) {
        int32_t base_mod_s_reg = ModifiedSReg(uses[num_uses - 2u]);
        int32_t mod_s_reg = (attrs & DF_DA) != 0 ? defs[0] : ModifiedSReg(uses[0]);
        DCHECK_NE(sregs_[base_mod_s_reg].ArrayDepth(), 0u);
        if (!sregs_[base_mod_s_reg].NonNull()) {
          // If the base is null, don't propagate anything. All that we could determine
          // has already been merged in the previous stage.
        } else {
          changed |= UpdateSRegFromLowWordType(mod_s_reg, sregs_[base_mod_s_reg].ComponentType());
          Type array_type = Type::ArrayTypeFromComponent(sregs_[mod_s_reg]);
          if ((attrs & DF_DA) != 0) {
            changed |= sregs_[base_mod_s_reg].MergeStrong(array_type);
          } else {
            changed |= sregs_[base_mod_s_reg].MergeWeak(array_type);
          }
        }
        if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[mod_s_reg].Ref())) {
          // Mark type conflict: aget/aput instead of aget/aput-object.
          sregs_[mod_s_reg].MarkTypeConflict();
        }
        continue;
      }

      // Special-case handling for check-cast to advance the modified SSA reg.
      if (UNLIKELY((attrs & DF_CHK_CAST) != 0)) {
        DCHECK(check_cast_data_ != nullptr);
        check_cast_data_->ProcessCheckCast(mir);
      }
    }

    // Propagate types for IF_cc if present.
    if (mir != nullptr) {
      DCHECK(mir == bb->last_mir_insn);
      DCHECK(mir->next == nullptr);
      DCHECK_NE(MIRGraph::GetDataFlowAttributes(mir) & DF_SAME_TYPE_AB, 0u);
      DCHECK_EQ(mir->ssa_rep->num_uses, 2u);
      const int32_t* uses = mir->ssa_rep->uses;
      int32_t mod_s_reg0 = ModifiedSReg(uses[0]);
      int32_t mod_s_reg1 = ModifiedSReg(uses[1]);
      changed |= sregs_[mod_s_reg0].MergeWeak(sregs_[mod_s_reg1].AsNull());
      changed |= sregs_[mod_s_reg1].MergeWeak(sregs_[mod_s_reg0].AsNull());
    }
  }
  return changed;
}

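// Write the inferred types back to the MIRGraph's reg_location_. A size conflict makes
// us punt the whole method to the interpreter; a type conflict merely disables the
// type-sensitive optimizations.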
void TypeInference::Finish() {
  if (UNLIKELY(check_cast_data_ != nullptr)) {
    check_cast_data_->MergeCheckCastConflicts(sregs_);
  }

  size_t num_sregs = mir_graph_->GetNumSSARegs();  // Without the extra SSA regs.
  for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
    if (sregs_[s_reg].SizeConflict()) {
      /*
       * The dex bytecode definition does not explicitly outlaw using the same virtual
       * register in both a 32-bit and a 64-bit pair context. However, dx does not
       * generate this pattern (at least recently). Further, in the next revision of
       * dex, we will forbid this. To support the few cases in the wild, detect this
       * pattern and punt to the interpreter.
       */
      LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
                   << " has size conflict block for sreg " << s_reg
                   << ", punting to interpreter.";
      mir_graph_->SetPuntToInterpreter(true);
      return;
    }
  }

  size_t conflict_s_reg = 0;
  bool type_conflict = false;
  for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
    Type type = sregs_[s_reg];
    RegLocation* loc = &mir_graph_->reg_location_[s_reg];
    loc->wide = type.Wide();
    loc->defined = type.IsDefined();
    loc->fp = type.Fp();
    loc->core = type.Core();
    loc->ref = type.Ref();
    loc->high_word = type.HighWord();
    if (UNLIKELY(type.TypeConflict())) {
      type_conflict = true;
      conflict_s_reg = s_reg;
    }
  }

  if (type_conflict) {
    /*
     * Each dalvik register definition should be used either as a reference, an
     * integer or a floating point value. We don't normally expect to see a Dalvik
     * register definition used in two or three of these roles, though technically
     * it could happen with constants (0 for all three roles, non-zero for integer
     * and FP). Detect this situation and disable optimizations that rely on correct
     * typing, i.e. register promotion, GVN/LVN and GVN-based DCE.
     */
    LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
                 << " has type conflict block for sreg " << conflict_s_reg
                 << ", disabling register promotion.";
    cu_->disable_opt |=
        (1u << kPromoteRegs) |
        (1u << kGlobalValueNumbering) |
        (1u << kGvnDeadCodeElimination) |
        (1u << kLocalValueNumbering);
  }
}

TypeInference::Type TypeInference::FieldType(const DexFile* dex_file, uint32_t field_idx) {
  uint32_t type_idx = dex_file->GetFieldId(field_idx).type_idx_;
  return Type::DexType(dex_file, type_idx);
}

TypeInference::Type* TypeInference::PrepareIFieldTypes(const DexFile* dex_file,
                                                       MIRGraph* mir_graph,
                                                       ScopedArenaAllocator* alloc) {
  size_t count = mir_graph->GetIFieldLoweringInfoCount();
  Type* ifields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
  for (uint32_t i = 0u; i != count; ++i) {
    // NOTE: Quickened field accesses have invalid FieldIndex() but they are always resolved.
    const MirFieldInfo& info = mir_graph->GetIFieldLoweringInfo(i);
    const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
    uint32_t field_idx = info.IsResolved() ? info.DeclaringFieldIndex() : info.FieldIndex();
    ifields[i] = FieldType(current_dex_file, field_idx);
    DCHECK_EQ(info.MemAccessType() == kDexMemAccessWide, ifields[i].Wide());
    DCHECK_EQ(info.MemAccessType() == kDexMemAccessObject, ifields[i].Ref());
  }
  return ifields;
}

TypeInference::Type* TypeInference::PrepareSFieldTypes(const DexFile* dex_file,
                                                       MIRGraph* mir_graph,
                                                       ScopedArenaAllocator* alloc) {
  size_t count = mir_graph->GetSFieldLoweringInfoCount();
  Type* sfields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
  for (uint32_t i = 0u; i != count; ++i) {
    // FieldIndex() is always valid for static fields (no quickened instructions).
    sfields[i] = FieldType(dex_file, mir_graph->GetSFieldLoweringInfo(i).FieldIndex());
  }
  return sfields;
}

TypeInference::MethodSignature TypeInference::Signature(const DexFile* dex_file,
                                                        uint32_t method_idx,
                                                        bool is_static,
                                                        ScopedArenaAllocator* alloc) {
  const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
  const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
  Type return_type = Type::DexType(dex_file, proto_id.return_type_idx_);
  const DexFile::TypeList* type_list = dex_file->GetProtoParameters(proto_id);
  size_t this_size = (is_static ? 0u : 1u);
  size_t param_size = ((type_list != nullptr) ? type_list->Size() : 0u);
  size_t size = this_size + param_size;
  Type* param_types = (size != 0u) ? alloc->AllocArray<Type>(size, kArenaAllocDFInfo) : nullptr;
  if (!is_static) {
    param_types[0] = Type::DexType(dex_file, method_id.class_idx_);
  }
  for (size_t i = 0; i != param_size; ++i) {
    uint32_t type_idx = type_list->GetTypeItem(i).type_idx_;
    param_types[this_size + i] = Type::DexType(dex_file, type_idx);
  }
  return MethodSignature{ return_type, size, param_types };  // NOLINT
}

TypeInference::MethodSignature* TypeInference::PrepareSignatures(const DexFile* dex_file,
                                                                 MIRGraph* mir_graph,
                                                                 ScopedArenaAllocator* alloc) {
  size_t count = mir_graph->GetMethodLoweringInfoCount();
  MethodSignature* signatures = alloc->AllocArray<MethodSignature>(count, kArenaAllocDFInfo);
  for (uint32_t i = 0u; i != count; ++i) {
    // NOTE: Quickened invokes have invalid MethodIndex() but they are always resolved.
    const MirMethodInfo& info = mir_graph->GetMethodLoweringInfo(i);
    uint32_t method_idx = info.IsResolved() ? info.DeclaringMethodIndex() : info.MethodIndex();
    const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
    signatures[i] = Signature(current_dex_file, method_idx, info.IsStatic(), alloc);
  }
  return signatures;
}

TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* mir_graph,
                                                                     ScopedArenaAllocator* alloc) {
  if (!mir_graph->HasCheckCast()) {
    return nullptr;
  }

  CheckCastData* data = nullptr;
  const DexFile* dex_file = nullptr;
  PreOrderDfsIterator iter(mir_graph);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST) {
        if (data == nullptr) {
          data = new (alloc) CheckCastData(mir_graph, alloc);
          dex_file = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()->dex_file;
        }
        Type type = Type::DexType(dex_file, mir->dalvikInsn.vB);
        data->AddCheckCast(mir, type);
      }
    }
  }
  if (data != nullptr) {
    data->AddPseudoPhis();
  }
  return data;
}

void TypeInference::InitializeSRegs() {
  std::fill_n(sregs_, num_sregs_, Type::Unknown());

  /* Treat the ArtMethod* SSA reg specially since it is pointer sized. */
  sregs_[mir_graph_->GetMethodSReg()] = Type::ArtMethodType(cu_->target64);

  // Initialize parameter SSA regs at method entry.
  int32_t entry_param_s_reg = mir_graph_->GetFirstInVR();
  for (size_t i = 0, size = current_method_signature_.num_params; i != size; ++i) {
    Type param_type = current_method_signature_.param_types[i].AsNonNull();
    sregs_[entry_param_s_reg] = param_type;
    entry_param_s_reg += param_type.Wide() ? 2 : 1;
  }
  DCHECK_EQ(static_cast<uint32_t>(entry_param_s_reg),
            mir_graph_->GetFirstInVR() + mir_graph_->GetNumOfInVRs());

  // Initialize check-cast types.
  if (UNLIKELY(check_cast_data_ != nullptr)) {
    check_cast_data_->InitializeCheckCastSRegs(sregs_);
  }

  // Initialize well-known SSA register definition types. Merge inferred types
  // upwards where a single merge is enough (INVOKE arguments and return type,
  // RETURN type, IPUT/SPUT source type).
  // NOTE: Using topological sort order to make sure the definition comes before
  // any upward merging. This allows simple assignment of the defined types
  // instead of MergeStrong().
  TopologicalSortIterator iter(mir_graph_);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    uint64_t bb_df_attrs = 0u;
    if (UNLIKELY(check_cast_data_ != nullptr)) {
      check_cast_data_->Start(bb);
    }
    // Ignore pseudo-phis; we're not setting types for SSA regs that depend on them in this pass.
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
      bb_df_attrs |= attrs;

      const uint32_t num_uses = mir->ssa_rep->num_uses;
      const int32_t* uses = mir->ssa_rep->uses;
      const int32_t* defs = mir->ssa_rep->defs;

      uint16_t opcode = mir->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::CONST_4:
        case Instruction::CONST_16:
        case Instruction::CONST:
        case Instruction::CONST_HIGH16:
        case Instruction::CONST_WIDE_16:
        case Instruction::CONST_WIDE_32:
        case Instruction::CONST_WIDE:
        case Instruction::CONST_WIDE_HIGH16:
        case Instruction::MOVE:
        case Instruction::MOVE_FROM16:
        case Instruction::MOVE_16:
        case Instruction::MOVE_WIDE:
        case Instruction::MOVE_WIDE_FROM16:
        case Instruction::MOVE_WIDE_16:
        case Instruction::MOVE_OBJECT:
        case Instruction::MOVE_OBJECT_FROM16:
        case Instruction::MOVE_OBJECT_16:
          if ((mir->optimization_flags & MIR_CALLEE) != 0) {
            // Inlined const/move keeps method_lowering_info for type inference.
            DCHECK_LT(mir->meta.method_lowering_info, mir_graph_->GetMethodLoweringInfoCount());
            Type return_type = signatures_[mir->meta.method_lowering_info].return_type;
            DCHECK(return_type.IsDefined());  // Method return type can't be void.
            sregs_[defs[0]] = return_type.AsNonNull();
            if (return_type.Wide()) {
              DCHECK_EQ(defs[0] + 1, defs[1]);
              sregs_[defs[1]] = return_type.ToHighWord();
            }
            break;
          }
          FALLTHROUGH_INTENDED;
        case kMirOpPhi:
          // These cannot be determined in this simple pass and will be processed later.
          break;

        case Instruction::MOVE_RESULT:
        case Instruction::MOVE_RESULT_WIDE:
        case Instruction::MOVE_RESULT_OBJECT:
          // Nothing to do, handled with invoke-* or filled-new-array/-range.
          break;
        case Instruction::MOVE_EXCEPTION:
          // NOTE: We can never catch an array.
          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
          break;
        case Instruction::CONST_STRING:
        case Instruction::CONST_STRING_JUMBO:
          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
          break;
        case Instruction::CONST_CLASS:
          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
          break;
        case Instruction::CHECK_CAST:
          DCHECK(check_cast_data_ != nullptr);
          check_cast_data_->ProcessCheckCast(mir);
          break;
        case Instruction::ARRAY_LENGTH:
          sregs_[ModifiedSReg(uses[0])].MergeStrong(Type::UnknownArrayType());
          break;
        case Instruction::NEW_INSTANCE:
          sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB).AsNonNull();
          DCHECK(sregs_[defs[0]].Ref());
          DCHECK_EQ(sregs_[defs[0]].ArrayDepth(), 0u);
          break;
        case Instruction::NEW_ARRAY:
          sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vC).AsNonNull();
          DCHECK(sregs_[defs[0]].Ref());
          DCHECK_NE(sregs_[defs[0]].ArrayDepth(), 0u);
          break;
        case Instruction::FILLED_NEW_ARRAY:
        case Instruction::FILLED_NEW_ARRAY_RANGE: {
          Type array_type = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB);
          array_type.CheckPureRef();  // Previously checked by the method verifier.
          DCHECK_NE(array_type.ArrayDepth(), 0u);
          Type component_type = array_type.ComponentType();
          DCHECK(!component_type.Wide());
          MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
          if (move_result_mir != nullptr) {
            DCHECK_EQ(move_result_mir->dalvikInsn.opcode, Instruction::MOVE_RESULT_OBJECT);
            sregs_[move_result_mir->ssa_rep->defs[0]] = array_type.AsNonNull();
          }
          DCHECK_EQ(num_uses, mir->dalvikInsn.vA);
          for (size_t next = 0u; next != num_uses; ++next) {
            int32_t input_mod_s_reg = ModifiedSReg(uses[next]);
            sregs_[input_mod_s_reg].MergeStrong(component_type);
          }
          break;
        }
        case Instruction::INVOKE_VIRTUAL:
        case Instruction::INVOKE_SUPER:
        case Instruction::INVOKE_DIRECT:
        case Instruction::INVOKE_STATIC:
        case Instruction::INVOKE_INTERFACE:
        case Instruction::INVOKE_VIRTUAL_RANGE:
        case Instruction::INVOKE_SUPER_RANGE:
        case Instruction::INVOKE_DIRECT_RANGE:
        case Instruction::INVOKE_STATIC_RANGE:
        case Instruction::INVOKE_INTERFACE_RANGE:
        case Instruction::INVOKE_VIRTUAL_QUICK:
        case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
          const MethodSignature* signature = &signatures_[mir->meta.method_lowering_info];
          MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
          if (move_result_mir != nullptr) {
            Type return_type = signature->return_type;
            sregs_[move_result_mir->ssa_rep->defs[0]] = return_type.AsNonNull();
            if (return_type.Wide()) {
              DCHECK_EQ(move_result_mir->ssa_rep->defs[0] + 1, move_result_mir->ssa_rep->defs[1]);
              sregs_[move_result_mir->ssa_rep->defs[1]] = return_type.ToHighWord();
            }
          }
          size_t next = 0u;
          for (size_t i = 0, size = signature->num_params; i != size; ++i) {
            Type param_type = signature->param_types[i];
            int32_t param_s_reg = ModifiedSReg(uses[next]);
            DCHECK(!param_type.Wide() || uses[next] + 1 == uses[next + 1]);
            UpdateSRegFromLowWordType(param_s_reg, param_type);
            next += param_type.Wide() ? 2 : 1;
          }
          DCHECK_EQ(next, num_uses);
          DCHECK_EQ(next, mir->dalvikInsn.vA);
          break;
        }

        case Instruction::RETURN_WIDE:
          DCHECK(current_method_signature_.return_type.Wide());
          DCHECK_EQ(uses[0] + 1, uses[1]);
          DCHECK_EQ(ModifiedSReg(uses[0]), uses[0]);
          FALLTHROUGH_INTENDED;
        case Instruction::RETURN:
        case Instruction::RETURN_OBJECT: {
          int32_t mod_s_reg = ModifiedSReg(uses[0]);
          UpdateSRegFromLowWordType(mod_s_reg, current_method_signature_.return_type);
          break;
        }

        // NOTE: For AGET/APUT we set only the array type. The operand type is set
        // below based on the data flow attributes.
        case Instruction::AGET:
        case Instruction::APUT:
          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowArrayType());
          break;
        case Instruction::AGET_WIDE:
        case Instruction::APUT_WIDE:
          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::WideArrayType());
          break;
        case Instruction::AGET_OBJECT:
          sregs_[defs[0]] = sregs_[defs[0]].AsNonNull();
          FALLTHROUGH_INTENDED;
        case Instruction::APUT_OBJECT:
          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::ObjectArrayType());
          break;
        case Instruction::AGET_BOOLEAN:
        case Instruction::APUT_BOOLEAN:
        case Instruction::AGET_BYTE:
        case Instruction::APUT_BYTE:
        case Instruction::AGET_CHAR:
        case Instruction::APUT_CHAR:
        case Instruction::AGET_SHORT:
        case Instruction::APUT_SHORT:
          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowCoreArrayType());
          break;

        case Instruction::IGET_WIDE:
        case Instruction::IGET_WIDE_QUICK:
          DCHECK_EQ(defs[0] + 1, defs[1]);
          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
          sregs_[defs[1]] = ifields_[mir->meta.ifield_lowering_info].ToHighWord();
          FALLTHROUGH_INTENDED;
        case Instruction::IGET:
        case Instruction::IGET_OBJECT:
        case Instruction::IGET_BOOLEAN:
        case Instruction::IGET_BYTE:
        case Instruction::IGET_CHAR:
        case Instruction::IGET_SHORT:
        case Instruction::IGET_QUICK:
        case Instruction::IGET_OBJECT_QUICK:
        case Instruction::IGET_BOOLEAN_QUICK:
        case Instruction::IGET_BYTE_QUICK:
        case Instruction::IGET_CHAR_QUICK:
        case Instruction::IGET_SHORT_QUICK:
          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
          sregs_[defs[0]] = ifields_[mir->meta.ifield_lowering_info].AsNonNull();
          break;
        case Instruction::IPUT_WIDE:
        case Instruction::IPUT_WIDE_QUICK:
          DCHECK_EQ(uses[0] + 1, uses[1]);
          FALLTHROUGH_INTENDED;
        case Instruction::IPUT:
        case Instruction::IPUT_OBJECT:
        case Instruction::IPUT_BOOLEAN:
        case Instruction::IPUT_BYTE:
        case Instruction::IPUT_CHAR:
        case Instruction::IPUT_SHORT:
        case Instruction::IPUT_QUICK:
        case Instruction::IPUT_OBJECT_QUICK:
        case Instruction::IPUT_BOOLEAN_QUICK:
        case Instruction::IPUT_BYTE_QUICK:
        case Instruction::IPUT_CHAR_QUICK:
        case Instruction::IPUT_SHORT_QUICK:
          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
          UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
                                    ifields_[mir->meta.ifield_lowering_info]);
          break;
        case Instruction::SGET_WIDE:
          DCHECK_EQ(defs[0] + 1, defs[1]);
          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
          sregs_[defs[1]] = sfields_[mir->meta.sfield_lowering_info].ToHighWord();
          FALLTHROUGH_INTENDED;
        case Instruction::SGET:
        case Instruction::SGET_OBJECT:
        case Instruction::SGET_BOOLEAN:
        case Instruction::SGET_BYTE:
        case Instruction::SGET_CHAR:
        case Instruction::SGET_SHORT:
          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
          sregs_[defs[0]] = sfields_[mir->meta.sfield_lowering_info].AsNonNull();
          break;
        case Instruction::SPUT_WIDE:
          DCHECK_EQ(uses[0] + 1, uses[1]);
          FALLTHROUGH_INTENDED;
        case Instruction::SPUT:
        case Instruction::SPUT_OBJECT:
        case Instruction::SPUT_BOOLEAN:
        case Instruction::SPUT_BYTE:
        case Instruction::SPUT_CHAR:
        case Instruction::SPUT_SHORT:
          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
          UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
                                    sfields_[mir->meta.sfield_lowering_info]);
          break;

        default:
          // No invokes or reference definitions here.
          DCHECK_EQ(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC), 0u);
          DCHECK_NE(attrs & (DF_DA | DF_REF_A), (DF_DA | DF_REF_A));
          break;
      }

      if ((attrs & DF_NULL_TRANSFER_N) != 0) {
        // Don't process Phis at this stage.
        continue;
      }

      // Handle defs
      if (attrs & DF_DA) {
        int32_t s_reg = defs[0];
        sregs_[s_reg].SetLowWord();
        if (attrs & DF_FP_A) {
          sregs_[s_reg].SetFp();
        }
        if (attrs & DF_CORE_A) {
          sregs_[s_reg].SetCore();
        }
        if (attrs & DF_REF_A) {
          sregs_[s_reg].SetRef();
        }
        if (attrs & DF_A_WIDE) {
          sregs_[s_reg].SetWide();
          DCHECK_EQ(s_reg + 1, ModifiedSReg(defs[1]));
          sregs_[s_reg + 1].MergeHighWord(sregs_[s_reg]);
        } else {
          sregs_[s_reg].SetNarrow();
        }
      }

      // Handle uses
      size_t next = 0;
  #define PROCESS(REG)                                                        \
      if (attrs & DF_U##REG) {                                                \
        int32_t mod_s_reg = ModifiedSReg(uses[next]);                         \
        sregs_[mod_s_reg].SetLowWord();                                       \
        if (attrs & DF_FP_##REG) {                                            \
          sregs_[mod_s_reg].SetFp();                                          \
        }                                                                     \
        if (attrs & DF_CORE_##REG) {                                          \
          sregs_[mod_s_reg].SetCore();                                        \
        }                                                                     \
        if (attrs & DF_REF_##REG) {                                           \
          sregs_[mod_s_reg].SetRef();                                         \
        }                                                                     \
        if (attrs & DF_##REG##_WIDE) {                                        \
          sregs_[mod_s_reg].SetWide();                                        \
          DCHECK_EQ(mod_s_reg + 1, ModifiedSReg(uses[next + 1]));             \
          sregs_[mod_s_reg + 1].SetWide();                                    \
          sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);             \
          next += 2;                                                          \
        } else {                                                              \
          sregs_[mod_s_reg].SetNarrow();                                      \
          next++;                                                             \
        }                                                                     \
      }
      PROCESS(A)
      PROCESS(B)
      PROCESS(C)
  #undef PROCESS
      DCHECK(next == mir->ssa_rep->num_uses || (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0);
    }
    // Record relevant attributes.
    bb_df_attrs_[bb->id] = bb_df_attrs &
        (DF_NULL_TRANSFER_N | DF_CHK_CAST | DF_IS_MOVE | DF_HAS_RANGE_CHKS | DF_SAME_TYPE_AB);
  }

  if (UNLIKELY(check_cast_data_ != nullptr)) {
    check_cast_data_->MarkPseudoPhiBlocks(bb_df_attrs_);
  }
}

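// Return the current "modified" version of an SSA reg if it has been split by a
// check-cast, otherwise the reg itself.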
int32_t TypeInference::ModifiedSReg(int32_t s_reg) {
  if (UNLIKELY(check_cast_data_ != nullptr)) {
    SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
    if (UNLIKELY(split_data != nullptr)) {
      DCHECK_NE(split_data->current_mod_s_reg, INVALID_SREG);
      return split_data->current_mod_s_reg;
    }
  }
  return s_reg;
}

int32_t TypeInference::PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx) {
  DCHECK_LT(pred_idx, bb->predecessors.size());
  if (UNLIKELY(check_cast_data_ != nullptr)) {
    SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
    if (UNLIKELY(split_data != nullptr)) {
      return split_data->ending_mod_s_reg[bb->predecessors[pred_idx]];
    }
  }
  return s_reg;
}

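// Merge a low-word type into the given modified SSA reg and, for wide values, keep the
// adjacent high-word reg consistent with it.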
bool TypeInference::UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type) {
  DCHECK(low_word_type.LowWord());
  bool changed = sregs_[mod_s_reg].MergeStrong(low_word_type);
  if (!sregs_[mod_s_reg].Narrow()) {  // Wide without conflict with narrow.
    DCHECK(!low_word_type.Narrow());
    DCHECK_LT(mod_s_reg, mir_graph_->GetNumSSARegs());  // Original SSA reg.
    changed |= sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);
  }
  return changed;
}

}  // namespace art