// mir_to_lir.cc — revision 73ed718e7b08d17fd2e4af9bceb5e74ac46db676
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "dex/compiler_internals.h"
18#include "dex/dataflow_iterator-inl.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "mir_to_lir-inl.h"
21#include "object_utils.h"
22#include "thread-inl.h"
23
24namespace art {
25
26void Mir2Lir::LockArg(int in_position, bool wide) {
27  RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
28  RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
29      RegStorage::InvalidReg();
30
31  if (reg_arg_low.Valid()) {
32    LockTemp(reg_arg_low);
33  }
34  if (reg_arg_high.Valid() && reg_arg_low != reg_arg_high) {
35    LockTemp(reg_arg_high);
36  }
37}
38
39// TODO: needs revisit for 64-bit.
40RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
41  RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
42  RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
43      RegStorage::InvalidReg();
44
45  int offset = StackVisitor::GetOutVROffset(in_position);
46  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
47    /*
48     * When doing a call for x86, it moves the stack pointer in order to push return.
49     * Thus, we add another 4 bytes to figure out the out of caller (in of callee).
50     * TODO: This needs revisited for 64-bit.
51     */
52    offset += sizeof(uint32_t);
53  }
54
55  // If the VR is wide and there is no register for high part, we need to load it.
56  if (wide && !reg_arg_high.Valid()) {
57    // If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
58    if (!reg_arg_low.Valid()) {
59      RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
60      reg_arg_low = new_regs.GetLow();
61      reg_arg_high = new_regs.GetHigh();
62      LoadBaseDispWide(TargetReg(kSp), offset, new_regs, INVALID_SREG);
63    } else {
64      reg_arg_high = AllocTemp();
65      int offset_high = offset + sizeof(uint32_t);
66      LoadWordDisp(TargetReg(kSp), offset_high, reg_arg_high);
67    }
68  }
69
70  // If the low part is not in a register yet, we need to load it.
71  if (!reg_arg_low.Valid()) {
72    reg_arg_low = AllocTemp();
73    LoadWordDisp(TargetReg(kSp), offset, reg_arg_low);
74  }
75
76  if (wide) {
77    return RegStorage::MakeRegPair(reg_arg_low, reg_arg_high);
78  } else {
79    return reg_arg_low;
80  }
81}
82
83void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
84  int offset = StackVisitor::GetOutVROffset(in_position);
85  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
86    /*
87     * When doing a call for x86, it moves the stack pointer in order to push return.
88     * Thus, we add another 4 bytes to figure out the out of caller (in of callee).
89     * TODO: This needs revisited for 64-bit.
90     */
91    offset += sizeof(uint32_t);
92  }
93
94  if (!rl_dest.wide) {
95    RegStorage reg = GetArgMappingToPhysicalReg(in_position);
96    if (reg.Valid()) {
97      OpRegCopy(rl_dest.reg, reg);
98    } else {
99      LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg);
100    }
101  } else {
102    RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
103    RegStorage reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
104
105    if (reg_arg_low.Valid() && reg_arg_high.Valid()) {
106      OpRegCopyWide(rl_dest.reg, RegStorage::MakeRegPair(reg_arg_low, reg_arg_high));
107    } else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
108      OpRegCopy(rl_dest.reg, reg_arg_low);
109      int offset_high = offset + sizeof(uint32_t);
110      LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHigh());
111    } else if (!reg_arg_low.Valid() && reg_arg_high.Valid()) {
112      OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
113      LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
114    } else {
115      LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg, INVALID_SREG);
116    }
117  }
118}
119
bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
  // Generates the body of a simple getter: load a field of "this" into the
  // return register(s). Returns false if the pattern does not apply and the
  // method must go through normal compilation.
  // FastInstance() already checked by DexFileMethodInliner.
  const InlineIGetIPutData& data = special.d.ifield_data;
  if (data.method_is_static || data.object_arg != 0) {
    // The object is not "this" and has to be null-checked.
    return false;
  }

  bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
  // The inliner doesn't distinguish kDouble or kFloat, use shorty.
  bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D';

  // Point of no return - no aborts after this
  GenPrintLabel(mir);
  LockArg(data.object_arg);  // Keep the "this" register pinned across the load.
  RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
  RegStorage reg_obj = LoadArg(data.object_arg);
  if (wide) {
    LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg, INVALID_SREG);
  } else {
    LoadWordDisp(reg_obj, data.field_offset, rl_dest.reg);
  }
  if (data.is_volatile) {
    // Without context sensitive analysis, we must issue the most conservative barriers.
    // In this case, either a load or store may follow so we issue both barriers.
    GenMemBarrier(kLoadLoad);
    GenMemBarrier(kLoadStore);
  }
  return true;
}
150
bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
  // Generates the body of a simple setter: store an argument into a field of
  // "this". Returns false if the pattern does not apply and the method must go
  // through normal compilation.
  // FastInstance() already checked by DexFileMethodInliner.
  const InlineIGetIPutData& data = special.d.ifield_data;
  if (data.method_is_static || data.object_arg != 0) {
    // The object is not "this" and has to be null-checked.
    return false;
  }

  bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));

  // Point of no return - no aborts after this
  GenPrintLabel(mir);
  // Pin both argument registers before loading either, so neither load clobbers the other.
  LockArg(data.object_arg);
  LockArg(data.src_arg, wide);
  RegStorage reg_obj = LoadArg(data.object_arg);
  RegStorage reg_src = LoadArg(data.src_arg, wide);
  if (data.is_volatile) {
    // There might have been a store before this volatile one so insert StoreStore barrier.
    GenMemBarrier(kStoreStore);
  }
  if (wide) {
    StoreBaseDispWide(reg_obj, data.field_offset, reg_src);
  } else {
    StoreBaseDisp(reg_obj, data.field_offset, reg_src, kWord);
  }
  if (data.is_volatile) {
    // A load might follow the volatile store so insert a StoreLoad barrier.
    GenMemBarrier(kStoreLoad);
  }
  if (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT)) {
    // Reference store: dirty the GC card for the written object.
    MarkGCCard(reg_src, reg_obj);
  }
  return true;
}
185
186bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
187  const InlineReturnArgData& data = special.d.return_data;
188  bool wide = (data.is_wide != 0u);
189  // The inliner doesn't distinguish kDouble or kFloat, use shorty.
190  bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D';
191
192  // Point of no return - no aborts after this
193  GenPrintLabel(mir);
194  LockArg(data.arg, wide);
195  RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
196  LoadArgDirect(data.arg, rl_dest);
197  return true;
198}
199
/*
 * Special-case code generation for simple non-throwing leaf methods.
 */
bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
  // Dispatches to the specialized generator for the inline opcode. On success,
  // emits the special (frameless) exit sequence and clears all spill/frame
  // bookkeeping; on failure the caller falls back to normal compilation.
  DCHECK(special.flags & kInlineSpecial);
  current_dalvik_offset_ = mir->offset;
  MIR* return_mir = nullptr;
  bool successful = false;

  switch (special.opcode) {
    case kInlineOpNop:
      successful = true;
      DCHECK_EQ(mir->dalvikInsn.opcode, Instruction::RETURN_VOID);
      return_mir = mir;
      break;
    case kInlineOpNonWideConst: {
      successful = true;
      RegLocation rl_dest = GetReturn(cu_->shorty[0] == 'F');
      GenPrintLabel(mir);
      LoadConstant(rl_dest.reg, static_cast<int>(special.d.data));
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    }
    case kInlineOpReturnArg:
      successful = GenSpecialIdentity(mir, special);
      return_mir = mir;
      break;
    case kInlineOpIGet:
      successful = GenSpecialIGet(mir, special);
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    case kInlineOpIPut:
      successful = GenSpecialIPut(mir, special);
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    default:
      break;
  }

  if (successful) {
    if (kIsDebugBuild) {
      // Clear unreachable catch entries.
      mir_graph_->catches_.clear();
    }

    // Handle verbosity for return MIR.
    if (return_mir != nullptr) {
      current_dalvik_offset_ = return_mir->offset;
      // Not handling special identity case because it already generated code as part
      // of the return. The label should have been added before any code was generated.
      if (special.opcode != kInlineOpReturnArg) {
        GenPrintLabel(return_mir);
      }
    }
    GenSpecialExitSequence();

    // Special methods use no frame: zero out all spill/frame state.
    core_spill_mask_ = 0;
    num_core_spills_ = 0;
    fp_spill_mask_ = 0;
    num_fp_spills_ = 0;
    frame_size_ = 0;
    core_vmap_table_.clear();
    fp_vmap_table_.clear();
  }

  return successful;
}
267
/*
 * Target-independent code generation.  Use only high-level
 * load/store utilities here, or target-dependent genXX() handlers
 * when necessary.
 */
void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) {
  // Translates one (non-extended) Dalvik instruction into LIR.
  // |label_list| maps basic-block ids to their LIR labels for branch targets.
  RegLocation rl_src[3];
  RegLocation rl_dest = mir_graph_->GetBadLoc();
  RegLocation rl_result = mir_graph_->GetBadLoc();
  Instruction::Code opcode = mir->dalvikInsn.opcode;
  int opt_flags = mir->optimization_flags;
  uint32_t vB = mir->dalvikInsn.vB;
  uint32_t vC = mir->dalvikInsn.vC;

  // Prep Src and Dest locations.
  // The dataflow attributes describe which of A/B/C are used/defined and
  // whether each is wide; wide operands consume two consecutive s-regs.
  int next_sreg = 0;
  int next_loc = 0;
  uint64_t attrs = mir_graph_->oat_data_flow_attributes_[opcode];
  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
      next_sreg+= 2;
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
      next_sreg+= 2;
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
    }
  }
  if (attrs & DF_DA) {
    if (attrs & DF_A_WIDE) {
      rl_dest = mir_graph_->GetDestWide(mir);
    } else {
      rl_dest = mir_graph_->GetDest(mir);
    }
  }
  switch (opcode) {
    case Instruction::NOP:
      break;

    case Instruction::MOVE_EXCEPTION:
      GenMoveException(rl_dest);
      break;

    case Instruction::RETURN_VOID:
      // Constructors that require it get a StoreStore barrier before returning.
      if (((cu_->access_flags & kAccConstructor) != 0) &&
          cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
                                                          cu_->class_def_idx)) {
        GenMemBarrier(kStoreStore);
      }
      if (!mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      break;

    case Instruction::RETURN:
    case Instruction::RETURN_OBJECT:
      if (!mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      StoreValue(GetReturn(cu_->shorty[0] == 'F'), rl_src[0]);
      break;

    case Instruction::RETURN_WIDE:
      if (!mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      StoreValueWide(GetReturnWide(cu_->shorty[0] == 'D'), rl_src[0]);
      break;

    case Instruction::MOVE_RESULT_WIDE:
      if ((opt_flags & MIR_INLINED) != 0) {
        break;  // Nop - combined w/ previous invoke.
      }
      StoreValueWide(rl_dest, GetReturnWide(rl_dest.fp));
      break;

    case Instruction::MOVE_RESULT:
    case Instruction::MOVE_RESULT_OBJECT:
      if ((opt_flags & MIR_INLINED) != 0) {
        break;  // Nop - combined w/ previous invoke.
      }
      StoreValue(rl_dest, GetReturn(rl_dest.fp));
      break;

    case Instruction::MOVE:
    case Instruction::MOVE_OBJECT:
    case Instruction::MOVE_16:
    case Instruction::MOVE_OBJECT_16:
    case Instruction::MOVE_FROM16:
    case Instruction::MOVE_OBJECT_FROM16:
      StoreValue(rl_dest, rl_src[0]);
      break;

    case Instruction::MOVE_WIDE:
    case Instruction::MOVE_WIDE_16:
    case Instruction::MOVE_WIDE_FROM16:
      StoreValueWide(rl_dest, rl_src[0]);
      break;

    case Instruction::CONST:
    case Instruction::CONST_4:
    case Instruction::CONST_16:
      rl_result = EvalLoc(rl_dest, kAnyReg, true);
      LoadConstantNoClobber(rl_result.reg, vB);
      StoreValue(rl_dest, rl_result);
      if (vB == 0) {
        // NOTE(review): zero constants get extra handling — see Workaround7250540.
        Workaround7250540(rl_dest, rl_result.reg);
      }
      break;

    case Instruction::CONST_HIGH16:
      rl_result = EvalLoc(rl_dest, kAnyReg, true);
      LoadConstantNoClobber(rl_result.reg, vB << 16);
      StoreValue(rl_dest, rl_result);
      if (vB == 0) {
        Workaround7250540(rl_dest, rl_result.reg);
      }
      break;

    case Instruction::CONST_WIDE_16:
    case Instruction::CONST_WIDE_32:
      GenConstWide(rl_dest, static_cast<int64_t>(static_cast<int32_t>(vB)));
      break;

    case Instruction::CONST_WIDE:
      GenConstWide(rl_dest, mir->dalvikInsn.vB_wide);
      break;

    case Instruction::CONST_WIDE_HIGH16:
      rl_result = EvalLoc(rl_dest, kAnyReg, true);
      LoadConstantWide(rl_result.reg, static_cast<int64_t>(vB) << 48);
      StoreValueWide(rl_dest, rl_result);
      break;

    case Instruction::MONITOR_ENTER:
      GenMonitorEnter(opt_flags, rl_src[0]);
      break;

    case Instruction::MONITOR_EXIT:
      GenMonitorExit(opt_flags, rl_src[0]);
      break;

    case Instruction::CHECK_CAST: {
      GenCheckCast(mir->offset, vB, rl_src[0]);
      break;
    }
    case Instruction::INSTANCE_OF:
      GenInstanceof(vC, rl_dest, rl_src[0]);
      break;

    case Instruction::NEW_INSTANCE:
      GenNewInstance(vB, rl_dest);
      break;

    case Instruction::THROW:
      GenThrow(rl_src[0]);
      break;

    case Instruction::ARRAY_LENGTH:
      int len_offset;
      len_offset = mirror::Array::LengthOffset().Int32Value();
      rl_src[0] = LoadValue(rl_src[0], kCoreReg);
      GenNullCheck(rl_src[0].reg, opt_flags);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      LoadWordDisp(rl_src[0].reg, len_offset, rl_result.reg);
      MarkPossibleNullPointerException(opt_flags);
      StoreValue(rl_dest, rl_result);
      break;

    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO:
      GenConstString(vB, rl_dest);
      break;

    case Instruction::CONST_CLASS:
      GenConstClass(vB, rl_dest);
      break;

    case Instruction::FILL_ARRAY_DATA:
      GenFillArrayData(vB, rl_src[0]);
      break;

    case Instruction::FILLED_NEW_ARRAY:
      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
                        false /* not range */));
      break;

    case Instruction::FILLED_NEW_ARRAY_RANGE:
      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
                        true /* range */));
      break;

    case Instruction::NEW_ARRAY:
      GenNewArray(vC, rl_dest, rl_src[0]);
      break;

    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      // Backward branches carry a suspend test so long loops remain interruptible.
      if (mir_graph_->IsBackedge(bb, bb->taken)) {
        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
      } else {
        OpUnconditionalBranch(&label_list[bb->taken]);
      }
      break;

    case Instruction::PACKED_SWITCH:
      GenPackedSwitch(mir, vB, rl_src[0]);
      break;

    case Instruction::SPARSE_SWITCH:
      GenSparseSwitch(mir, vB, rl_src[0]);
      break;

    case Instruction::CMPL_FLOAT:
    case Instruction::CMPG_FLOAT:
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
      GenCmpFP(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::CMP_LONG:
      GenCmpLong(rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::IF_EQ:
    case Instruction::IF_NE:
    case Instruction::IF_LT:
    case Instruction::IF_GE:
    case Instruction::IF_GT:
    case Instruction::IF_LE: {
      LIR* taken = &label_list[bb->taken];
      LIR* fall_through = &label_list[bb->fall_through];
      // Result known at compile time?
      if (rl_src[0].is_const && rl_src[1].is_const) {
        // Fold to an unconditional branch to whichever side the constants select.
        bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
                                       mir_graph_->ConstantValue(rl_src[1].orig_sreg));
        BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
        if (mir_graph_->IsBackedge(bb, target_id)) {
          GenSuspendTest(opt_flags);
        }
        OpUnconditionalBranch(&label_list[target_id]);
      } else {
        if (mir_graph_->IsBackwardsBranch(bb)) {
          GenSuspendTest(opt_flags);
        }
        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
      }
      break;
      }

    case Instruction::IF_EQZ:
    case Instruction::IF_NEZ:
    case Instruction::IF_LTZ:
    case Instruction::IF_GEZ:
    case Instruction::IF_GTZ:
    case Instruction::IF_LEZ: {
      LIR* taken = &label_list[bb->taken];
      LIR* fall_through = &label_list[bb->fall_through];
      // Result known at compile time?
      if (rl_src[0].is_const) {
        bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
        BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
        if (mir_graph_->IsBackedge(bb, target_id)) {
          GenSuspendTest(opt_flags);
        }
        OpUnconditionalBranch(&label_list[target_id]);
      } else {
        if (mir_graph_->IsBackwardsBranch(bb)) {
          GenSuspendTest(opt_flags);
        }
        GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
      }
      break;
      }

    case Instruction::AGET_WIDE:
      GenArrayGet(opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
      break;
    case Instruction::AGET:
    case Instruction::AGET_OBJECT:
      GenArrayGet(opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
      break;
    case Instruction::AGET_BOOLEAN:
      GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
      break;
    case Instruction::AGET_BYTE:
      GenArrayGet(opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
      break;
    case Instruction::AGET_CHAR:
      GenArrayGet(opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
      break;
    case Instruction::AGET_SHORT:
      GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
      break;
    case Instruction::APUT_WIDE:
      GenArrayPut(opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3, false);
      break;
    case Instruction::APUT:
      GenArrayPut(opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2, false);
      break;
    case Instruction::APUT_OBJECT: {
      bool is_null = mir_graph_->IsConstantNullRef(rl_src[0]);
      bool is_safe = is_null;  // Always safe to store null.
      if (!is_safe) {
        // Check safety from verifier type information.
        const DexCompilationUnit* unit = mir_graph_->GetCurrentDexCompilationUnit();
        is_safe = cu_->compiler_driver->IsSafeCast(unit, mir->offset);
      }
      if (is_null || is_safe) {
        // Store of constant null doesn't require an assignability test and can be generated inline
        // without fixed register usage or a card mark.
        GenArrayPut(opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
      } else {
        GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0]);
      }
      break;
    }
    case Instruction::APUT_SHORT:
    case Instruction::APUT_CHAR:
      GenArrayPut(opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1, false);
      break;
    case Instruction::APUT_BYTE:
    case Instruction::APUT_BOOLEAN:
      GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
      break;

    case Instruction::IGET_OBJECT:
      GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, true);
      break;

    case Instruction::IGET_WIDE:
      GenIGet(mir, opt_flags, kLong, rl_dest, rl_src[0], true, false);
      break;

    case Instruction::IGET:
      GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, false);
      break;

    case Instruction::IGET_CHAR:
      GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
      break;

    case Instruction::IGET_SHORT:
      GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
      break;

    case Instruction::IGET_BOOLEAN:
    case Instruction::IGET_BYTE:
      GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
      break;

    case Instruction::IPUT_WIDE:
      GenIPut(mir, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
      break;

    case Instruction::IPUT_OBJECT:
      GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
      break;

    case Instruction::IPUT:
      GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
      break;

    case Instruction::IPUT_BOOLEAN:
    case Instruction::IPUT_BYTE:
      GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
      break;

    case Instruction::IPUT_CHAR:
      GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
      break;

    case Instruction::IPUT_SHORT:
      GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
      break;

    case Instruction::SGET_OBJECT:
      GenSget(mir, rl_dest, false, true);
      break;
    case Instruction::SGET:
    case Instruction::SGET_BOOLEAN:
    case Instruction::SGET_BYTE:
    case Instruction::SGET_CHAR:
    case Instruction::SGET_SHORT:
      GenSget(mir, rl_dest, false, false);
      break;

    case Instruction::SGET_WIDE:
      GenSget(mir, rl_dest, true, false);
      break;

    case Instruction::SPUT_OBJECT:
      GenSput(mir, rl_src[0], false, true);
      break;

    case Instruction::SPUT:
    case Instruction::SPUT_BOOLEAN:
    case Instruction::SPUT_BYTE:
    case Instruction::SPUT_CHAR:
    case Instruction::SPUT_SHORT:
      GenSput(mir, rl_src[0], false, false);
      break;

    case Instruction::SPUT_WIDE:
      GenSput(mir, rl_src[0], true, false);
      break;

    case Instruction::INVOKE_STATIC_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
      break;
    case Instruction::INVOKE_STATIC:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
      break;

    case Instruction::INVOKE_DIRECT:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
      break;
    case Instruction::INVOKE_DIRECT_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
      break;

    case Instruction::INVOKE_VIRTUAL:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
      break;
    case Instruction::INVOKE_VIRTUAL_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
      break;

    case Instruction::INVOKE_SUPER:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
      break;
    case Instruction::INVOKE_SUPER_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
      break;

    case Instruction::INVOKE_INTERFACE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
      break;
    case Instruction::INVOKE_INTERFACE_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
      break;

    case Instruction::NEG_INT:
    case Instruction::NOT_INT:
      GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::NEG_LONG:
    case Instruction::NOT_LONG:
      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::NEG_FLOAT:
      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::NEG_DOUBLE:
      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::INT_TO_LONG:
      GenIntToLong(rl_dest, rl_src[0]);
      break;

    case Instruction::LONG_TO_INT:
      rl_src[0] = UpdateLocWide(rl_src[0]);
      rl_src[0] = WideToNarrow(rl_src[0]);
      StoreValue(rl_dest, rl_src[0]);
      break;

    case Instruction::INT_TO_BYTE:
    case Instruction::INT_TO_SHORT:
    case Instruction::INT_TO_CHAR:
      GenIntNarrowing(opcode, rl_dest, rl_src[0]);
      break;

    case Instruction::INT_TO_FLOAT:
    case Instruction::INT_TO_DOUBLE:
    case Instruction::LONG_TO_FLOAT:
    case Instruction::LONG_TO_DOUBLE:
    case Instruction::FLOAT_TO_INT:
    case Instruction::FLOAT_TO_LONG:
    case Instruction::FLOAT_TO_DOUBLE:
    case Instruction::DOUBLE_TO_INT:
    case Instruction::DOUBLE_TO_LONG:
    case Instruction::DOUBLE_TO_FLOAT:
      GenConversion(opcode, rl_dest, rl_src[0]);
      break;


    // Commutative int ops: a cheap constant on either side becomes a literal form.
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      if (rl_src[0].is_const &&
          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]))) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[1],
                             mir_graph_->ConstantValue(rl_src[0].orig_sreg));
      } else if (rl_src[1].is_const &&
          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[0],
                             mir_graph_->ConstantValue(rl_src[1].orig_sreg));
      } else {
        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
      }
      break;

    // Non-commutative int ops: only a constant second operand folds to a literal form.
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      if (rl_src[1].is_const &&
          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
      } else {
        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
      }
      break;

    case Instruction::ADD_LONG:
    case Instruction::SUB_LONG:
    case Instruction::AND_LONG:
    case Instruction::OR_LONG:
    case Instruction::XOR_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::AND_LONG_2ADDR:
    case Instruction::OR_LONG_2ADDR:
    case Instruction::XOR_LONG_2ADDR:
      if (rl_src[0].is_const || rl_src[1].is_const) {
        GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
        break;
      }
      // Note: intentional fallthrough.

    case Instruction::MUL_LONG:
    case Instruction::DIV_LONG:
    case Instruction::REM_LONG:
    case Instruction::MUL_LONG_2ADDR:
    case Instruction::DIV_LONG_2ADDR:
    case Instruction::REM_LONG_2ADDR:
      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::SHL_LONG:
    case Instruction::SHR_LONG:
    case Instruction::USHR_LONG:
    case Instruction::SHL_LONG_2ADDR:
    case Instruction::SHR_LONG_2ADDR:
    case Instruction::USHR_LONG_2ADDR:
      if (rl_src[1].is_const) {
        GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
      } else {
        GenShiftOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
      }
      break;

    case Instruction::ADD_FLOAT:
    case Instruction::SUB_FLOAT:
    case Instruction::MUL_FLOAT:
    case Instruction::DIV_FLOAT:
    case Instruction::REM_FLOAT:
    case Instruction::ADD_FLOAT_2ADDR:
    case Instruction::SUB_FLOAT_2ADDR:
    case Instruction::MUL_FLOAT_2ADDR:
    case Instruction::DIV_FLOAT_2ADDR:
    case Instruction::REM_FLOAT_2ADDR:
      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::RSUB_INT:
    case Instruction::ADD_INT_LIT16:
    case Instruction::MUL_INT_LIT16:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT_LIT16:
    case Instruction::AND_INT_LIT16:
    case Instruction::OR_INT_LIT16:
    case Instruction::XOR_INT_LIT16:
    case Instruction::ADD_INT_LIT8:
    case Instruction::RSUB_INT_LIT8:
    case Instruction::MUL_INT_LIT8:
    case Instruction::DIV_INT_LIT8:
    case Instruction::REM_INT_LIT8:
    case Instruction::AND_INT_LIT8:
    case Instruction::OR_INT_LIT8:
    case Instruction::XOR_INT_LIT8:
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHR_INT_LIT8:
    case Instruction::USHR_INT_LIT8:
      GenArithOpIntLit(opcode, rl_dest, rl_src[0], vC);
      break;

    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }
}  // NOLINT(readability/fn_size)
908
909// Process extended MIR instructions
910void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
911  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
912    case kMirOpCopy: {
913      RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
914      RegLocation rl_dest = mir_graph_->GetDest(mir);
915      StoreValue(rl_dest, rl_src);
916      break;
917    }
918    case kMirOpFusedCmplFloat:
919      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, false /*double*/);
920      break;
921    case kMirOpFusedCmpgFloat:
922      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, false /*double*/);
923      break;
924    case kMirOpFusedCmplDouble:
925      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, true /*double*/);
926      break;
927    case kMirOpFusedCmpgDouble:
928      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, true /*double*/);
929      break;
930    case kMirOpFusedCmpLong:
931      GenFusedLongCmpBranch(bb, mir);
932      break;
933    case kMirOpSelect:
934      GenSelect(bb, mir);
935      break;
936    default:
937      break;
938  }
939}
940
941void Mir2Lir::GenPrintLabel(MIR* mir) {
942  // Mark the beginning of a Dalvik instruction for line tracking.
943  if (cu_->verbose) {
944     char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
945     MarkBoundary(mir->offset, inst_str);
946  }
947}
948
// Handle the content in each basic block: emit the block label, any
// entry/exit sequence, then LIR for every MIR in the block, followed by a
// local-optimization pass. The return value is ignored by the caller.
bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
  // Dead blocks generate no code at all.
  if (bb->block_type == kDead) return false;
  current_dalvik_offset_ = bb->start_offset;
  MIR* mir;
  int block_id = bb->id;

  // The per-block label LIR was pre-allocated (see MethodMIR2LIR); record the
  // block's starting Dalvik offset on it.
  block_label_list_[block_id].operands[0] = bb->start_offset;

  // Insert the block label.
  block_label_list_[block_id].opcode = kPseudoNormalBlockLabel;
  block_label_list_[block_id].flags.fixup = kFixupLabel;
  AppendLIR(&block_label_list_[block_id]);

  LIR* head_lir = NULL;

  // If this is a catch block, export the start address.
  if (bb->catch_entry) {
    head_lir = NewLIR0(kPseudoExportedPC);
  }

  // Free temp registers and reset redundant store tracking.
  ClobberAllRegs();

  if (bb->block_type == kEntryBlock) {
    ResetRegPool();
    // The method's "in" registers occupy the top of the vreg range.
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
                         mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
  } else if (bb->block_type == kExitBlock) {
    ResetRegPool();
    GenExitSequence();
  }

  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    // Temps do not live across Dalvik instructions.
    ResetRegPool();
    if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
      ClobberAllRegs();
    }

    if (cu_->disable_opt & (1 << kSuppressLoads)) {
      ResetDefTracking();
    }

    // Reset temp tracking sanity check.
    if (kIsDebugBuild) {
      live_sreg_ = INVALID_SREG;
    }

    current_dalvik_offset_ = mir->offset;
    int opcode = mir->dalvikInsn.opcode;

    // Emit a disassembly boundary marker when verbose (see GenPrintLabel).
    GenPrintLabel(mir);

    // Remember the first LIR for this block.
    if (head_lir == NULL) {
      head_lir = &block_label_list_[bb->id];
      // Set the first label as a scheduling barrier.
      DCHECK(!head_lir->flags.use_def_invalid);
      head_lir->u.m.def_mask = ENCODE_ALL;
    }

    if (opcode == kMirOpCheck) {
      // Combine check and work halves of throwing instruction: swap the ssa
      // reps and opcodes so this MIR carries the real instruction and the
      // work half becomes a kMirOpCheckPart2 placeholder pointing back here.
      MIR* work_half = mir->meta.throw_insn;
      mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
      mir->meta = work_half->meta;  // Whatever the work_half had, we need to copy it.
      opcode = work_half->dalvikInsn.opcode;
      SSARepresentation* ssa_rep = work_half->ssa_rep;
      work_half->ssa_rep = mir->ssa_rep;
      mir->ssa_rep = ssa_rep;
      work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheckPart2);
      work_half->meta.throw_insn = mir;
    }

    // Extended (compiler-internal) opcodes take a separate path.
    if (opcode >= kMirOpFirst) {
      HandleExtendedMethodMIR(bb, mir);
      continue;
    }

    CompileDalvikInstruction(mir, bb, block_label_list_);
  }

  if (head_lir) {
    // Eliminate redundant loads/stores and delay stores into later slots.
    ApplyLocalOptimizations(head_lir, last_lir_insn_);
  }
  return false;
}
1038
1039bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
1040  cu_->NewTimingSplit("SpecialMIR2LIR");
1041  // Find the first DalvikByteCode block.
1042  int num_reachable_blocks = mir_graph_->GetNumReachableBlocks();
1043  BasicBlock*bb = NULL;
1044  for (int idx = 0; idx < num_reachable_blocks; idx++) {
1045    // TODO: no direct access of growable lists.
1046    int dfs_index = mir_graph_->GetDfsOrder()->Get(idx);
1047    bb = mir_graph_->GetBasicBlock(dfs_index);
1048    if (bb->block_type == kDalvikByteCode) {
1049      break;
1050    }
1051  }
1052  if (bb == NULL) {
1053    return false;
1054  }
1055  DCHECK_EQ(bb->start_offset, 0);
1056  DCHECK(bb->first_mir_insn != NULL);
1057
1058  // Get the first instruction.
1059  MIR* mir = bb->first_mir_insn;
1060
1061  // Free temp registers and reset redundant store tracking.
1062  ResetRegPool();
1063  ResetDefTracking();
1064  ClobberAllRegs();
1065
1066  return GenSpecialCase(bb, mir, special);
1067}
1068
1069void Mir2Lir::MethodMIR2LIR() {
1070  cu_->NewTimingSplit("MIR2LIR");
1071
1072  // Hold the labels of each block.
1073  block_label_list_ =
1074      static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
1075                                      kArenaAllocLIR));
1076
1077  PreOrderDfsIterator iter(mir_graph_);
1078  BasicBlock* curr_bb = iter.Next();
1079  BasicBlock* next_bb = iter.Next();
1080  while (curr_bb != NULL) {
1081    MethodBlockCodeGen(curr_bb);
1082    // If the fall_through block is no longer laid out consecutively, drop in a branch.
1083    BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
1084    if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
1085      OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
1086    }
1087    curr_bb = next_bb;
1088    do {
1089      next_bb = iter.Next();
1090    } while ((next_bb != NULL) && (next_bb->block_type == kDead));
1091  }
1092  HandleSlowPaths();
1093
1094  cu_->NewTimingSplit("Launchpads");
1095  HandleSuspendLaunchPads();
1096
1097  HandleThrowLaunchPads();
1098}
1099
1100//
1101// LIR Slow Path
1102//
1103
1104LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel() {
1105  m2l_->SetCurrentDexPc(current_dex_pc_);
1106  LIR* target = m2l_->NewLIR0(kPseudoTargetLabel);
1107  fromfast_->target = target;
1108  return target;
1109}
1110
1111}  // namespace art
1112