local_optimizations.cc revision a7894cdb063edb88f1420a42207e0c4bd27ab4f9
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"

namespace art {

#define DEBUG_OPT(X)

#define LOAD_STORE_CHECK_REG_DEP(mask, check) (mask.Intersects(*check->u.m.def_mask))

/* Check RAW, WAR, and WAW dependencies on the register operands */
#define CHECK_REG_DEP(use, def, check) (def.Intersects(*check->u.m.use_mask)) || \
                                       (use.Union(def).Intersects(*check->u.m.def_mask))
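/*
 * In the hoisting pass, `use`/`def` are the masks of the load being moved and
 * `check` is the instruction being crossed: the first clause flags a WAR hazard
 * (the load defines a register check still uses), the second flags RAW and WAW
 * hazards (check defines a register the load uses or defines).
 */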

/* Load Store Elimination filter:
 *  - Wide Load/Store
 *  - Exclusive Load/Store
 *  - Quad operand Load/Store
 *  - List Load/Store
 *  - IT blocks
 *  - Branch
 *  - Dmb
 */
#define LOAD_STORE_FILTER(flags) ((flags & (IS_QUAD_OP|IS_STORE)) == (IS_QUAD_OP|IS_STORE) || \
                                 (flags & (IS_QUAD_OP|IS_LOAD)) == (IS_QUAD_OP|IS_LOAD) || \
                                 (flags & REG_USE012) == REG_USE012 || \
                                 (flags & REG_DEF01) == REG_DEF01 || \
                                 (flags & REG_DEF_LIST0) || \
                                 (flags & REG_DEF_LIST1) || \
                                 (flags & REG_USE_LIST0) || \
                                 (flags & REG_USE_LIST1) || \
                                 (flags & REG_DEF_FPCS_LIST0) || \
                                 (flags & REG_DEF_FPCS_LIST2) || \
                                 (flags & REG_USE_FPCS_LIST0) || \
                                 (flags & REG_USE_FPCS_LIST2) || \
                                 (flags & IS_VOLATILE) || \
                                 (flags & IS_BRANCH) || \
                                 (flags & IS_IT))

/* Scheduler heuristics */
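/*
 * MAX_HOIST_DISTANCE bounds how many prior instructions a load may be hoisted
 * past, LDLD_DISTANCE is the extra distance (in slots) kept after a load-load
 * dependency, and LD_LATENCY is the assumed load-use latency the hoist tries
 * to cover.
 */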
#define MAX_HOIST_DISTANCE 20
#define LDLD_DISTANCE 4
#define LD_LATENCY 2

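/*
 * Two accesses clobber each other when their Dalvik register ranges overlap;
 * DECODE_ALIAS_INFO_WIDE is non-zero for wide accesses, so a wide access also
 * covers the adjacent high register.
 */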
static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->flags.alias_info);
  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->flags.alias_info);
  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->flags.alias_info);
  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->flags.alias_info);

  return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
}

/* Convert a more expensive instruction (i.e., a load) into a move */
void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src) {
  /* Insert a move to replace the load */
  LIR* move_lir;
  move_lir = OpRegCopyNoInsert(dest, src);
  move_lir->dalvik_offset = orig_lir->dalvik_offset;
  /*
   * Insert the converted instruction after the original since the
   * optimization is scanning in the top-down order and the new instruction
   * will need to be re-checked (e.g., the new dest clobbers the src used in
   * this_lir).
   */
  InsertLIRAfter(orig_lir, move_lir);
}

void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
  LOG(INFO) << type;
  LOG(INFO) << "Check LIR:";
  DumpLIRInsn(check_lir, 0);
  LOG(INFO) << "This LIR:";
  DumpLIRInsn(this_lir, 0);
}

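/*
 * Illustrative example (register numbers are hypothetical): a second load of
 *     ldr r0, [r5, #16]
 * from the same location is NOP'd when it targets the same register, or
 * replaced by a register copy (ldr r1, [r5, #16] becomes mov r1, r0) when it
 * targets a different one. EliminateLoad() below performs the rewrite.
 */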
inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id) {
  DCHECK(RegStorage::SameRegType(lir->operands[0], reg_id));
  RegStorage dest_reg, src_reg;

  /* Same register - NOP */
  if (lir->operands[0] == reg_id) {
    NopLIR(lir);
    return;
  }

  /* Different register - Move + NOP */
  switch (reg_id & RegStorage::kShapeTypeMask) {
    case RegStorage::k32BitSolo | RegStorage::kCoreRegister:
      dest_reg = RegStorage::Solo32(lir->operands[0]);
      src_reg = RegStorage::Solo32(reg_id);
      break;
    case RegStorage::k64BitSolo | RegStorage::kCoreRegister:
      dest_reg = RegStorage::Solo64(lir->operands[0]);
      src_reg = RegStorage::Solo64(reg_id);
      break;
    case RegStorage::k32BitSolo | RegStorage::kFloatingPoint:
      dest_reg = RegStorage::FloatSolo32(lir->operands[0]);
      src_reg = RegStorage::FloatSolo32(reg_id);
      break;
    case RegStorage::k64BitSolo | RegStorage::kFloatingPoint:
      dest_reg = RegStorage::FloatSolo64(lir->operands[0]);
      src_reg = RegStorage::FloatSolo64(reg_id);
      break;
    default:
      LOG(INFO) << "Load Store: Unsupported register type!";
      return;
  }
  ConvertMemOpIntoMove(lir, dest_reg, src_reg);
  NopLIR(lir);
  return;
}

/*
 * Perform a pass of top-down walk, from the first to the last instruction in the
 * superblock, to eliminate redundant loads and stores.
 *
 * An earlier load can eliminate a later load iff
 *   1) They are must-aliases
 *   2) The native register is not clobbered in between
 *   3) The memory location is not written to in between
 *
 * An earlier store can eliminate a later load iff
 *   1) They are must-aliases
 *   2) The native register is not clobbered in between
 *   3) The memory location is not written to in between
 *
 * An earlier store can eliminate a later store iff
 *   1) They are must-aliases
 *   2) The memory location is not written to in between
 */
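
/*
 * For instance (hypothetical ARM sequence), a store followed by a load of the
 * same Dalvik register
 *     str r0, [sp, #16]
 *     ...
 *     ldr r1, [sp, #16]
 * allows the load to be rewritten as mov r1, r0, provided no intervening
 * instruction clobbers r0 or writes the location.
 */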
void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
  LIR* this_lir, *check_lir;
  std::vector<int> alias_list;

  if (head_lir == tail_lir) {
    return;
  }

  for (this_lir = head_lir; this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
    if (this_lir->flags.is_nop || IsPseudoLirOp(this_lir->opcode)) {
      continue;
    }

    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
    /* Target LIR - skip if instr is:
     *  - NOP
     *  - Branch
     *  - Load and store
     *  - Wide load
     *  - Wide store
     *  - Exclusive load/store
     */
    if (LOAD_STORE_FILTER(target_flags) ||
        ((target_flags & (IS_LOAD | IS_STORE)) == (IS_LOAD | IS_STORE)) ||
        !(target_flags & (IS_LOAD | IS_STORE))) {
      continue;
    }
    int native_reg_id = this_lir->operands[0];
    int dest_reg_id = this_lir->operands[1];
    bool is_this_lir_load = target_flags & IS_LOAD;
    ResourceMask this_mem_mask = kEncodeMem.Intersection(this_lir->u.m.use_mask->Union(
                                                        *this_lir->u.m.def_mask));

    /* Memory region */
    if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg)) &&
      (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeHeapRef)))) {
      continue;
    }

    /* Does not redefine the address */
    if (this_lir->u.m.def_mask->Intersects(*this_lir->u.m.use_mask)) {
      continue;
    }

    ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
    ResourceMask stop_use_reg_mask = this_lir->u.m.use_mask->Without(kEncodeMem);

    /* The ARM backend can load/store PC */
    ResourceMask uses_pc = GetPCUseDefEncoding();
    if (uses_pc.Intersects(this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask))) {
      continue;
    }

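    /*
     * alias_list tracks native registers known to hold the same address as
     * this_lir's base register (operands[1]): it is seeded below, extended
     * when a register move copies a tracked register, and pruned once a
     * tracked register is clobbered.
     */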
    /* Initialize alias list */
    alias_list.clear();
    ResourceMask alias_reg_list_mask = kEncodeNone;
    if (!this_mem_mask.Intersects(kEncodeMem) && !this_mem_mask.Intersects(kEncodeLiteral)) {
      alias_list.push_back(dest_reg_id);
      SetupRegMask(&alias_reg_list_mask, dest_reg_id);
    }

    /* Scan through the BB for possible elimination candidates */
    for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
      if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) {
        continue;
      }

      if (uses_pc.Intersects(check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask))) {
        break;
      }

      ResourceMask check_mem_mask = kEncodeMem.Intersection(check_lir->u.m.use_mask->Union(
                                                          *check_lir->u.m.def_mask));
      ResourceMask alias_mem_mask = this_mem_mask.Intersection(check_mem_mask);
      uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
      bool stop_here = false;
      bool pass_over = false;

      /* Check LIR - skip if instr is:
       *  - Wide Load
       *  - Wide Store
       *  - Branch
       *  - Dmb
       *  - Exclusive load/store
       *  - IT blocks
       *  - Quad loads
       */
      if (LOAD_STORE_FILTER(check_flags)) {
        stop_here = true;
        /* Possible alias or result of earlier pass */
      } else if (check_flags & IS_MOVE) {
        for (auto &reg : alias_list) {
          if (RegStorage::RegNum(check_lir->operands[1]) == RegStorage::RegNum(reg)) {
            pass_over = true;
            alias_list.push_back(check_lir->operands[0]);
            SetupRegMask(&alias_reg_list_mask, check_lir->operands[0]);
          }
        }
      /* Memory regions */
      } else if (!alias_mem_mask.Equals(kEncodeNone)) {
        DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
        bool is_check_lir_load = check_flags & IS_LOAD;
        bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);

        if (!alias_mem_mask.Intersects(kEncodeMem) && alias_mem_mask.Equals(kEncodeLiteral)) {
          DCHECK(check_flags & IS_LOAD);
          /* Same value && same register type */
          if (reg_compatible && (this_lir->target == check_lir->target)) {
            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LITERAL"));
            EliminateLoad(check_lir, native_reg_id);
          }
        } else if (((alias_mem_mask.Equals(kEncodeDalvikReg)) || (alias_mem_mask.Equals(kEncodeHeapRef))) &&
                   alias_reg_list_mask.Intersects((check_lir->u.m.use_mask)->Without(kEncodeMem))) {
          bool same_offset = (GetInstructionOffset(this_lir) == GetInstructionOffset(check_lir));
          if (same_offset && !is_check_lir_load) {
            if (check_lir->operands[0] != native_reg_id) {
              DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "STORE STOP"));
              stop_here = true;
              break;
            }
          }

          if (reg_compatible && same_offset &&
              ((is_this_lir_load && is_check_lir_load)  /* LDR - LDR */ ||
              (!is_this_lir_load && is_check_lir_load)  /* STR - LDR */ ||
              (!is_this_lir_load && !is_check_lir_load) /* STR - STR */)) {
            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LOAD STORE"));
            EliminateLoad(check_lir, native_reg_id);
          }
        } else {
          /* Unsupported memory region */
        }
      }

      if (pass_over) {
        continue;
      }

      if (stop_here == false) {
        bool stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_list_mask, check_lir);
        if (stop_alias) {
          /* Scan the alias list and remove any register that is clobbered. */
          for (size_t i = 0; i < alias_list.size();) {
            int reg = alias_list[i];
            ResourceMask alias_reg_mask = kEncodeNone;
            SetupRegMask(&alias_reg_mask, reg);
            stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_mask, check_lir);
            if (stop_alias) {
              /* Erase by index: erasing while a range-for iterates is undefined. */
              ClearRegMask(&alias_reg_list_mask, reg);
              alias_list.erase(alias_list.begin() + i);
            } else {
              i++;
            }
          }
        }
        ResourceMask stop_search_mask = stop_def_reg_mask.Union(stop_use_reg_mask);
        stop_search_mask = stop_search_mask.Union(alias_reg_list_mask);
        stop_here = LOAD_STORE_CHECK_REG_DEP(stop_search_mask, check_lir);
        if (stop_here) {
          break;
        }
      } else {
        break;
      }
    }
  }
}

/*
 * Perform a pass of bottom-up walk, from the second instruction in the
 * superblock, to try to hoist loads to earlier slots.
 */
void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
  LIR* this_lir, *check_lir;
  /*
   * Store the list of independent instructions that can be hoisted past.
   * Will decide the best place to insert later.
   */
  LIR* prev_inst_list[MAX_HOIST_DISTANCE];

  /* Empty block */
  if (head_lir == tail_lir) {
    return;
  }

  /* Start from the second instruction */
  for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
    if (IsPseudoLirOp(this_lir->opcode)) {
      continue;
    }

    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
    /* Skip non-interesting instructions */
    if (!(target_flags & IS_LOAD) ||
        (this_lir->flags.is_nop == true) ||
        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
        ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
      continue;
    }

    ResourceMask stop_use_all_mask = *this_lir->u.m.use_mask;

    /*
     * Branches for null/range checks are marked with the true resource
     * bits, and loads to Dalvik registers, constant pools, and non-alias
     * locations are safe to be hoisted. So only mark the heap references
     * conservatively here.
     *
     * Note: on x86(-64) and Arm64 this will add kEncodeNone.
     * TODO: Sanity check. LoadStoreElimination uses kBranchBit to fake a PC.
     */
    if (stop_use_all_mask.HasBit(ResourceMask::kHeapRef)) {
      stop_use_all_mask.SetBits(GetPCUseDefEncoding());
    }

    /* Similar to the above, but check only for pure register dependencies */
    ResourceMask stop_use_reg_mask = stop_use_all_mask.Without(kEncodeMem);
    ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);

    int next_slot = 0;
    bool stop_here = false;

    /* Try to hoist the load to a good spot */
    for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {
      /*
       * Skip already dead instructions (whose dataflow information is
       * outdated and misleading).
       */
      if (check_lir->flags.is_nop) {
        continue;
      }

      ResourceMask check_mem_mask = check_lir->u.m.def_mask->Intersection(kEncodeMem);
      ResourceMask alias_condition = stop_use_all_mask.Intersection(check_mem_mask);
      stop_here = false;

      /* Potential WAR alias seen - check the exact relation */
      if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
        /* We can fully disambiguate Dalvik references */
        if (alias_condition.Equals(kEncodeDalvikReg)) {
          /* Must alias or partially overlap */
          if ((check_lir->flags.alias_info == this_lir->flags.alias_info) ||
            IsDalvikRegisterClobbered(this_lir, check_lir)) {
            stop_here = true;
          }
        /* Conservatively treat all heap refs as may-alias */
        } else {
          DCHECK(alias_condition.Equals(kEncodeHeapRef));
          stop_here = true;
        }
        /* Memory content may be updated. Stop looking now. */
        if (stop_here) {
          prev_inst_list[next_slot++] = check_lir;
          break;
        }
      }

      if (stop_here == false) {
        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask,
                     check_lir);
      }

      /*
       * Store the dependent or non-pseudo independent instruction in the
       * list.
       */
      if (stop_here || !IsPseudoLirOp(check_lir->opcode)) {
        prev_inst_list[next_slot++] = check_lir;
        if (next_slot == MAX_HOIST_DISTANCE) {
          break;
        }
      }

      /* Hit a dependency - stop scanning; the insertion slot is chosen below */
      if (stop_here == true) {
        DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "HOIST STOP"));
        break;
      }
    }

    /*
     * Reached the top - use head_lir as the dependent marker as all labels
     * are barriers.
     */
    if (stop_here == false && next_slot < MAX_HOIST_DISTANCE) {
      prev_inst_list[next_slot++] = head_lir;
    }

    /*
     * At least one independent instruction is found. Scan in the reversed
     * direction to find a beneficial slot.
     */
    if (next_slot >= 2) {
      int first_slot = next_slot - 2;
      int slot;
      LIR* dep_lir = prev_inst_list[next_slot-1];
      /* If there is a ld-ld dependency, wait LDLD_DISTANCE cycles */
      if (!IsPseudoLirOp(dep_lir->opcode) &&
        (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
        first_slot -= LDLD_DISTANCE;
      }
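      /*
       * prev_inst_list[0] is the instruction immediately above this_lir and
       * higher slots are further up the block, so hoisting to slot k leaves
       * roughly k+1 instructions (nops and pseudo ops were skipped) between
       * the load and its original site. The scan below walks from first_slot
       * down toward this_lir looking for a beneficial stopping point.
       */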
      /*
       * Make sure we check slot >= 0 since first_slot may be negative
       * when the loop is first entered.
       */
      for (slot = first_slot; slot >= 0; slot--) {
        LIR* cur_lir = prev_inst_list[slot];
        LIR* prev_lir = prev_inst_list[slot+1];

        /* Check the highest instruction */
        if (prev_lir->u.m.def_mask->Equals(kEncodeAll)) {
          /*
           * If the first instruction is a load, don't hoist anything
           * above it since it is unlikely to be beneficial.
           */
          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) {
            continue;
          }
          /*
           * If the remaining number of slots is less than LD_LATENCY,
           * insert the hoisted load here.
           */
          if (slot < LD_LATENCY) {
            break;
          }
        }

        // Don't look across a barrier label
        if ((prev_lir->opcode == kPseudoTargetLabel) ||
            (prev_lir->opcode == kPseudoSafepointPC) ||
            (prev_lir->opcode == kPseudoBarrier)) {
          break;
        }

        /*
         * Try to find two instructions with load/use dependency until
         * the remaining instructions are less than LD_LATENCY.
         */
        bool prev_is_load = IsPseudoLirOp(prev_lir->opcode) ? false :
            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
        if ((prev_is_load && (cur_lir->u.m.use_mask->Intersects(*prev_lir->u.m.def_mask))) ||
            (slot < LD_LATENCY)) {
          break;
        }
      }

      /* Found a slot to hoist to */
      if (slot >= 0) {
        LIR* cur_lir = prev_inst_list[slot];
        LIR* new_load_lir =
          static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
        *new_load_lir = *this_lir;
        /*
         * Insertion is guaranteed to succeed since cur_lir
         * is never the first LIR on the list
         */
        InsertLIRBefore(cur_lir, new_load_lir);
        NopLIR(this_lir);
      }
    }
  }
}

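/*
 * Illustrative example (hypothetical ARM sequence): the hoisting pass can
 * rewrite
 *     add r2, r2, #1
 *     add r3, r3, #4
 *     ldr r0, [r5, #8]
 *     add r1, r0, r2       <- load-use pair
 * into
 *     ldr r0, [r5, #8]     <- hoisted copy (the original load is NOP'd)
 *     add r2, r2, #1
 *     add r3, r3, #4
 *     add r1, r0, r2
 * so that independent instructions cover the load-use latency.
 */
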
void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) {
  if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) {
    ApplyLoadStoreElimination(head_lir, tail_lir);
  }
  if (!(cu_->disable_opt & (1 << kLoadHoisting))) {
    ApplyLoadHoisting(head_lir, tail_lir);
  }
}

}  // namespace art