/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"

namespace art {

#define DEBUG_OPT(X)

/* Check RAW, WAR, and WAW dependencies on the register operands */
#define CHECK_REG_DEP(use, def, check) ((def & check->use_mask) || \
                                        ((use | def) & check->def_mask))
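// CHECK_REG_DEP is true when `check` reads a register that the earlier
// instruction defines (RAW), or writes a register that the earlier
// instruction reads or defines (WAR/WAW).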

/* Scheduler heuristics */
#define MAX_HOIST_DISTANCE 20  // Maximum number of instructions remembered when searching for a hoist slot.
#define LDLD_DISTANCE 4        // Slots to leave after a load that the hoisted load depends on.
#define LD_LATENCY 2           // Assumed load latency, in instruction slots.

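/*
 * Return true if the Dalvik register ranges accessed by lir1 and lir2 overlap
 * (a wide access is treated as covering two consecutive register slots).
 */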
static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info);
  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info);
  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info);
  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->alias_info);

  return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
}

/* Convert a more expensive instruction (i.e., a load) into a move */
void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) {
  /* Insert a move to replace the load */
  LIR* move_lir;
  move_lir = OpRegCopyNoInsert(dest, src);
  /*
   * Insert the converted instruction after the original, since the
   * optimization is scanning in top-down order and the new instruction
   * will need to be re-checked (e.g., the new dest clobbers the src used in
   * this_lir).
   */
  InsertLIRAfter(orig_lir, move_lir);
}

/*
 * Walk the superblock backwards from the second-last instruction and, for
 * each candidate load/store, scan forward to eliminate redundant loads and
 * stores.
 *
 * An earlier load can eliminate a later load iff
 *   1) They are must-aliases
 *   2) The native register is not clobbered in between
 *   3) The memory location is not written to in between
 *
 * An earlier store can eliminate a later load iff
 *   1) They are must-aliases
 *   2) The native register is not clobbered in between
 *   3) The memory location is not written to in between
 *
 * A later store can be eliminated by an earlier store iff
 *   1) They are must-aliases
 *   2) The memory location is not written to in between
 */
void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
  LIR* this_lir;

  if (head_lir == tail_lir) {
    return;
  }

  for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
    if (is_pseudo_opcode(this_lir->opcode)) {
      continue;
    }

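    /*
     * Number of independent instructions scanned past so far; if a dependent
     * instruction is found later, a store may be sunk down to just before it.
     */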
    int sink_distance = 0;

    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);

    /* Skip non-interesting instructions */
    if ((this_lir->flags.is_nop == true) ||
        (target_flags & IS_BRANCH) ||
        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||  // Skip wide loads.
        ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
         (REG_USE0 | REG_USE1 | REG_USE2)) ||  // Skip wide stores.
        !(target_flags & (IS_LOAD | IS_STORE))) {
      continue;
    }

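    /* Identify the operand that holds the native register being loaded or stored. */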
    int native_reg_id;
    if (cu_->instruction_set == kX86) {
      // On x86, which operand holds the native register depends on whether this is a store or a load.
      native_reg_id = (GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
          : this_lir->operands[0];
    } else {
      native_reg_id = this_lir->operands[0];
    }
    bool is_this_lir_load = GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
    LIR* check_lir;
    /* Use the mem mask to determine the rough memory location */
    uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;

    /*
     * Currently only eliminate redundant ld/st for constant and Dalvik
     * register accesses.
     */
    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) {
      continue;
    }

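    /*
     * Register-level hazards for this_lir: stop the scan if a later
     * instruction reads a register this_lir defines, or writes a register
     * this_lir reads or defines (see CHECK_REG_DEP).
     */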
    uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
    uint64_t stop_use_reg_mask;
    if (cu_->instruction_set == kX86) {
      stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
    } else {
      /*
       * Add pc to the resource mask to prevent this instruction
       * from sinking past branch instructions. Also take out the memory
       * region bits since stop_mask is used to check data/control
       * dependencies.
       */
      stop_use_reg_mask = (GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
    }

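    /*
     * Scan forward from this_lir, looking either for a redundant access that
     * can be eliminated or for the first dependent instruction, which bounds
     * how far a store may be sunk.
     */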
    for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
      /*
       * Skip already dead instructions (whose dataflow information is
       * outdated and misleading).
       */
      if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) {
        continue;
      }

      uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
      uint64_t alias_condition = this_mem_mask & check_mem_mask;
      bool stop_here = false;

      /*
       * Potential aliases seen - check the alias relations
       */
      uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
      // TUNING: Support instructions with multiple register targets.
      if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
        stop_here = true;
      } else if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
        bool is_check_lir_load = check_flags & IS_LOAD;
        if (alias_condition == ENCODE_LITERAL) {
          /*
           * Should only see literal loads in the instruction
           * stream.
           */
          DCHECK(!(check_flags & IS_STORE));
          /* Same value && same register type */
          if (check_lir->alias_info == this_lir->alias_info &&
              SameRegType(check_lir->operands[0], native_reg_id)) {
            /*
             * Different destination register - insert
             * a move
             */
            if (check_lir->operands[0] != native_reg_id) {
              ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
            }
            check_lir->flags.is_nop = true;
          }
        } else if (alias_condition == ENCODE_DALVIK_REG) {
          /* Must alias */
          if (check_lir->alias_info == this_lir->alias_info) {
            /* Only optimize compatible registers */
            bool reg_compatible = SameRegType(check_lir->operands[0], native_reg_id);
            if ((is_this_lir_load && is_check_lir_load) ||
                (!is_this_lir_load && is_check_lir_load)) {
              /* RAR or RAW */
              if (reg_compatible) {
                /*
                 * Different destination register -
                 * insert a move
                 */
                if (check_lir->operands[0] != native_reg_id) {
                  ConvertMemOpIntoMove(check_lir, check_lir->operands[0], native_reg_id);
                }
                check_lir->flags.is_nop = true;
              } else {
                /*
                 * Destinations are of different types -
                 * something complicated is going on, so
                 * stop looking now.
                 */
                stop_here = true;
              }
            } else if (is_this_lir_load && !is_check_lir_load) {
              /* WAR - register value is killed */
              stop_here = true;
            } else if (!is_this_lir_load && !is_check_lir_load) {
              /* WAW - nuke the earlier store */
              this_lir->flags.is_nop = true;
              stop_here = true;
            }
          /* Partial overlap */
          } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
            /*
             * It is actually ok to continue if check_lir
             * is a read. But it is hard to make a test
             * case for this, so we just stop here to be
             * conservative.
             */
            stop_here = true;
          }
        }
        /* Memory content may be updated. Stop looking now. */
        if (stop_here) {
          break;
        /* The check_lir has been transformed - check the next one */
        } else if (check_lir->flags.is_nop) {
          continue;
        }
      }

      /*
       * this and check LIRs have no memory dependency. Now check if
       * their register operands have any RAW, WAR, and WAW
       * dependencies. If so, stop looking.
       */
      if (stop_here == false) {
        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
      }

      if (stop_here == true) {
        if (cu_->instruction_set == kX86) {
          // Prevent stores from being sunk between ops that generate ccodes and
          // ops that use them.
          uint64_t flags = GetTargetInstFlags(check_lir->opcode);
          if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
            check_lir = PREV_LIR(check_lir);
            sink_distance--;
          }
        }
        DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
        /* Only sink store instructions */
        if (sink_distance && !is_this_lir_load) {
          LIR* new_store_lir =
              static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
          *new_store_lir = *this_lir;
          /*
           * Stop point found - insert *before* the check_lir
           * since the instruction list is scanned in the
           * top-down order.
           */
          InsertLIRBefore(check_lir, new_store_lir);
          this_lir->flags.is_nop = true;
        }
        break;
      } else if (!check_lir->flags.is_nop) {
        sink_distance++;
      }
    }
  }
}

/*
 * Walk the superblock forwards from the second instruction and, for each
 * load, scan backwards (bottom-up) for an earlier slot to hoist it to.
 */
void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
  LIR* this_lir, *check_lir;
  /*
   * Store the list of independent instructions that can be hoisted past.
   * Will decide the best place to insert later.
   */
  LIR* prev_inst_list[MAX_HOIST_DISTANCE];

  /* Empty block */
  if (head_lir == tail_lir) {
    return;
  }

  /* Start from the second instruction */
  for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
    if (is_pseudo_opcode(this_lir->opcode)) {
      continue;
    }

    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
    /* Skip non-interesting instructions */
    if ((this_lir->flags.is_nop == true) ||
        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
        !(target_flags & IS_LOAD)) {
      continue;
    }

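    /*
     * Resources read by this load; it cannot be hoisted above any
     * instruction that writes one of them.
     */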
    uint64_t stop_use_all_mask = this_lir->use_mask;

    if (cu_->instruction_set != kX86) {
      /*
       * Branches for null/range checks are marked with the true resource
       * bits, and loads to Dalvik registers, constant pools, and non-alias
       * locations are safe to be hoisted. So only mark the heap references
       * conservatively here.
       */
      if (stop_use_all_mask & ENCODE_HEAP_REF) {
        stop_use_all_mask |= GetPCUseDefEncoding();
      }
    }

    /* Similar to the above, but only check for pure register dependencies */
    uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
    uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;

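    /* next_slot counts the candidate instructions recorded in prev_inst_list. */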
    int next_slot = 0;
    bool stop_here = false;

    /* Try to hoist the load to a good spot */
    for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {
      /*
       * Skip already dead instructions (whose dataflow information is
       * outdated and misleading).
       */
      if (check_lir->flags.is_nop) {
        continue;
      }

      uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
      uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
      stop_here = false;

      /* Potential WAR alias seen - check the exact relation */
      if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
        /* We can fully disambiguate Dalvik references */
        if (alias_condition == ENCODE_DALVIK_REG) {
          /* Must alias or partially overlap */
          if ((check_lir->alias_info == this_lir->alias_info) ||
              IsDalvikRegisterClobbered(this_lir, check_lir)) {
            stop_here = true;
          }
        /* Conservatively treat all heap refs as may-alias */
        } else {
          DCHECK_EQ(alias_condition, ENCODE_HEAP_REF);
          stop_here = true;
        }
        /* Memory content may be updated. Stop looking now. */
        if (stop_here) {
          prev_inst_list[next_slot++] = check_lir;
          break;
        }
      }

      if (stop_here == false) {
        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
      }

      /*
       * Store the dependent or non-pseudo independent instruction in the
       * list.
       */
      if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
        prev_inst_list[next_slot++] = check_lir;
        if (next_slot == MAX_HOIST_DISTANCE) {
          break;
        }
      }

      /* Found a new place to put the load - move it here */
      if (stop_here == true) {
        DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir, "HOIST STOP"));
        break;
      }
    }

    /*
     * Reached the top - use head_lir as the dependent marker as all labels
     * are barriers.
     */
    if (stop_here == false && next_slot < MAX_HOIST_DISTANCE) {
      prev_inst_list[next_slot++] = head_lir;
    }

    /*
     * At least one independent instruction is found. Scan in the reverse
     * direction to find a beneficial slot.
     */
    if (next_slot >= 2) {
      int first_slot = next_slot - 2;
      int slot;
      LIR* dep_lir = prev_inst_list[next_slot-1];
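      /*
       * prev_inst_list[0] is the instruction immediately above this_lir;
       * higher slots are further away, and dep_lir (the last entry) is the
       * dependent instruction or head_lir that bounds the hoist.
       */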
      /* If there is a ld-ld dependency, wait LDLD_DISTANCE cycles */
      if (!is_pseudo_opcode(dep_lir->opcode) &&
          (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
        first_slot -= LDLD_DISTANCE;
      }
      /*
       * Make sure we check slot >= 0 since first_slot may be negative
       * when the loop is first entered.
       */
      for (slot = first_slot; slot >= 0; slot--) {
        LIR* cur_lir = prev_inst_list[slot];
        LIR* prev_lir = prev_inst_list[slot+1];

        /* Check the highest instruction */
        if (prev_lir->def_mask == ENCODE_ALL) {
          /*
           * If the first instruction is a load, don't hoist anything
           * above it since it is unlikely to be beneficial.
           */
          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) {
            continue;
          }
          /*
           * If the remaining number of slots is less than LD_LATENCY,
           * insert the hoisted load here.
           */
          if (slot < LD_LATENCY) {
            break;
          }
        }

        // Don't look across a barrier label
        if ((prev_lir->opcode == kPseudoTargetLabel) ||
            (prev_lir->opcode == kPseudoSafepointPC) ||
            (prev_lir->opcode == kPseudoBarrier)) {
          break;
        }

        /*
         * Try to find two instructions with a load/use dependency until
         * the remaining instructions are less than LD_LATENCY.
         */
        bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
        if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
          break;
        }
      }

      /* Found a slot to hoist to */
      if (slot >= 0) {
        LIR* cur_lir = prev_inst_list[slot];
        LIR* new_load_lir =
          static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
        *new_load_lir = *this_lir;
        /*
         * Insertion is guaranteed to succeed since cur_lir
         * is never the first LIR on the list.
         */
        InsertLIRBefore(cur_lir, new_load_lir);
        this_lir->flags.is_nop = true;
      }
    }
  }
}

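/* Run the local optimizations enabled for this compilation unit on the given instruction range. */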
void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) {
  if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) {
    ApplyLoadStoreElimination(head_lir, tail_lir);
  }
  if (!(cu_->disable_opt & (1 << kLoadHoisting))) {
    ApplyLoadHoisting(head_lir, tail_lir);
  }
}

/*
 * Nop any unconditional branches that go to the next instruction.
 * Note: new redundant branches may be inserted later, and we'll
 * use a check in final instruction assembly to nop those out.
 */
void Mir2Lir::RemoveRedundantBranches() {
  LIR* this_lir;

  for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) {
    /* Branch to the next instruction */
    if (IsUnconditionalBranch(this_lir)) {
      LIR* next_lir = this_lir;

      while (true) {
        next_lir = NEXT_LIR(next_lir);

        /*
         * Is the branch target the next instruction?
         */
        if (next_lir == this_lir->target) {
          this_lir->flags.is_nop = true;
          break;
        }

        /*
         * Found real useful stuff between the branch and the target.
         * Need to explicitly check the last_lir_insn_ here because it
         * might be the last real instruction.
         */
        if (!is_pseudo_opcode(next_lir->opcode) ||
            (next_lir == last_lir_insn_))
          break;
      }
    }
  }
}

}  // namespace art