/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_instr_set.h"

/*
 * Implements Global Code Motion.  A description of GCM can be found in
 * "Global Code Motion / Global Value Numbering" by Cliff Click.
 * Unfortunately, the algorithm presented in the paper is broken in a
 * number of ways.  The algorithm used here differs substantially from the
 * one in the paper but it is, in my opinion, much easier to read and to
 * verify for correctness.
 */
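
/* As a rough illustration (a hypothetical shader fragment, not from the
 * paper), consider:
 *
 *    loop {
 *       x = a + b;      <- loop-invariant
 *       if (cond)
 *          use(x);
 *    }
 *
 * Scheduling "early" lifts x = a + b up to the highest block dominated by
 * the definitions of both a and b; scheduling "late" sinks it toward the
 * LCA of its uses.  Picking the block with the smallest loop depth on the
 * dominance-tree path between those two points moves the add out of the
 * loop entirely.
 */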

struct gcm_block_info {
   /* Number of loops this block is inside */
   unsigned loop_depth;

   /* The last instruction inserted into this block.  This is used as we
    * traverse the instructions and insert them back into the program to
    * put them in the right order.
    */
   nir_instr *last_instr;
};

/* Flags used in the instr->pass_flags field for various instruction states */
enum {
   GCM_INSTR_PINNED =            (1 << 0),
   GCM_INSTR_SCHEDULED_EARLY =   (1 << 1),
   GCM_INSTR_SCHEDULED_LATE =    (1 << 2),
   GCM_INSTR_PLACED =            (1 << 3),
};
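
/* The pass runs in four phases, in the order these flags are listed:
 * pinning, early scheduling, late scheduling, and placement.  The
 * SCHEDULED_EARLY, SCHEDULED_LATE, and PLACED flags double as "visited"
 * markers for the recursive walks below, so each instruction is
 * processed at most once per phase.
 */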

struct gcm_state {
   nir_function_impl *impl;
   nir_instr *instr;

   /* The list of non-pinned instructions.  As we pin instructions, we
    * pull non-pinned ones out of their blocks and place them in this
    * list.  This saves us from having linked-list problems when we go
    * to put instructions back in their blocks.
    */
   struct exec_list instrs;

   struct gcm_block_info *blocks;
};

/* Recursively walks the CFG and builds the block_info structure */
static void
gcm_build_block_info(struct exec_list *cf_list, struct gcm_state *state,
                     unsigned loop_depth)
{
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(node);
         state->blocks[block->index].loop_depth = loop_depth;
         break;
      }
      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(node);
         gcm_build_block_info(&if_stmt->then_list, state, loop_depth);
         gcm_build_block_info(&if_stmt->else_list, state, loop_depth);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(node);
         gcm_build_block_info(&loop->body, state, loop_depth + 1);
         break;
      }
      default:
         unreachable("Invalid CF node type");
      }
   }
}
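
/* Note that the walk above relies on block->index, which is kept valid
 * by the nir_metadata_require(impl, nir_metadata_block_index) call in
 * opt_gcm_impl below; state->blocks is indexed by that number.
 */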

/* Walks the instruction list and marks immovable instructions as pinned
 *
 * This function also serves to initialize the instr->pass_flags field.
 * After this is completed, all instructions' pass_flags fields will be set
 * to either GCM_INSTR_PINNED or 0.
 */
static bool
gcm_pin_instructions_block(nir_block *block, struct gcm_state *state)
{
   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         switch (nir_instr_as_alu(instr)->op) {
         case nir_op_fddx:
         case nir_op_fddy:
         case nir_op_fddx_fine:
         case nir_op_fddy_fine:
         case nir_op_fddx_coarse:
         case nir_op_fddy_coarse:
            /* These can only go in uniform control flow; pin them for now */
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            instr->pass_flags = 0;
            break;
         }
         break;

      case nir_instr_type_tex:
         switch (nir_instr_as_tex(instr)->op) {
         case nir_texop_tex:
         case nir_texop_txb:
         case nir_texop_lod:
            /* These take implicit derivatives, so they need to be pinned */
            instr->pass_flags = GCM_INSTR_PINNED;
            break;

         default:
            instr->pass_flags = 0;
            break;
         }
         break;

      case nir_instr_type_load_const:
         instr->pass_flags = 0;
         break;

      case nir_instr_type_intrinsic: {
         const nir_intrinsic_info *info =
            &nir_intrinsic_infos[nir_instr_as_intrinsic(instr)->intrinsic];

         if ((info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
             (info->flags & NIR_INTRINSIC_CAN_REORDER)) {
            instr->pass_flags = 0;
         } else {
            instr->pass_flags = GCM_INSTR_PINNED;
         }
         break;
      }

      case nir_instr_type_jump:
      case nir_instr_type_ssa_undef:
      case nir_instr_type_phi:
         instr->pass_flags = GCM_INSTR_PINNED;
         break;

      default:
         unreachable("Invalid instruction type in GCM");
      }

      if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
         /* If this is an unpinned instruction, go ahead and pull it out of
          * the program and put it on the instrs list.  This has a couple
          * of benefits.  First, it makes the scheduling algorithm more
          * efficient because we can avoid walking over basic blocks and
          * pinned instructions.  Second, it keeps us from causing linked
          * list confusion when we're trying to put everything in its
          * proper place at the end of the pass.
          *
          * Note that we don't use nir_instr_remove here because that also
          * cleans up uses and defs and we want to keep that information.
          */
         exec_node_remove(&instr->node);
         exec_list_push_tail(&state->instrs, &instr->node);
      }
   }

   return true;
}
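
/* Once this has run over every block, state->instrs holds all of the
 * movable instructions in source order while pinned instructions remain
 * in their blocks.  The top-level loops in opt_gcm_impl then schedule
 * and place everything by walking that list.
 */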

static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state);

/** Updates an instruction's schedule for the given source
 *
 * This function is called iteratively as we walk the sources of an
 * instruction.  It ensures that the given source instruction has been
 * scheduled and then updates this instruction's block if the source
 * instruction is lower down the dominance tree.
 */
static bool
gcm_schedule_early_src(nir_src *src, void *void_state)
{
   struct gcm_state *state = void_state;
   nir_instr *instr = state->instr;

   assert(src->is_ssa);

   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);

   /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index.  Since we
    * know that this instruction must have been dominated by all of its
    * sources at some point (even if it's gone through value-numbering),
    * all of the sources must lie on the same branch of the dominance tree.
    * Therefore, we can just compare indices.
    */
   if (instr->block->index < src->ssa->parent_instr->block->index)
      instr->block = src->ssa->parent_instr->block;

   /* We need to restore the state instruction because it may have been
    * changed through the gcm_schedule_early_instr call above.  We may
    * still be iterating through sources, and future calls to
    * gcm_schedule_early_src for the same instruction will still need it.
    */
   state->instr = instr;

   return true;
}

/** Schedules an instruction early
 *
 * This function performs a recursive depth-first search starting at the
 * given instruction and proceeding through the sources to schedule
 * instructions as early as they can possibly go in the dominance tree.
 * The instructions are "scheduled" by updating their instr->block field.
 */
static void
gcm_schedule_early_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_EARLY)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_EARLY;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following the
    * sources of phi nodes which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   /* Start with the instruction at the top.  As we iterate over the
    * sources, it will get moved down as needed.
    */
   instr->block = nir_start_block(state->impl);
   state->instr = instr;

   nir_foreach_src(instr, gcm_schedule_early_src, state);
}
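
/* For example (a hypothetical case), if an instruction reads values
 * defined in blocks B1 and B3, where B1 dominates B3, the walk above
 * leaves instr->block == B3: the earliest block in which every one of
 * its sources is already available.
 */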

static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state);

/** Schedules the instruction associated with the given SSA def late
 *
 * This function works by first walking all of the uses of the given SSA
 * definition, ensuring that they are scheduled, and then computing the LCA
 * (least common ancestor) of its uses.  It then schedules this instruction
 * as close to the LCA as possible while trying to stay out of loops.
 */
static bool
gcm_schedule_late_def(nir_ssa_def *def, void *void_state)
{
   struct gcm_state *state = void_state;

   nir_block *lca = NULL;

   nir_foreach_use(use_src, def) {
      nir_instr *use_instr = use_src->parent_instr;

      gcm_schedule_late_instr(use_instr, state);

      /* Phi instructions are a bit special.  SSA definitions don't have to
       * dominate the phi nodes that use them; instead, they have to
       * dominate the predecessor block corresponding to the phi source.
       * We handle this by walking the phi's sources, finding any that use
       * this SSA def, and taking those predecessor blocks instead of the
       * block the phi lives in.
       */
      if (use_instr->type == nir_instr_type_phi) {
         nir_phi_instr *phi = nir_instr_as_phi(use_instr);

         nir_foreach_phi_src(phi_src, phi) {
            if (phi_src->src.ssa == def)
               lca = nir_dominance_lca(lca, phi_src->pred);
         }
      } else {
         lca = nir_dominance_lca(lca, use_instr->block);
      }
   }

   nir_foreach_if_use(use_src, def) {
      nir_if *if_stmt = use_src->parent_if;

      /* For if statements, we consider the block to be the one immediately
       * preceding the if CF node.
       */
      nir_block *pred_block =
         nir_cf_node_as_block(nir_cf_node_prev(&if_stmt->cf_node));

      lca = nir_dominance_lca(lca, pred_block);
   }

   /* Some instructions may never be used.  We'll just leave them scheduled
    * early and let dead code clean them up.
    */
   if (lca == NULL)
      return true;

   /* We now have the LCA of all of the uses.  If our invariants hold,
    * this is dominated by the block that we chose when scheduling early.
    * We now walk up the dominance tree and pick the lowest block that is
    * as far outside loops as we can get.
    */
   nir_block *best = lca;
   for (nir_block *block = lca; block != NULL; block = block->imm_dom) {
      if (state->blocks[block->index].loop_depth <
          state->blocks[best->index].loop_depth)
         best = block;

      if (block == def->parent_instr->block)
         break;
   }
   def->parent_instr->block = best;

   return true;
}
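
/* As a sketch of the loop above: if the LCA of the uses sits inside a
 * loop but the block chosen by early scheduling is outside of it, the
 * walk crosses the loop on its way up the dominance tree and settles on
 * the shallower block, hoisting the computation out of the loop.
 */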

/** Schedules an instruction late
 *
 * This function performs a depth-first search starting at the given
 * instruction and proceeding through its uses to schedule instructions as
 * late as they can reasonably go in the dominance tree.  The instructions
 * are "scheduled" by updating their instr->block field.
 *
 * The name of this function is actually a bit of a misnomer as it doesn't
 * schedule them "as late as possible" as the paper implies.  Instead, it
 * first finds the latest possible place it can schedule the instruction and
 * then possibly schedules it earlier than that.  The actual location is as
 * far down the tree as we can go while trying to stay out of loops.
 */
static void
gcm_schedule_late_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_SCHEDULED_LATE)
      return;

   instr->pass_flags |= GCM_INSTR_SCHEDULED_LATE;

   /* Pinned instructions are already scheduled so we don't need to do
    * anything.  Also, bailing here keeps us from ever following phi nodes
    * which can be back-edges.
    */
   if (instr->pass_flags & GCM_INSTR_PINNED)
      return;

   nir_foreach_ssa_def(instr, gcm_schedule_late_def, state);
}

static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state);

static bool
gcm_place_instr_def(nir_ssa_def *def, void *state)
{
   nir_foreach_use(use_src, def)
      gcm_place_instr(use_src->parent_instr, state);

   return false;
}

/** Places an instruction back into the program
 *
 * The earlier passes of GCM simply choose blocks for each instruction and
 * otherwise leave them alone.  This pass actually places the instructions
 * into their chosen blocks.
 *
 * To do so, we use a standard post-order depth-first search linearization
 * algorithm.  We walk over the uses of the given instruction and ensure
 * that they are placed and then place this instruction.  Because we are
 * working on multiple blocks at a time, we keep track of the last inserted
 * instruction per-block in the state structure's block_info array.  When
 * we insert an instruction in a block we insert it before the last
 * instruction inserted in that block rather than the last instruction
 * inserted globally.
 */
static void
gcm_place_instr(nir_instr *instr, struct gcm_state *state)
{
   if (instr->pass_flags & GCM_INSTR_PLACED)
      return;

   instr->pass_flags |= GCM_INSTR_PLACED;

   /* Phi nodes are our one source of back-edges.  Since this placement
    * phase only orders instructions within their blocks, we don't need to
    * worry about phis: they always stay at the top of their block.  Just
    * skip them completely.
    */
   if (instr->type == nir_instr_type_phi) {
      assert(instr->pass_flags & GCM_INSTR_PINNED);
      return;
   }

   nir_foreach_ssa_def(instr, gcm_place_instr_def, state);

   if (instr->pass_flags & GCM_INSTR_PINNED) {
      /* Pinned instructions have an implicit dependence on the pinned
       * instructions that come after them in the block.  Since the pinned
       * instructions will naturally "chain" together, we only need to
       * explicitly visit one of them.
       */
      for (nir_instr *after = nir_instr_next(instr);
           after;
           after = nir_instr_next(after)) {
         if (after->pass_flags & GCM_INSTR_PINNED) {
            gcm_place_instr(after, state);
            break;
         }
      }
   }

   struct gcm_block_info *block_info = &state->blocks[instr->block->index];
   if (!(instr->pass_flags & GCM_INSTR_PINNED)) {
      exec_node_remove(&instr->node);

      if (block_info->last_instr) {
         exec_node_insert_node_before(&block_info->last_instr->node,
                                      &instr->node);
      } else {
         /* Schedule it at the end of the block */
         nir_instr *jump_instr = nir_block_last_instr(instr->block);
         if (jump_instr && jump_instr->type == nir_instr_type_jump) {
            exec_node_insert_node_before(&jump_instr->node, &instr->node);
         } else {
            exec_list_push_tail(&instr->block->instr_list, &instr->node);
         }
      }
   }

   block_info->last_instr = instr;
}
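
/* Note that inserting before block_info->last_instr, rather than at the
 * block's tail, preserves the post-order of the placement DFS: an
 * instruction's uses are always placed first, so the instruction itself
 * ends up above everything that consumes its values.
 */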

static bool
opt_gcm_impl(nir_function_impl *impl, bool value_number)
{
   nir_metadata_require(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

   struct gcm_state state;

   state.impl = impl;
   state.instr = NULL;
   exec_list_make_empty(&state.instrs);
   state.blocks = rzalloc_array(NULL, struct gcm_block_info, impl->num_blocks);

   gcm_build_block_info(&impl->body, &state, 0);

   nir_foreach_block(block, impl) {
      gcm_pin_instructions_block(block, &state);
   }

   bool progress = false;
   if (value_number) {
      struct set *gvn_set = nir_instr_set_create(NULL);
      foreach_list_typed_safe(nir_instr, instr, node, &state.instrs) {
         if (nir_instr_set_add_or_rewrite(gvn_set, instr)) {
            nir_instr_remove(instr);
            progress = true;
         }
      }
      nir_instr_set_destroy(gvn_set);
   }

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_early_instr(instr, &state);

   foreach_list_typed(nir_instr, instr, node, &state.instrs)
      gcm_schedule_late_instr(instr, &state);

   while (!exec_list_is_empty(&state.instrs)) {
      nir_instr *instr = exec_node_data(nir_instr,
                                        state.instrs.tail_sentinel.prev, node);
      gcm_place_instr(instr, &state);
   }

   ralloc_free(state.blocks);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}

bool
nir_opt_gcm(nir_shader *shader, bool value_number)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= opt_gcm_impl(function->impl, value_number);
   }

   return progress;
}
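
/* A minimal usage sketch (hypothetical caller code; the surrounding pass
 * ordering is up to the driver):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_gcm(shader, true);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 *
 * Running nir_opt_dce afterwards matters because GCM deliberately leaves
 * unused instructions scheduled early rather than deleting them (see
 * gcm_schedule_late_def above).
 */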