/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file vc4_qir_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies from the bottom up, and make a list of the DAG
 * heads.  Heuristically pick a DAG head and schedule (remove) it, then put
 * all the parents that are now DAG heads into the list of things to
 * schedule.
 *
 * The goal of scheduling here, before register allocation and conversion to
 * QPU instructions, is to reduce register pressure by reordering instructions
 * so that values are consumed soon after they are produced, shortening their
 * live ranges.
 */

#include "vc4_qir.h"

static bool debug;

struct schedule_node {
        struct list_head link;
        struct qinst *inst;

        struct schedule_node **children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /* Length of the longest (latency) chain from a DAG head to this
         * instruction.
         */
        uint32_t delay;

        /* Longest time + latency_between(parent, this) of any parent of this
         * node.
         */
        uint32_t unblocked_time;
};

struct schedule_state {
        /* List of struct schedule_node *.  This starts out with all
         * instructions, and after dependency updates it's trimmed to be just
         * the DAG heads.
         */
        struct list_head worklist;

        uint32_t time;

        uint32_t *temp_writes;

        BITSET_WORD *temp_live;
};

/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };

/**
 * Marks a dependency between two instructions, that \p after must appear after
 * \p before.
 *
 * Our dependencies are tracked as a DAG.  Since we're scheduling bottom-up,
 * the latest instructions with nothing left to schedule are the DAG heads,
 * and their inputs are their children.
 */
static void
add_dep(enum direction dir,
        struct schedule_node *before,
        struct schedule_node *after)
{
        if (!before || !after)
                return;

        assert(before != after);

        if (dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        /* Skip adding the edge if this dependency is already recorded. */
        for (int i = 0; i < after->child_count; i++) {
                if (after->children[i] == before)
                        return;
        }

        if (after->child_array_size <= after->child_count) {
                after->child_array_size = MAX2(after->child_array_size * 2, 16);
                after->children = reralloc(after, after->children,
                                           struct schedule_node *,
                                           after->child_array_size);
        }

        after->children[after->child_count] = before;
        after->child_count++;
        before->parent_count++;
}

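/**
 * Adds a dependency on the last node to access a resource, then records \p
 * after as the new last accessor.
 *
 * This serializes all accesses to a piece of shared state (a register file,
 * the TLB, etc.) by chaining each new access to the previous one.
 */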
static void
add_write_dep(enum direction dir,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(dir, *before, after);
        *before = after;
}

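/* State used while building the dependency DAG.  The last_* pointers track
 * the most recent instruction to touch each piece of shared hardware state,
 * so that later accesses can be ordered after it with add_(write_)dep().
 */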
struct schedule_setup_state {
        struct schedule_node **last_temp_write;
        struct schedule_node *last_sf;
        struct schedule_node *last_vary_read;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_vpm_write;
        struct schedule_node *last_tex_coord;
        struct schedule_node *last_tex_result;
        struct schedule_node *last_tlb;
        struct schedule_node *last_uniforms_reset;
        enum direction dir;

        /**
         * Texture FIFO tracking.  This is done top-to-bottom, and is used to
         * track the QOP_TEX_RESULTs and add dependencies on previous ones
         * when trying to submit texture coords with TFREQ full or new texture
         * fetches with TFRCV full.
         */
        struct {
                struct schedule_node *node;
                int coords;
        } tex_fifo[8];
        int tfreq_count; /**< Number of texture coords outstanding. */
        int tfrcv_count; /**< Number of texture results outstanding. */
        int tex_fifo_pos;
};

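/* Makes node \p n depend on the oldest outstanding QOP_TEX_RESULT and
 * retires that FIFO entry.  Used when submitting \p n would otherwise
 * overflow the TFREQ or TFRCV FIFO.
 */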
static void
block_until_tex_result(struct schedule_setup_state *state, struct schedule_node *n)
{
        add_dep(state->dir, state->tex_fifo[0].node, n);

        state->tfreq_count -= state->tex_fifo[0].coords;
        state->tfrcv_count--;

        memmove(&state->tex_fifo[0],
                &state->tex_fifo[1],
                state->tex_fifo_pos * sizeof(state->tex_fifo[0]));
        state->tex_fifo_pos--;
}

/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all VPM reads have to happen in order."
 */
static void
calculate_deps(struct schedule_setup_state *state, struct schedule_node *n)
{
        struct qinst *inst = n->inst;
        enum direction dir = state->dir;

        /* Add deps for temp register and varying accesses.  Note that we
         * ignore uniform accesses, because qir_reorder_uniforms() happens
         * after this.
         */
        for (int i = 0; i < qir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_TEMP:
                        add_dep(dir,
                                state->last_temp_write[inst->src[i].index], n);
                        break;

                case QFILE_VARY:
                        add_write_dep(dir, &state->last_vary_read, n);
                        break;

                case QFILE_VPM:
                        add_write_dep(dir, &state->last_vpm_read, n);
                        break;

                default:
                        break;
                }
        }

        switch (inst->op) {
        case QOP_VARY_ADD_C:
                add_dep(dir, state->last_vary_read, n);
                break;

        case QOP_TEX_RESULT:
                /* Results have to be fetched in order. */
                add_write_dep(dir, &state->last_tex_result, n);
                break;

        case QOP_THRSW:
                /* After a new THRSW, one must collect all texture samples
                 * queued since the previous THRSW/program start.  For now, we
                 * have one THRSW in between each texture setup and its
                 * results collection as our input, and we just make sure that
                 * that ordering is maintained.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                add_write_dep(dir, &state->last_tex_result, n);

                /* Accumulators and flags are lost across thread switches. */
                add_write_dep(dir, &state->last_sf, n);

                /* Setup, like the varyings, will need to be drained before we
                 * thread switch.
                 */
                add_write_dep(dir, &state->last_vary_read, n);

                /* The TLB-locking operations have to stay after the last
                 * thread switch.
                 */
                add_write_dep(dir, &state->last_tlb, n);
                break;

        case QOP_TLB_COLOR_READ:
        case QOP_MS_MASK:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }

        switch (inst->dst.file) {
        case QFILE_VPM:
                add_write_dep(dir, &state->last_vpm_write, n);
                break;

        case QFILE_TEMP:
                add_write_dep(dir, &state->last_temp_write[inst->dst.index], n);
                break;

        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_STENCIL_SETUP:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        case QFILE_TEX_S_DIRECT:
        case QFILE_TEX_S:
        case QFILE_TEX_T:
        case QFILE_TEX_R:
        case QFILE_TEX_B:
                /* Texturing setup gets scheduled in order, because
                 * the uniforms referenced by them have to land in a
                 * specific order.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                break;

        default:
                break;
        }

        if (qir_depends_on_flags(inst))
                add_dep(dir, state->last_sf, n);

        if (inst->sf)
                add_write_dep(dir, &state->last_sf, n);
}

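/* Walks the block top-to-bottom building dependencies, including the ones
 * that only matter in the forward direction: uniform stream ordering
 * relative to QOP_UNIFORMS_RESET, and the texture coordinate/result FIFO
 * limits.
 */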
static void
calculate_forward_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);
        state.dir = F;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                struct qinst *inst = n->inst;

                calculate_deps(&state, n);

                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        switch (inst->src[i].file) {
                        case QFILE_UNIF:
                                add_dep(state.dir, state.last_uniforms_reset, n);
                                break;
                        default:
                                break;
                        }
                }

                switch (inst->dst.file) {
                case QFILE_TEX_S_DIRECT:
                case QFILE_TEX_S:
                case QFILE_TEX_T:
                case QFILE_TEX_R:
                case QFILE_TEX_B:
                        /* From the VC4 spec:
                         *
                         *     "The TFREQ input FIFO holds two full lots of s,
                         *      t, r, b data, plus associated setup data, per
                         *      QPU, that is, there are eight data slots. For
                         *      each texture request, slots are only consumed
                         *      for the components of s, t, r, and b actually
                         *      written. Thus the FIFO can hold four requests
                         *      of just (s, t) data, or eight requests of just
                         *      s data (for direct addressed data lookups).
                         *
                         *      Note that there is one FIFO per QPU, and the
                         *      FIFO has no concept of threads - that is,
                         *      multi-threaded shaders must be careful to use
                         *      only 1/2 the FIFO depth before reading
                         *      back. Multi-threaded programs must also
                         *      therefore always thread switch on texture
                         *      fetch as the other thread may have data
                         *      waiting in the FIFO."
                         *
                         * If the texture coordinate fifo is full, block this
                         * on the last QOP_TEX_RESULT.
                         */
                        if (state.tfreq_count == (c->fs_threaded ? 4 : 8)) {
                                block_until_tex_result(&state, n);
                        }

                        /* From the VC4 spec:
                         *
                         *     "Since the maximum number of texture requests
                         *      in the input (TFREQ) FIFO is four lots of (s,
                         *      t) data, the output (TFRCV) FIFO is sized to
                         *      hold four lots of max-size color data per
                         *      QPU. For non-float color, reads are packed
                         *      RGBA8888 data (one read per pixel). For 16-bit
                         *      float color, two reads are necessary per
                         *      pixel, with reads packed as RG1616 then
                         *      BA1616. So per QPU there are eight color slots
                         *      in the TFRCV FIFO."
                         *
                         * If the texture result fifo is full, block adding
                         * any more to it until the last QOP_TEX_RESULT.
                         */
                        if (inst->dst.file == QFILE_TEX_S ||
                            inst->dst.file == QFILE_TEX_S_DIRECT) {
                                if (state.tfrcv_count ==
                                    (c->fs_threaded ? 2 : 4))
                                        block_until_tex_result(&state, n);
                                state.tfrcv_count++;
                        }

                        state.tex_fifo[state.tex_fifo_pos].coords++;
                        state.tfreq_count++;
                        break;

                default:
                        break;
                }

                switch (inst->op) {
                case QOP_TEX_RESULT:
                        /* Results have to be fetched after the
                         * coordinate setup.  Note that we're assuming
                         * here that our input shader has the texture
                         * coord setup and result fetch in order,
                         * which is true initially but not of our
                         * instruction stream after this pass.
                         */
                        add_dep(state.dir, state.last_tex_coord, n);

                        state.tex_fifo[state.tex_fifo_pos].node = n;

                        state.tex_fifo_pos++;
                        memset(&state.tex_fifo[state.tex_fifo_pos], 0,
                               sizeof(state.tex_fifo[0]));
                        break;

                case QOP_UNIFORMS_RESET:
                        add_write_dep(state.dir, &state.last_uniforms_reset, n);
                        break;

                default:
                        break;
                }
        }
}

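/* Walks the block bottom-to-top (dir == R) to add the remaining ordering
 * edges, e.g. the anti-dependencies that keep a read of a temp before a
 * later write of that temp.
 */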
static void
calculate_reverse_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.dir = R;
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);

        list_for_each_entry_rev(struct schedule_node, n, schedule_list, link) {
                calculate_deps(&state, n);
        }
}

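/* Estimates how scheduling \p inst at the current (bottom-up) point changes
 * register pressure: -1 if this is the only write of its destination temp
 * (the value's live range ends here, going upward), +1 for each source temp
 * that isn't live yet.  Lower is better.
 */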
static int
get_register_pressure_cost(struct schedule_state *state, struct qinst *inst)
{
        int cost = 0;

        if (inst->dst.file == QFILE_TEMP &&
            state->temp_writes[inst->dst.index] == 1)
                cost--;

        for (int i = 0; i < qir_get_nsrc(inst); i++) {
                if (inst->src[i].file == QFILE_TEMP &&
                    !BITSET_TEST(state->temp_live, inst->src[i].index)) {
                        cost++;
                }
        }

        return cost;
}

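/* Returns true if \p inst is a TLB access, which acquires the fragment
 * scoreboard lock; we want these to end up as late in the program as
 * possible (see choose_instruction()).
 */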
static bool
locks_scoreboard(struct qinst *inst)
{
        if (inst->op == QOP_TLB_COLOR_READ)
                return true;

        switch (inst->dst.file) {
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
                return true;
        default:
                return false;
        }
}

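/* Picks the next DAG head to schedule from the worklist.  Priorities, in
 * order: keep QOP_BRANCH last in the block, prefer scoreboard-locking
 * instructions, avoid stalling on unblocked_time, reduce register pressure,
 * then pick the deepest remaining dependency chain.
 */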
static struct schedule_node *
choose_instruction(struct schedule_state *state)
{
        struct schedule_node *chosen = NULL;

        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                /* The branches aren't being tracked as dependencies.  Make
                 * sure that they stay scheduled as the last instruction of
                 * the block, which is to say the first one we choose to
                 * schedule.
                 */
                if (n->inst->op == QOP_BRANCH)
                        return n;

                if (!chosen) {
                        chosen = n;
                        continue;
                }

                /* Prefer scheduling things that lock the scoreboard, so that
                 * they appear late in the program and we get more parallelism
                 * between shaders on multiple QPUs hitting the same fragment.
                 */
                if (locks_scoreboard(n->inst) &&
                    !locks_scoreboard(chosen->inst)) {
                        chosen = n;
                        continue;
                } else if (!locks_scoreboard(n->inst) &&
                           locks_scoreboard(chosen->inst)) {
                        continue;
                }

                /* If we would block on the previously chosen node, but would
                 * block less on this one, then prefer it.
                 */
                if (chosen->unblocked_time > state->time &&
                    n->unblocked_time < chosen->unblocked_time) {
                        chosen = n;
                        continue;
                } else if (n->unblocked_time > state->time &&
                           n->unblocked_time > chosen->unblocked_time) {
                        continue;
                }

                /* If we can definitely reduce register pressure, do so
                 * immediately.
                 */
                int register_pressure_cost =
                        get_register_pressure_cost(state, n->inst);
                int chosen_register_pressure_cost =
                        get_register_pressure_cost(state, chosen->inst);

                if (register_pressure_cost < chosen_register_pressure_cost) {
                        chosen = n;
                        continue;
                } else if (register_pressure_cost >
                           chosen_register_pressure_cost) {
                        continue;
                }

                /* Otherwise, prefer instructions with the deepest chain to
                 * the end of the program.  This avoids the problem of
                 * "everything generates a temp, nothing finishes freeing one,
                 * guess I'll just keep emitting varying mul/adds".
                 */
                if (n->delay > chosen->delay) {
                        chosen = n;
                        continue;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}

static void
dump_state(struct vc4_compile *c, struct schedule_state *state)
{
        uint32_t i = 0;
        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                fprintf(stderr, "%3d: ", i++);
                qir_dump_inst(c, n->inst);
                fprintf(stderr, " (%d cost)\n",
                        get_register_pressure_cost(state, n->inst));

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i];
                        fprintf(stderr, "   - ");
                        qir_dump_inst(c, child->inst);
                        fprintf(stderr, " (%d parents)\n", child->parent_count);
                }
        }
}

/* Estimate of how many instructions we should schedule between operations.
 *
 * These aren't in real cycle counts, because we're just estimating cycle
 * times anyway.  QIR instructions will get paired up when turned into QPU
 * instructions, or extra NOP delays will have to be added due to register
 * allocation choices.
 */
static uint32_t
latency_between(struct schedule_node *before, struct schedule_node *after)
{
        if ((before->inst->dst.file == QFILE_TEX_S ||
             before->inst->dst.file == QFILE_TEX_S_DIRECT) &&
            after->inst->op == QOP_TEX_RESULT)
                return 100;

        switch (before->inst->op) {
        case QOP_RCP:
        case QOP_RSQ:
        case QOP_EXP2:
        case QOP_LOG2:
                for (int i = 0; i < qir_get_nsrc(after->inst); i++) {
                        if (after->inst->src[i].file ==
                            before->inst->dst.file &&
                            after->inst->src[i].index ==
                            before->inst->dst.index) {
                                /* There are two QPU delay slots before we can
                                 * read a math result, which could be up to 4
                                 * QIR instructions if they packed well.
                                 */
                                return 4;
                        }
                }
                break;
        default:
                break;
        }

        return 1;
}

/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                /* The color read needs to be scheduled late, to avoid locking
                 * the scoreboard early.  This is our best tool for
                 * encouraging that.  The other scoreboard locking ops will
                 * have this happen by default, since they are generally the
                 * DAG heads or close to them.
                 */
                if (n->inst->op == QOP_TLB_COLOR_READ)
                        n->delay = 1000;
                else
                        n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i]->delay)
                                compute_delay(n->children[i]);
                        n->delay = MAX2(n->delay,
                                        n->children[i]->delay +
                                        latency_between(n->children[i], n));
                }
        }
}

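/* Core list-scheduling loop: repeatedly picks a DAG head from the worklist,
 * prepends it to the output block, and promotes any children whose last
 * parent was just scheduled onto the worklist.
 */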
static void
schedule_instructions(struct vc4_compile *c,
                      struct qblock *block, struct schedule_state *state)
{
        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(c, state);
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n,
                                 &state->worklist, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        state->time = 0;
        while (!list_empty(&state->worklist)) {
                struct schedule_node *chosen = choose_instruction(state);
                struct qinst *inst = chosen->inst;

                if (debug) {
                        fprintf(stderr, "current list:\n");
                        dump_state(c, state);
                        fprintf(stderr, "chose: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, " (%d cost)\n",
                                get_register_pressure_cost(state, inst));
                }

                state->time = MAX2(state->time, chosen->unblocked_time);

                /* Schedule this instruction back onto the QIR list.  Since
                 * we're walking bottom-up, list_add() prepends it, so the
                 * block ends up back in top-to-bottom order.
                 */
                list_del(&chosen->link);
                list_add(&inst->link, &block->instructions);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                for (int i = chosen->child_count - 1; i >= 0; i--) {
                        struct schedule_node *child = chosen->children[i];

                        child->unblocked_time = MAX2(child->unblocked_time,
                                                     state->time +
                                                     latency_between(child,
                                                                     chosen));
                        child->parent_count--;
                        if (child->parent_count == 0)
                                list_add(&child->link, &state->worklist);
                }

                /* Update our tracking of register pressure. */
                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                BITSET_SET(state->temp_live, inst->src[i].index);
                }
                if (inst->dst.file == QFILE_TEMP) {
                        state->temp_writes[inst->dst.index]--;
                        if (state->temp_writes[inst->dst.index] == 0)
                                BITSET_CLEAR(state->temp_live, inst->dst.index);
                }

                state->time++;
        }
}

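/* Schedules a single basic block: wraps each instruction in a schedule_node,
 * builds the dependency DAG in both directions, computes per-node delays,
 * and then emits the instructions back into the block in scheduled order.
 */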
static void
qir_schedule_instructions_block(struct vc4_compile *c,
                                struct qblock *block)
{
        void *mem_ctx = ralloc_context(NULL);
        struct schedule_state state = { { 0 } };

        state.temp_writes = rzalloc_array(mem_ctx, uint32_t, c->num_temps);
        state.temp_live = rzalloc_array(mem_ctx, BITSET_WORD,
                                        BITSET_WORDS(c->num_temps));
        list_inithead(&state.worklist);

        /* Wrap each instruction in a scheduler structure. */
        qir_for_each_inst_safe(inst, block) {
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;
                list_del(&inst->link);
                list_addtail(&n->link, &state.worklist);

                if (inst->dst.file == QFILE_TEMP)
                        state.temp_writes[inst->dst.index]++;
        }

        /* Dependencies tracked top-to-bottom. */
        calculate_forward_deps(c, mem_ctx, &state.worklist);
        /* Dependencies tracked bottom-to-top. */
        calculate_reverse_deps(c, mem_ctx, &state.worklist);

        list_for_each_entry(struct schedule_node, n, &state.worklist, link)
                compute_delay(n);

        schedule_instructions(c, block, &state);

        ralloc_free(mem_ctx);
}

void
qir_schedule_instructions(struct vc4_compile *c)
{
        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                qir_dump(c);
        }

        qir_for_each_block(block, c)
                qir_schedule_instructions_block(c, block);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                qir_dump(c);
        }
}