brw_fs_reg_allocate.cpp revision 849a3d243d8a0d951202515c06d9b17daf59d2f2
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"

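/* Rewrite a virtual GRF reference in place to the hardware register it was
 * assigned.  reg_hw_locations[] maps a virtual GRF number to the first
 * hardware GRF of its allocation; reg_offset selects a register within that
 * contiguous block and is folded into the base, scaled by reg_width for
 * 16-wide dispatch.
 */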
static void
assign_reg(int *reg_hw_locations, fs_reg *reg, int reg_width)
{
   if (reg->file == GRF) {
      assert(reg->reg_offset >= 0);
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset * reg_width;
      reg->reg_offset = 0;
   }
}

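/* Trivial allocator: place each virtual GRF at the next free hardware GRF,
 * with no liveness analysis and therefore no register reuse.  Since nothing
 * is ever shared, this only works for small programs, and calls fail() if
 * the running total reaches max_grf.
 */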
void
fs_visitor::assign_regs_trivial()
{
   int hw_reg_mapping[this->virtual_grf_count + 1];
   int i;
   int reg_width = c->dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->virtual_grf_count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->virtual_grf_sizes[i - 1] * reg_width);
   }
   this->grf_used = hw_reg_mapping[this->virtual_grf_count];

   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      assign_reg(hw_reg_mapping, &inst->dst, reg_width);
      assign_reg(hw_reg_mapping, &inst->src[0], reg_width);
      assign_reg(hw_reg_mapping, &inst->src[1], reg_width);
      assign_reg(hw_reg_mapping, &inst->src[2], reg_width);
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   }
}

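/* Build the ra register set for this program's register classes.  A class
 * holds virtual GRFs of one size; a class of size n gets one ra register
 * for each possible placement of an n-GRF run within the base_reg_count
 * hardware GRFs, i.e. base_reg_count - (n - 1) of them.  Any previous set
 * stored on the context is freed and rebuilt.
 */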
static void
brw_alloc_reg_set_for_classes(struct brw_context *brw,
                              int *class_sizes,
                              int class_count,
                              int reg_width,
                              int base_reg_count)
{
   struct intel_context *intel = &brw->intel;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }

   ralloc_free(brw->wm.ra_reg_to_grf);
   brw->wm.ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
   ralloc_free(brw->wm.regs);
   brw->wm.regs = ra_alloc_reg_set(brw, ra_reg_count);
   ralloc_free(brw->wm.classes);
   brw->wm.classes = ralloc_array(brw, int, class_count + 1);

   brw->wm.aligned_pairs_class = -1;

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
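   /* For example, a size-2 class register placed at GRF j conflicts with
    * base (size-1) registers j and j+1; ra_add_transitive_reg_conflict()
    * then propagates that conflict to every other register overlapping
    * those base registers, so any two overlapping allocations conflict.
    */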
   int reg = 0;
   int pairs_base_reg = 0;
   int pairs_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      brw->wm.classes[i] = ra_alloc_reg_class(brw->wm.regs);

      /* Save this off for the aligned pair class at the end. */
      if (class_sizes[i] == 2) {
         pairs_base_reg = reg;
         pairs_reg_count = class_reg_count;
      }

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(brw->wm.regs, brw->wm.classes[i], reg);

         brw->wm.ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_transitive_reg_conflict(brw->wm.regs, base_reg, reg);
         }

         reg++;
      }
   }
   assert(reg == ra_reg_count);

   /* Add a special class for aligned pairs, which we'll put delta_x/y
    * in on gen5 so that we can do PLN.
    */
   if (brw->has_pln && reg_width == 1 && intel->gen < 6) {
      brw->wm.aligned_pairs_class = ra_alloc_reg_class(brw->wm.regs);

      for (int i = 0; i < pairs_reg_count; i++) {
         if ((brw->wm.ra_reg_to_grf[pairs_base_reg + i] & 1) == 0) {
            ra_class_add_reg(brw->wm.regs, brw->wm.aligned_pairs_class,
                             pairs_base_reg + i);
         }
      }
      class_count++;
   }

   ra_set_finalize(brw->wm.regs);
}

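/* Graph-coloring allocation: build register classes sized to this program's
 * virtual GRFs, construct an interference graph from the live intervals,
 * and let the ra code color it.  On failure we pick a spill candidate (or
 * fail the compile) and return false so the caller can rewrite the program
 * and try again.
 */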
bool
fs_visitor::assign_regs()
{
   /* Most of this allocation was written for a reg_width of 1
    * (dispatch_width == 8).  In extending to 16-wide, the code was
    * left in place and it was converted to have the hardware
    * registers it's allocating be contiguous physical pairs of regs
    * for reg_width == 2.
    */
   int reg_width = c->dispatch_width / 8;
   int hw_reg_mapping[this->virtual_grf_count];
   int first_assigned_grf = ALIGN(this->first_non_payload_grf, reg_width);
   int base_reg_count = (max_grf - first_assigned_grf) / reg_width;
   int class_sizes[base_reg_count];
   int class_count = 0;

   calculate_live_intervals();

   /* Set up the register classes.
    *
    * The base registers store a scalar value.  For texture samples,
    * we get virtual GRFs composed of 4 contiguous hw registers.  For
    * structures and arrays, we store them as contiguous larger things
    * than that, though we should be able to do better most of the
    * time.
    */
   class_sizes[class_count++] = 1;
   if (brw->has_pln && intel->gen < 6) {
      /* Always set up the (unaligned) pairs for gen5, so we can find
       * them for making the aligned pair class.
       */
      class_sizes[class_count++] = 2;
   }
   for (int r = 0; r < this->virtual_grf_count; r++) {
      int i;

      for (i = 0; i < class_count; i++) {
         if (class_sizes[i] == this->virtual_grf_sizes[r])
            break;
      }
      if (i == class_count) {
         if (this->virtual_grf_sizes[r] >= base_reg_count) {
            fail("Object too large to register allocate.\n");
         }

         class_sizes[class_count++] = this->virtual_grf_sizes[r];
      }
   }

   brw_alloc_reg_set_for_classes(brw, class_sizes, class_count,
                                 reg_width, base_reg_count);

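   /* Assign each node the register class whose size matches its virtual
    * GRF, so coloring only considers placements where the whole register
    * fits.
    */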
   struct ra_graph *g = ra_alloc_interference_graph(brw->wm.regs,
                                                    this->virtual_grf_count);

   for (int i = 0; i < this->virtual_grf_count; i++) {
      for (int c = 0; c < class_count; c++) {
         if (class_sizes[c] == this->virtual_grf_sizes[i]) {
            /* Special case: on pre-GEN6 hardware that supports PLN, the
             * second operand of a PLN instruction needs to be an
             * even-numbered register, so we have a special register class
             * wm_aligned_pairs_class to handle this case.  pre-GEN6 always
             * uses this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] as the
             * second operand of a PLN instruction (since it doesn't support
             * any other interpolation modes).  So all we need to do is find
             * that register and set it to the appropriate class.
             */
            if (brw->wm.aligned_pairs_class >= 0 &&
                this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg == i) {
               ra_set_node_class(g, i, brw->wm.aligned_pairs_class);
            } else {
               ra_set_node_class(g, i, brw->wm.classes[c]);
            }
            break;
         }
      }

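      /* Any two virtual GRFs whose live intervals overlap must not be
       * assigned overlapping hardware registers, so add an interference
       * edge for each such earlier node.
       */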
      for (int j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   if (!ra_allocate_no_spills(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);

      if (reg == -1) {
         fail("no register to spill\n");
      } else if (c->dispatch_width == 16) {
         fail("Failure to register allocate.  Reduce number of live scalar "
              "values to avoid this.");
      } else {
         spill_reg(reg);
      }

      ralloc_free(g);

      return false;
   }

252
253   /* Get the chosen virtual registers for each node, and map virtual
254    * regs in the register classes back down to real hardware reg
255    * numbers.
256    */
257   this->grf_used = first_assigned_grf;
258   for (int i = 0; i < this->virtual_grf_count; i++) {
259      int reg = ra_get_node_reg(g, i);
260
261      hw_reg_mapping[i] = (first_assigned_grf +
262			   brw->wm.ra_reg_to_grf[reg] * reg_width);
263      this->grf_used = MAX2(this->grf_used,
264			    hw_reg_mapping[i] + this->virtual_grf_sizes[i] *
265			    reg_width);
266   }
267
268   foreach_list(node, &this->instructions) {
269      fs_inst *inst = (fs_inst *)node;
270
271      assign_reg(hw_reg_mapping, &inst->dst, reg_width);
272      assign_reg(hw_reg_mapping, &inst->src[0], reg_width);
273      assign_reg(hw_reg_mapping, &inst->src[1], reg_width);
274      assign_reg(hw_reg_mapping, &inst->src[2], reg_width);
275   }
276
277   ralloc_free(g);
278
279   return true;
280}
281
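/* Emit a scratch read before inst that reloads one register's worth of
 * spilled data from spill_offset into dst.
 */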
void
fs_visitor::emit_unspill(fs_inst *inst, fs_reg dst, uint32_t spill_offset)
{
   fs_inst *unspill_inst = new(mem_ctx) fs_inst(FS_OPCODE_UNSPILL, dst);
   unspill_inst->offset = spill_offset;
   unspill_inst->ir = inst->ir;
   unspill_inst->annotation = inst->annotation;

   /* Choose an MRF that won't conflict with an MRF that's live across the
    * spill.  Nothing else will make it up to MRF 14/15.
    */
   unspill_inst->base_mrf = 14;
   unspill_inst->mlen = 1; /* header contains offset */
   inst->insert_before(unspill_inst);
}

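/* Estimate a spill cost for each virtual GRF, mark the ones that must not
 * be spilled, and ask the ra code for the best remaining candidate.
 * Returns -1 if there is nothing suitable to spill.
 */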
int
fs_visitor::choose_spill_reg(struct ra_graph *g)
{
   float loop_scale = 1.0;
   float spill_costs[this->virtual_grf_count];
   bool no_spill[this->virtual_grf_count];

   for (int i = 0; i < this->virtual_grf_count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            spill_costs[inst->src[i].reg] += loop_scale;

            /* Register spilling logic assumes full-width registers; smeared
             * registers have a width of 1, so if we try to spill them we'll
             * generate invalid assembly.  This shouldn't be a problem because
             * smeared registers are only used as short-term temporaries when
             * loading pull constants, so spilling them is unlikely to reduce
             * register pressure anyhow.
             */
            if (inst->src[i].smear >= 0) {
               no_spill[inst->src[i].reg] = true;
            }
         }
      }

      if (inst->dst.file == GRF) {
         spill_costs[inst->dst.reg] += inst->regs_written() * loop_scale;

         if (inst->dst.smear >= 0) {
            no_spill[inst->dst.reg] = true;
         }
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case FS_OPCODE_SPILL:
         if (inst->src[0].file == GRF)
            no_spill[inst->src[0].reg] = true;
         break;

      case FS_OPCODE_UNSPILL:
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }

   for (int i = 0; i < this->virtual_grf_count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

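/* Rewrite all reads and writes of spill_reg to go through scratch space:
 * each read gets a fresh temporary filled by an unspill emitted before the
 * instruction, and each write gets spilled back out afterward, one register
 * at a time.  Live intervals are invalidated since the program changed.
 */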
void
fs_visitor::spill_reg(int spill_reg)
{
   int size = virtual_grf_sizes[spill_reg];
   unsigned int spill_offset = c->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
   c->last_scratch += size * REG_SIZE;

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   foreach_list(node, &this->instructions) {
      fs_inst *inst = (fs_inst *)node;

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             inst->src[i].reg == spill_reg) {
            inst->src[i].reg = virtual_grf_alloc(1);
            emit_unspill(inst, inst->src[i],
                         spill_offset + REG_SIZE * inst->src[i].reg_offset);
         }
      }

      if (inst->dst.file == GRF &&
          inst->dst.reg == spill_reg) {
         int subset_spill_offset = (spill_offset +
                                    REG_SIZE * inst->dst.reg_offset);
         inst->dst.reg = virtual_grf_alloc(inst->regs_written());
         inst->dst.reg_offset = 0;

         /* If our write covers only part of the inst->regs_written(), we
          * need to unspill the destination first, since we write all of
          * the regs_written() back out.
          */
         if (inst->predicated || inst->force_uncompressed || inst->force_sechalf) {
            fs_reg unspill_reg = inst->dst;
            for (int chan = 0; chan < inst->regs_written(); chan++) {
               emit_unspill(inst, unspill_reg,
                            subset_spill_offset + REG_SIZE * chan);
               unspill_reg.reg_offset++;
            }
         }

         fs_reg spill_src = inst->dst;
         spill_src.reg_offset = 0;
         spill_src.abs = false;
         spill_src.negate = false;
         spill_src.smear = -1;

         for (int chan = 0; chan < inst->regs_written(); chan++) {
            fs_inst *spill_inst = new(mem_ctx) fs_inst(FS_OPCODE_SPILL,
                                                       reg_null_f, spill_src);
            spill_src.reg_offset++;
            spill_inst->offset = subset_spill_offset + chan * REG_SIZE;
            spill_inst->ir = inst->ir;
            spill_inst->annotation = inst->annotation;
            spill_inst->base_mrf = 14;
            spill_inst->mlen = 2; /* header, value */
            inst->insert_after(spill_inst);
         }
      }
   }

   this->live_intervals_valid = false;
}