brw_fs_reg_allocate.cpp revision e880a57a71bbd5152ed26367dcc7051f21c20981
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
#include "talloc.h"
}
#include "brw_fs.h"
#include "../glsl/glsl_types.h"
#include "../glsl/ir_optimization.h"
#include "../glsl/ir_print_visitor.h"

static void
assign_reg(int *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == GRF && reg->reg != 0) {
      assert(reg->reg_offset >= 0);
      reg->hw_reg = reg_hw_locations[reg->reg] + reg->reg_offset;
      reg->reg = 0;
   }
}

void
fs_visitor::assign_regs_trivial()
{
   int last_grf = 0;
   int hw_reg_mapping[this->virtual_grf_next];
   int i;

   hw_reg_mapping[0] = 0;
   hw_reg_mapping[1] = this->first_non_payload_grf;
   for (i = 2; i < this->virtual_grf_next; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->virtual_grf_sizes[i - 1]);
   }
   last_grf = hw_reg_mapping[i - 1] + this->virtual_grf_sizes[i - 1];

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      assign_reg(hw_reg_mapping, &inst->dst);
      assign_reg(hw_reg_mapping, &inst->src[0]);
      assign_reg(hw_reg_mapping, &inst->src[1]);
   }

   this->grf_used = last_grf + 1;
}

bool
fs_visitor::assign_regs()
{
   int last_grf = 0;
   int hw_reg_mapping[this->virtual_grf_next + 1];
   int base_reg_count = BRW_MAX_GRF - this->first_non_payload_grf;
   int class_sizes[base_reg_count];
   int class_count = 0;
   int aligned_pair_class = -1;

   calculate_live_intervals();

   /* Set up the register classes.
    *
    * The base registers store a scalar value.  For texture samples,
    * we get virtual GRFs composed of 4 contiguous hw registers.  For
    * structures and arrays, we store them as contiguous larger things
    * than that, though we should be able to do better most of the
    * time.
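    *
    * For example, since a texture sample result occupies 4 contiguous
    * hw registers, the first size-4 virtual GRF encountered in the loop
    * below adds a size-4 entry to class_sizes[], and every later size-4
    * virtual GRF reuses that same class.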
    */
   class_sizes[class_count++] = 1;
   if (brw->has_pln && intel->gen < 6) {
      /* Always set up the (unaligned) pairs for gen5, so we can find
       * them for making the aligned pair class.
       */
      class_sizes[class_count++] = 2;
   }
   for (int r = 1; r < this->virtual_grf_next; r++) {
      int i;

      for (i = 0; i < class_count; i++) {
         if (class_sizes[i] == this->virtual_grf_sizes[r])
            break;
      }
      if (i == class_count) {
         if (this->virtual_grf_sizes[r] >= base_reg_count) {
            fprintf(stderr, "Object too large to register allocate.\n");
            this->fail = true;
         }

         class_sizes[class_count++] = this->virtual_grf_sizes[r];
      }
   }

   int ra_reg_count = 0;
   /* The extra slot leaves room for the aligned-pair class that may be
    * appended below, so its bookkeeping doesn't write past the arrays.
    */
   int class_base_reg[class_count + 1];
   int class_reg_count[class_count + 1];
   int classes[class_count + 1];

   for (int i = 0; i < class_count; i++) {
      class_base_reg[i] = ra_reg_count;
      class_reg_count[i] = base_reg_count - (class_sizes[i] - 1);
      ra_reg_count += class_reg_count[i];
   }

   struct ra_regs *regs = ra_alloc_reg_set(ra_reg_count);
   for (int i = 0; i < class_count; i++) {
      classes[i] = ra_alloc_reg_class(regs);

      for (int i_r = 0; i_r < class_reg_count[i]; i_r++) {
         ra_class_add_reg(regs, classes[i], class_base_reg[i] + i_r);
      }

      /* Add conflicts between our contiguous registers aliasing
       * base regs and other register classes' contiguous registers
       * that alias base regs, or the base regs themselves for classes[0].
       */
      for (int c = 0; c <= i; c++) {
         for (int i_r = 0; i_r < class_reg_count[i]; i_r++) {
            for (int c_r = MAX2(0, i_r - (class_sizes[c] - 1));
                 c_r < MIN2(class_reg_count[c], i_r + class_sizes[i]);
                 c_r++) {

               if (0) {
                  printf("%d/%d conflicts %d/%d\n",
                         class_sizes[i], this->first_non_payload_grf + i_r,
                         class_sizes[c], this->first_non_payload_grf + c_r);
               }

               ra_add_reg_conflict(regs,
                                   class_base_reg[i] + i_r,
                                   class_base_reg[c] + c_r);
            }
         }
      }
   }

   /* Add a special class for aligned pairs, which we'll put delta_x/y
    * in on gen5 so that we can do PLN.
    */
   if (brw->has_pln && intel->gen < 6) {
      int reg_count = (base_reg_count - 1) / 2;
      int unaligned_pair_class = 1;
      assert(class_sizes[unaligned_pair_class] == 2);

      aligned_pair_class = class_count;
      classes[aligned_pair_class] = ra_alloc_reg_class(regs);
      class_sizes[aligned_pair_class] = 2;
      class_base_reg[aligned_pair_class] = 0;
      class_reg_count[aligned_pair_class] = 0;
      int start = (this->first_non_payload_grf & 1) ? 1 : 0;

      for (int i = 0; i < reg_count; i++) {
         ra_class_add_reg(regs, classes[aligned_pair_class],
                          class_base_reg[unaligned_pair_class] + i * 2 + start);
      }
      class_count++;
   }

   ra_set_finalize(regs);

   struct ra_graph *g = ra_alloc_interference_graph(regs,
                                                    this->virtual_grf_next);
   /* Node 0 is just a placeholder to keep virtual_grf[] mapping 1:1
    * with nodes.
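    * (assign_reg() above skips reg 0 and assign_regs_trivial() pins
    * hw_reg_mapping[0] to 0, so giving node 0 the scalar class here is
    * harmless.)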
    */
   ra_set_node_class(g, 0, classes[0]);

   for (int i = 1; i < this->virtual_grf_next; i++) {
      for (int c = 0; c < class_count; c++) {
         if (class_sizes[c] == this->virtual_grf_sizes[i]) {
            if (aligned_pair_class >= 0 &&
                this->delta_x.reg == i) {
               ra_set_node_class(g, i, classes[aligned_pair_class]);
            } else {
               ra_set_node_class(g, i, classes[c]);
            }
            break;
         }
      }

      for (int j = 1; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   if (!ra_allocate_no_spills(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);
      if (reg == -1 || intel->gen >= 6) {
         this->fail = true;
      } else {
         spill_reg(reg);
      }

      talloc_free(g);
      talloc_free(regs);

      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   hw_reg_mapping[0] = 0; /* unused */
   for (int i = 1; i < this->virtual_grf_next; i++) {
      int reg = ra_get_node_reg(g, i);
      int hw_reg = -1;

      for (int c = 0; c < class_count; c++) {
         if (reg >= class_base_reg[c] &&
             reg < class_base_reg[c] + class_reg_count[c]) {
            hw_reg = reg - class_base_reg[c];
            break;
         }
      }

      assert(hw_reg >= 0);
      hw_reg_mapping[i] = this->first_non_payload_grf + hw_reg;
      last_grf = MAX2(last_grf,
                      hw_reg_mapping[i] + this->virtual_grf_sizes[i] - 1);
   }

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      assign_reg(hw_reg_mapping, &inst->dst);
      assign_reg(hw_reg_mapping, &inst->src[0]);
      assign_reg(hw_reg_mapping, &inst->src[1]);
   }

   this->grf_used = last_grf + 1;

   talloc_free(g);
   talloc_free(regs);

   return true;
}

void
fs_visitor::emit_unspill(fs_inst *inst, fs_reg dst, uint32_t spill_offset)
{
   int size = virtual_grf_sizes[dst.reg];
   dst.reg_offset = 0;

   for (int chan = 0; chan < size; chan++) {
      fs_inst *unspill_inst = new(mem_ctx) fs_inst(FS_OPCODE_UNSPILL,
                                                   dst);
      dst.reg_offset++;
      unspill_inst->offset = spill_offset + chan * REG_SIZE;
      unspill_inst->ir = inst->ir;
      unspill_inst->annotation = inst->annotation;

      /* Choose a MRF that won't conflict with an MRF that's live across the
       * spill.  Nothing else will make it up to MRF 14/15.
       */
      unspill_inst->base_mrf = 14;
      unspill_inst->mlen = 1; /* header contains offset */
      inst->insert_before(unspill_inst);
   }
}

int
fs_visitor::choose_spill_reg(struct ra_graph *g)
{
   float loop_scale = 1.0;
   float spill_costs[this->virtual_grf_next];
   bool no_spill[this->virtual_grf_next];

   for (int i = 0; i < this->virtual_grf_next; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
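    *
    * Concretely: loop_scale starts at 1.0, gets multiplied by 10 at each
    * BRW_OPCODE_DO and divided by 10 at each BRW_OPCODE_WHILE below, so
    * an access nested two loops deep is charged 100x.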
    */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            int size = virtual_grf_sizes[inst->src[i].reg];
            spill_costs[inst->src[i].reg] += size * loop_scale;
         }
      }

      if (inst->dst.file == GRF) {
         int size = virtual_grf_sizes[inst->dst.reg];
         spill_costs[inst->dst.reg] += size * loop_scale;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case FS_OPCODE_SPILL:
         if (inst->src[0].file == GRF)
            no_spill[inst->src[0].reg] = true;
         break;

      case FS_OPCODE_UNSPILL:
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;
      }
   }

   for (int i = 0; i < this->virtual_grf_next; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
fs_visitor::spill_reg(int spill_reg)
{
   int size = virtual_grf_sizes[spill_reg];
   unsigned int spill_offset = c->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
   c->last_scratch += size * REG_SIZE;

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      fs_inst *inst = (fs_inst *)iter.get();

      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF &&
             inst->src[i].reg == spill_reg) {
            inst->src[i].reg = virtual_grf_alloc(size);
            emit_unspill(inst, inst->src[i], spill_offset);
         }
      }

      if (inst->dst.file == GRF &&
          inst->dst.reg == spill_reg) {
         inst->dst.reg = virtual_grf_alloc(size);

         /* Since we spill/unspill the whole thing even if we access
          * just a component, we may need to unspill before the
          * instruction we're spilling for.
          */
         if (size != 1 || inst->predicated) {
            emit_unspill(inst, inst->dst, spill_offset);
         }

         fs_reg spill_src = inst->dst;
         spill_src.reg_offset = 0;
         spill_src.abs = false;
         spill_src.negate = false;
         spill_src.smear = -1;

         for (int chan = 0; chan < size; chan++) {
            fs_inst *spill_inst = new(mem_ctx) fs_inst(FS_OPCODE_SPILL,
                                                       reg_null_f, spill_src);
            spill_src.reg_offset++;
            spill_inst->offset = spill_offset + chan * REG_SIZE;
            spill_inst->ir = inst->ir;
            spill_inst->annotation = inst->annotation;
            spill_inst->base_mrf = 14;
            spill_inst->mlen = 2; /* header, value */
            inst->insert_after(spill_inst);
         }
      }
   }

   this->live_intervals_valid = false;
}
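
/* Usage sketch (an illustration, not part of this revision): per the
 * "the caller will loop back into here to try again" comment in
 * assign_regs(), the expected driver shape is roughly:
 *
 *    while (!assign_regs()) {
 *       if (fail)
 *          break;
 *    }
 *
 * Each failed attempt spills one register (pre-gen6) and retries; on
 * gen6+ or when no spill candidate exists, fail is set and the loop
 * exits.  The actual retry loop lives in the caller (fs_visitor::run()
 * in brw_fs.cpp), whose exact form is assumed here.
 */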