gen7_wm_surface_state.c revision 5bffbd7ba2ba2ff21469b2a69a0ed67f0802fec7
1/* 2 * Copyright © 2011 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 */

#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "program/prog_parameter.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/**
 * Translate an I915_TILING_* mode into the SURFACE_STATE ss0 tiling fields.
 *
 * Note: any other tiling value leaves ss0.tiled_surface/tile_walk untouched
 * (callers memset the surface state first, so they default to linear).
 */
void
gen7_set_surface_tiling(struct gen7_surface_state *surf, uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_NONE:
      surf->ss0.tiled_surface = 0;
      surf->ss0.tile_walk = 0;
      break;
   case I915_TILING_X:
      surf->ss0.tiled_surface = 1;
      surf->ss0.tile_walk = BRW_TILEWALK_XMAJOR;
      break;
   case I915_TILING_Y:
      surf->ss0.tiled_surface = 1;
      surf->ss0.tile_walk = BRW_TILEWALK_YMAJOR;
      break;
   }
}


/**
 * Program the multisample fields of SURFACE_STATE (ss4).
 *
 * The sample count is snapped to the supported Gen7 values: >4 -> 8,
 * 2..4 -> 4, otherwise single-sampled.  IMS-layout surfaces (interleaved
 * depth/stencil style) use MSFMT_DEPTH_STENCIL; all other layouts use
 * MSFMT_MSS.
 */
void
gen7_set_surface_msaa(struct gen7_surface_state *surf, unsigned num_samples,
                      enum intel_msaa_layout layout)
{
   if (num_samples > 4)
      surf->ss4.num_multisamples = GEN7_SURFACE_MULTISAMPLECOUNT_8;
   else if (num_samples > 1)
      surf->ss4.num_multisamples = GEN7_SURFACE_MULTISAMPLECOUNT_4;
   else
      surf->ss4.num_multisamples = GEN7_SURFACE_MULTISAMPLECOUNT_1;

   surf->ss4.multisampled_surface_storage_format =
      layout == INTEL_MSAA_LAYOUT_IMS ?
      GEN7_SURFACE_MSFMT_DEPTH_STENCIL :
      GEN7_SURFACE_MSFMT_MSS;
}


/**
 * Fill in the MCS (multisample control surface) fields of SURFACE_STATE
 * (ss6) and emit the relocation for the MCS buffer address.
 *
 * \param surf_offset       batch offset of the surface state being built,
 *                          used as the relocation target.
 * \param is_render_target  selects render vs. sampler GEM domains for the
 *                          relocation.
 */
void
gen7_set_surface_mcs_info(struct brw_context *brw,
                          struct gen7_surface_state *surf,
                          uint32_t surf_offset,
                          const struct intel_mipmap_tree *mcs_mt,
                          bool is_render_target)
{
   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    */
   assert(mcs_mt->region->tiling == I915_TILING_Y);

   /* Compute the pitch in units of tiles.  To do this we need to divide the
    * pitch in bytes by 128, since a single Y-tile is 128 bytes wide.
    */
   unsigned pitch_bytes = mcs_mt->region->pitch * mcs_mt->cpp;
   unsigned pitch_tiles = pitch_bytes / 128;

   /* The upper 20 bits of surface state DWORD 6 are the upper 20 bits of the
    * GPU address of the MCS buffer; the lower 12 bits contain other control
    * information.  Since buffer addresses are always on 4k boundaries (and
    * thus have their lower 12 bits zero), we can use an ordinary reloc to do
    * the necessary address translation.
    */
   assert ((mcs_mt->region->bo->offset & 0xfff) == 0);
   surf->ss6.mcs_enabled.mcs_enable = 1;
   surf->ss6.mcs_enabled.mcs_surface_pitch = pitch_tiles - 1;
   surf->ss6.mcs_enabled.mcs_base_address = mcs_mt->region->bo->offset >> 12;
   /* The reloc delta is only the low 12 control bits; the kernel patches the
    * upper 20 address bits at execbuf time.
    */
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           surf_offset +
                           offsetof(struct gen7_surface_state, ss6),
                           mcs_mt->region->bo,
                           surf->ss6.raw_data & 0xfff,
                           is_render_target ? I915_GEM_DOMAIN_RENDER
                           : I915_GEM_DOMAIN_SAMPLER,
                           is_render_target ? I915_GEM_DOMAIN_RENDER : 0);
}


/**
 * Debug-build sanity checks on a completed SURFACE_STATE, asserting the
 * multisampling restrictions documented in the Graphics BSpec.  No-op in
 * release builds (all checks are asserts).
 */
void
gen7_check_surface_setup(struct gen7_surface_state *surf,
                         bool is_render_target)
{
   bool is_multisampled =
      surf->ss4.num_multisamples != GEN7_SURFACE_MULTISAMPLECOUNT_1;
   /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
    * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Surface Array
    * Spacing:
    *
    *   If Multisampled Surface Storage Format is MSFMT_MSS and Number of
    *   Multisamples is not MULTISAMPLECOUNT_1, this field must be set to
    *   ARYSPC_LOD0.
    */
   if (surf->ss4.multisampled_surface_storage_format == GEN7_SURFACE_MSFMT_MSS
       && is_multisampled)
      assert(surf->ss0.surface_array_spacing == GEN7_SURFACE_ARYSPC_LOD0);

   /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
    * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
    * Surface Storage Format:
    *
    *   All multisampled render target surfaces must have this field set to
    *   MSFMT_MSS.
    *
    * But also:
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   if (is_render_target && is_multisampled) {
      assert(surf->ss4.multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
    * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
    * Surface Storage Format:
    *
    *   If the surface's Number of Multisamples is MULTISAMPLECOUNT_8, Width
    *   is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
    *   field must be set to MSFMT_MSS.
    *
    * (ss2.width holds width-1, so the >= 8192 test on the field matches the
    * ">= 8193 pixels" wording above.)
    */
   if (surf->ss4.num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 &&
       surf->ss2.width >= 8192) {
      assert(surf->ss4.multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_MSS);
   }

   /* From the Graphics BSpec: vol5c Shared Functions [SNB+] > State >
    * SURFACE_STATE > SURFACE_STATE for most messages [DevIVB]: Multisampled
    * Surface Storage Format:
    *
    *   If the surface's Number of Multisamples is MULTISAMPLECOUNT_8,
    *   ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface's Number of
    *   Multisamples is MULTISAMPLECOUNT_4, ((Depth+1) * (Height+1)) is >
    *   8,388,608, this field must be set to MSFMT_DEPTH_STENCIL.  This field
    *   must be set to MSFMT_DEPTH_STENCIL if Surface Format is one of the
    *   following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
    *   R24_UNORM_X8_TYPELESS.
    *
    * But also:
    *
    *   This field is ignored if Number of Multisamples is MULTISAMPLECOUNT_1.
    */
   uint32_t depth = surf->ss3.depth + 1;
   uint32_t height = surf->ss2.height + 1;
   if (surf->ss4.num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_8 &&
       depth * height > 4194304) {
      assert(surf->ss4.multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (surf->ss4.num_multisamples == GEN7_SURFACE_MULTISAMPLECOUNT_4 &&
       depth * height > 8388608) {
      assert(surf->ss4.multisampled_surface_storage_format ==
             GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
   }
   if (is_multisampled) {
      switch (surf->ss0.surface_format) {
      /* No default case: formats other than these four have no
       * MSFMT_DEPTH_STENCIL requirement, so they intentionally fall out of
       * the switch unchecked.
       */
      case BRW_SURFACEFORMAT_I24X8_UNORM:
      case BRW_SURFACEFORMAT_L24X8_UNORM:
      case BRW_SURFACEFORMAT_A24X8_UNORM:
      case BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS:
         assert(surf->ss4.multisampled_surface_storage_format ==
                GEN7_SURFACE_MSFMT_DEPTH_STENCIL);
      }
   }
}


/**
 * Build SURFACE_STATE for a GL_TEXTURE_BUFFER texture: a linear
 * SURFTYPE_BUFFER surface over the backing buffer object (or an all-zero
 * surface when no buffer is bound).
 */
static void
gen7_update_buffer_texture_surface(struct gl_context *ctx, GLuint unit)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   const GLuint surf_index = SURF_INDEX_TEXTURE(unit);
   struct gen7_surface_state *surf;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   drm_intel_bo *bo = intel_obj ? intel_obj->buffer : NULL;
   gl_format format = tObj->_BufferObjectFormat;
   int texel_size = _mesa_get_format_bytes(format);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, &brw->wm.surf_offset[surf_index]);
   memset(surf, 0, sizeof(*surf));

   surf->ss0.surface_type = BRW_SURFACE_BUFFER;
   surf->ss0.surface_format = brw_format_for_mesa_format(format);

   surf->ss0.render_cache_read_write = 1;

   /* RGBA_FLOAT32 is the default _BufferObjectFormat, so only complain for
    * other formats that failed to translate.
    */
   if (surf->ss0.surface_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   if (bo) {
      surf->ss1.base_addr = bo->offset; /* reloc */

      /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
       * bspec ("Data Cache") says that the data cache does not exist as
       * a separate cache and is just the sampler cache.
       */
      drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                              (brw->wm.surf_offset[surf_index] +
                               offsetof(struct gen7_surface_state, ss1)),
                              bo, 0,
                              I915_GEM_DOMAIN_SAMPLER, 0);

      /* The 27-bit buffer size is split across the width/height/depth
       * fields.
       *
       * NOTE(review): unlike gen7_create_constant_surface(), which packs
       * (width - 1) into these fields, w here is the raw texel count with no
       * "- 1" — verify against the PRM's "size minus one" encoding for
       * SURFTYPE_BUFFER.
       */
      int w = intel_obj->Base.Size / texel_size;
      surf->ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
      surf->ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
      surf->ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
      surf->ss3.pitch = texel_size - 1;
   } else {
      surf->ss1.base_addr = 0;
      surf->ss2.width = 0;
      surf->ss2.height = 0;
      surf->ss3.depth = 0;
      surf->ss3.pitch = 0;
   }

   gen7_set_surface_tiling(surf, I915_TILING_NONE);

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

/**
 * Build SURFACE_STATE for the texture bound to texture unit \p unit and
 * store its batch offset in brw->wm.surf_offset.  Buffer textures are
 * delegated to gen7_update_buffer_texture_surface().
 */
static void
gen7_update_texture_surface(struct gl_context *ctx, GLuint unit)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct intel_mipmap_tree *mt = intelObj->mt;
   struct gl_texture_image *firstImage = tObj->Image[0][tObj->BaseLevel];
   struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
   const GLuint surf_index = SURF_INDEX_TEXTURE(unit);
   struct gen7_surface_state *surf;
   int width, height, depth;

   if (tObj->Target == GL_TEXTURE_BUFFER) {
      gen7_update_buffer_texture_surface(ctx, unit);
      return;
   }

   /* We don't support MSAA for textures. */
   assert(!mt->array_spacing_lod0);
   assert(mt->num_samples <= 1);

   intel_miptree_get_dimensions_for_image(firstImage, &width, &height, &depth);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, &brw->wm.surf_offset[surf_index]);
   memset(surf, 0, sizeof(*surf));

   /* Surface state alignment fields encode 2/4 (vertical) and 4/8
    * (horizontal); a set bit selects the larger alignment.
    */
   if (mt->align_h == 4)
      surf->ss0.vertical_alignment = 1;
   if (mt->align_w == 8)
      surf->ss0.horizontal_alignment = 1;

   surf->ss0.surface_type = translate_tex_target(tObj->Target);
   surf->ss0.surface_format = translate_tex_format(mt->format,
                                                   firstImage->InternalFormat,
                                                   tObj->DepthMode,
                                                   sampler->sRGBDecode);
   if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
      /* Enable sampling from all six cube faces. */
      surf->ss0.cube_pos_x = 1;
      surf->ss0.cube_pos_y = 1;
      surf->ss0.cube_pos_z = 1;
      surf->ss0.cube_neg_x = 1;
      surf->ss0.cube_neg_y = 1;
      surf->ss0.cube_neg_z = 1;
   }

   surf->ss0.is_array = depth > 1 && tObj->Target != GL_TEXTURE_3D;

   gen7_set_surface_tiling(surf, intelObj->mt->region->tiling);

   /* ss0 remaining fields:
    * - vert_line_stride (exists on gen6 but we ignore it)
    * - vert_line_stride_ofs (exists on gen6 but we ignore it)
    * - surface_array_spacing
    * - render_cache_read_write (exists on gen6 but ignored here)
    */

   surf->ss1.base_addr =
      intelObj->mt->region->bo->offset + intelObj->mt->offset; /* reloc */

   surf->ss2.width = width - 1;
   surf->ss2.height = height - 1;

   /* Pitch is stored in bytes, minus one. */
   surf->ss3.pitch = (intelObj->mt->region->pitch * intelObj->mt->cpp) - 1;
   surf->ss3.depth = depth - 1;

   /* ss4: ignored? */

   surf->ss5.mip_count = intelObj->_MaxLevel - tObj->BaseLevel;
   surf->ss5.min_lod = 0;

   /* ss5 remaining fields:
    * - x_offset (N/A for textures?)
    * - y_offset (ditto)
    * - cache_control
    */

   /* Haswell adds programmable channel swizzles; identity mapping here. */
   if (brw->intel.is_haswell) {
      surf->ss7.shader_channel_select_r = HSW_SCS_RED;
      surf->ss7.shader_channel_select_g = HSW_SCS_GREEN;
      surf->ss7.shader_channel_select_b = HSW_SCS_BLUE;
      surf->ss7.shader_channel_select_a = HSW_SCS_ALPHA;
   }

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           brw->wm.surf_offset[surf_index] +
                           offsetof(struct gen7_surface_state, ss1),
                           intelObj->mt->region->bo, intelObj->mt->offset,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will
 * be read from this buffer with Data Port Read instructions/messages.
 */
void
gen7_create_constant_surface(struct brw_context *brw,
                             drm_intel_bo *bo,
                             uint32_t offset,
                             int width,
                             uint32_t *out_offset)
{
   const GLint w = width - 1;
   struct gen7_surface_state *surf;

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, out_offset);
   memset(surf, 0, sizeof(*surf));

   surf->ss0.surface_type = BRW_SURFACE_BUFFER;
   surf->ss0.surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;

   surf->ss0.render_cache_read_write = 1;

   assert(bo);
   surf->ss1.base_addr = bo->offset + offset; /* reloc */

   /* (width - 1) is split across the width/height/depth fields, per the
    * SURFTYPE_BUFFER size encoding.
    */
   surf->ss2.width = w & 0x7f;            /* bits 6:0 of size or width */
   surf->ss2.height = (w >> 7) & 0x1fff;  /* bits 19:7 of size or width */
   surf->ss3.depth = (w >> 20) & 0x7f;    /* bits 26:20 of size or width */
   surf->ss3.pitch = (16 - 1); /* stride between samples */
   gen7_set_surface_tiling(surf, I915_TILING_NONE); /* tiling now allowed */

   /* Haswell adds programmable channel swizzles; identity mapping here. */
   if (brw->intel.is_haswell) {
      surf->ss7.shader_channel_select_r = HSW_SCS_RED;
      surf->ss7.shader_channel_select_g = HSW_SCS_GREEN;
      surf->ss7.shader_channel_select_b = HSW_SCS_BLUE;
      surf->ss7.shader_channel_select_a = HSW_SCS_ALPHA;
   }

   /* Emit relocation to surface contents.  Section 5.1.1 of the gen4
    * bspec ("Data Cache") says that the data cache does not exist as
    * a separate cache and is just the sampler cache.
    */
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           (*out_offset +
                            offsetof(struct gen7_surface_state, ss1)),
                           bo, offset,
                           I915_GEM_DOMAIN_SAMPLER, 0);

   gen7_check_surface_setup(surf, false /* is_render_target */);
}

/**
 * Build a SURFTYPE_NULL surface for render-target slot \p unit, sized to
 * match the current draw framebuffer.
 */
static void
gen7_update_null_renderbuffer_surface(struct brw_context *brw, unsigned unit)
{
   /* From the Ivy bridge PRM, Vol4 Part1 p62 (Surface Type: Programming
    * Notes):
    *
    *   A null surface is used in instances where an actual surface is not
    *   bound.  When a write message is generated to a null surface, no
    *   actual surface is written to.  When a read message (including any
    *   sampling engine message) is generated to a null surface, the result
    *   is all zeros.  Note that a null surface type is allowed to be used
    *   with all messages, even if it is not specificially indicated as
    *   supported.  All of the remaining fields in surface state are ignored
    *   for null surfaces, with the following exceptions: Width, Height,
    *   Depth, LOD, and Render Target View Extent fields must match the
    *   depth buffer's corresponding state for all render target surfaces,
    *   including null.
    */
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gen7_surface_state *surf;

   /* _NEW_BUFFERS */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, &brw->wm.surf_offset[unit]);
   memset(surf, 0, sizeof(*surf));

   surf->ss0.surface_type = BRW_SURFACE_NULL;
   surf->ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;

   surf->ss2.width = fb->Width - 1;
   surf->ss2.height = fb->Height - 1;

   /* From the Ivy bridge PRM, Vol4 Part1 p65 (Tiled Surface: Programming Notes):
    *
    *   If Surface Type is SURFTYPE_NULL, this field must be TRUE.
    */
   gen7_set_surface_tiling(surf, I915_TILING_Y);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

/**
 * Sets up a surface state structure to point at the given region.
 * While it is only used for the front/back buffer currently, it should be
 * usable for further buffers when doing ARB_draw_buffer support.
 */
static void
gen7_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 unsigned int unit)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_region *region = irb->mt->region;
   struct gen7_surface_state *surf;
   uint32_t tile_x, tile_y;
   gl_format rb_format = intel_rb_format(irb);

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                          sizeof(*surf), 32, &brw->wm.surf_offset[unit]);
   memset(surf, 0, sizeof(*surf));

   /* Render targets can't use IMS layout */
   assert(irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);

   if (irb->mt->align_h == 4)
      surf->ss0.vertical_alignment = 1;
   if (irb->mt->align_w == 8)
      surf->ss0.horizontal_alignment = 1;

   switch (rb_format) {
   case MESA_FORMAT_SARGB8:
      /* _NEW_BUFFERS
       *
       * Without GL_EXT_framebuffer_sRGB we shouldn't bind sRGB surfaces to the
       * blend/update as sRGB.
       */
      if (ctx->Color.sRGBEnabled)
         surf->ss0.surface_format = brw_format_for_mesa_format(rb_format);
      else
         surf->ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
      break;
   default:
      assert(brw_render_target_supported(intel, rb));
      surf->ss0.surface_format = brw->render_target_format[rb_format];
      if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
         _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                       __FUNCTION__, _mesa_get_format_name(rb_format));
      }
      break;
   }

   surf->ss0.surface_type = BRW_SURFACE_2D;
   surf->ss0.surface_array_spacing = irb->mt->array_spacing_lod0 ?
      GEN7_SURFACE_ARYSPC_LOD0 : GEN7_SURFACE_ARYSPC_FULL;

   /* reloc */
   surf->ss1.base_addr = intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);
   surf->ss1.base_addr += region->bo->offset; /* reloc */

   assert(brw->has_surface_tile_offset);
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf->ss5.x_offset = tile_x / 4;
   surf->ss5.y_offset = tile_y / 2;

   surf->ss2.width = rb->Width - 1;
   surf->ss2.height = rb->Height - 1;
   gen7_set_surface_tiling(surf, region->tiling);
   surf->ss3.pitch = (region->pitch * region->cpp) - 1;

   gen7_set_surface_msaa(surf, irb->mt->num_samples, irb->mt->msaa_layout);

   /* CMS layout carries an auxiliary MCS buffer; wire it into ss6. */
   if (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      gen7_set_surface_mcs_info(brw, surf, brw->wm.surf_offset[unit],
                                irb->mt->mcs_mt, true /* is_render_target */);
   }

   /* Haswell adds programmable channel swizzles; identity mapping here. */
   if (intel->is_haswell) {
      surf->ss7.shader_channel_select_r = HSW_SCS_RED;
      surf->ss7.shader_channel_select_g = HSW_SCS_GREEN;
      surf->ss7.shader_channel_select_b = HSW_SCS_BLUE;
      surf->ss7.shader_channel_select_a = HSW_SCS_ALPHA;
   }

   /* The reloc delta is the tile offset already folded into base_addr. */
   drm_intel_bo_emit_reloc(brw->intel.batch.bo,
                           brw->wm.surf_offset[unit] +
                           offsetof(struct gen7_surface_state, ss1),
                           region->bo,
                           surf->ss1.base_addr - region->bo->offset,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   gen7_check_surface_setup(surf, true /* is_render_target */);
}

/** Install the gen7 surface-state builders into the context vtable. */
void
gen7_init_vtable_surface_functions(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   intel->vtbl.update_texture_surface = gen7_update_texture_surface;
   intel->vtbl.update_renderbuffer_surface = gen7_update_renderbuffer_surface;
   intel->vtbl.update_null_renderbuffer_surface =
      gen7_update_null_renderbuffer_surface;
   intel->vtbl.create_constant_surface = gen7_create_constant_surface;
}