/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */


#include "compiler/nir/nir.h"
#include "main/context.h"
#include "main/blend.h"
#include "main/mtypes.h"
#include "main/samplerobj.h"
#include "main/shaderimage.h"
#include "main/teximage.h"
#include "program/prog_parameter.h"
#include "program/prog_instruction.h"
#include "main/framebuffer.h"
#include "main/shaderapi.h"

#include "isl/isl.h"

#include "intel_mipmap_tree.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_fbo.h"
#include "intel_buffer_objects.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

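/* Flags that modify how a surface is emitted.  INTEL_AUX_BUFFER_DISABLED
 * makes brw_emit_surface_state() ignore any MCS/HiZ auxiliary buffer when
 * filling out the surface state.
 */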
enum {
   INTEL_RENDERBUFFER_LAYERED = 1 << 0,
   INTEL_AUX_BUFFER_DISABLED = 1 << 1,
};

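/* Memory object control state (MOCS) values for texture and render-target
 * surfaces, indexed by hardware generation (gen7 through gen9).
 */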
uint32_t tex_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_WB,
   [9] = SKL_MOCS_WB,
};

uint32_t rb_mocs[] = {
   [7] = GEN7_MOCS_L3,
   [8] = BDW_MOCS_PTE,
   [9] = SKL_MOCS_PTE,
};

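/**
 * Fill out a SURFACE_STATE structure for the given miptree and view using
 * ISL, and emit the relocations for the main buffer and, when present, the
 * auxiliary (MCS or HiZ) buffer.  If the view's dimension layout does not
 * match the miptree's layout, fall back to addressing a single level/slice
 * directly via the surface tile offset feature.
 */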
static void
brw_emit_surface_state(struct brw_context *brw,
                       struct intel_mipmap_tree *mt, uint32_t flags,
                       GLenum target, struct isl_view view,
                       uint32_t mocs, uint32_t *surf_offset, int surf_index,
                       unsigned read_domains, unsigned write_domains)
{
   uint32_t tile_x = mt->level[0].slice[0].x_offset;
   uint32_t tile_y = mt->level[0].slice[0].y_offset;
   uint32_t offset = mt->offset;

   struct isl_surf surf;
   intel_miptree_get_isl_surf(brw, mt, &surf);

   surf.dim = get_isl_surf_dim(target);

   const enum isl_dim_layout dim_layout =
      get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);

   if (surf.dim_layout != dim_layout) {
      /* The layout of the specified texture target is not compatible with the
       * actual layout of the miptree structure in memory -- You're entering
       * dangerous territory, this can only possibly work if you only intended
       * to access a single level and slice of the texture, and the hardware
       * supports the tile offset feature in order to allow non-tile-aligned
       * base offsets, since we'll have to point the hardware to the first
       * texel of the level instead of relying on the usual base level/layer
       * controls.
       */
      assert(brw->has_surface_tile_offset);
      assert(view.levels == 1 && view.array_len == 1);
      assert(tile_x == 0 && tile_y == 0);

      offset += intel_miptree_get_tile_offsets(mt, view.base_level,
                                               view.base_array_layer,
                                               &tile_x, &tile_y);

      /* Minify the logical dimensions of the texture. */
      const unsigned l = view.base_level - mt->first_level;
      surf.logical_level0_px.width = minify(surf.logical_level0_px.width, l);
      surf.logical_level0_px.height = surf.dim <= ISL_SURF_DIM_1D ? 1 :
         minify(surf.logical_level0_px.height, l);
      surf.logical_level0_px.depth = surf.dim <= ISL_SURF_DIM_2D ? 1 :
         minify(surf.logical_level0_px.depth, l);

      /* Only the base level and layer can be addressed with the overridden
       * layout.
       */
      surf.logical_level0_px.array_len = 1;
      surf.levels = 1;
      surf.dim_layout = dim_layout;

      /* The requested slice of the texture is now at the base level and
       * layer.
       */
      view.base_level = 0;
      view.base_array_layer = 0;
   }

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   drm_intel_bo *aux_bo;
   struct isl_surf *aux_surf = NULL, aux_surf_s;
   uint64_t aux_offset = 0;
   enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
   if ((mt->mcs_buf || intel_miptree_sample_with_hiz(brw, mt)) &&
       !(flags & INTEL_AUX_BUFFER_DISABLED)) {
      intel_miptree_get_aux_isl_surf(brw, mt, &aux_surf_s, &aux_usage);
      aux_surf = &aux_surf_s;

      if (mt->mcs_buf) {
         aux_bo = mt->mcs_buf->bo;
         aux_offset = mt->mcs_buf->bo->offset64 + mt->mcs_buf->offset;
      } else {
         aux_bo = mt->hiz_buf->aux_base.bo;
         aux_offset = mt->hiz_buf->aux_base.bo->offset64;
      }

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = intel_miptree_get_isl_clear_color(brw, mt);
   }

   void *state = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                   brw->isl_dev.ss.size,
                                   brw->isl_dev.ss.align,
                                   surf_index, surf_offset);

   isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
                       .address = mt->bo->offset64 + offset,
                       .aux_surf = aux_surf, .aux_usage = aux_usage,
                       .aux_address = aux_offset,
                       .mocs = mocs, .clear_color = clear_color,
                       .x_offset_sa = tile_x, .y_offset_sa = tile_y);

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *surf_offset + brw->isl_dev.ss.addr_offset,
                           mt->bo, offset,
                           read_domains, write_domains);

   if (aux_surf) {
      /* On gen7 and prior, the upper 20 bits of surface state DWORD 6 are the
       * upper 20 bits of the GPU address of the MCS buffer; the lower 12 bits
       * contain other control information.  Since buffer addresses are always
       * on 4k boundaries (and thus have their lower 12 bits zero), we can use
       * an ordinary reloc to do the necessary address translation.
       */
      assert((aux_offset & 0xfff) == 0);
      uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *surf_offset + brw->isl_dev.ss.aux_addr_offset,
                              aux_bo, *aux_addr - aux_bo->offset64,
                              read_domains, write_domains);
   }
}

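/**
 * Emit a render-target SURFACE_STATE for the given renderbuffer and return
 * the offset of that state within the batch.
 */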
uint32_t
brw_update_renderbuffer_surface(struct brw_context *brw,
                                struct gl_renderbuffer *rb,
                                uint32_t flags, unsigned unit /* unused */,
                                uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;

   if (brw->gen < 9) {
      assert(!(flags & INTEL_AUX_BUFFER_DISABLED));
   }

   assert(brw_render_target_supported(brw, rb));

   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   const unsigned layer_multiplier =
      (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
       irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
      MAX2(irb->mt->num_samples, 1) : 1;

   struct isl_view view = {
      .format = brw->render_target_format[rb_format],
      .base_level = irb->mt_level - irb->mt->first_level,
      .levels = 1,
      .base_array_layer = irb->mt_layer / layer_multiplier,
      .array_len = MAX2(irb->layer_count, 1),
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_RENDER_TARGET_BIT,
   };

   uint32_t offset;
   brw_emit_surface_state(brw, mt, flags, mt->target, view,
                          rb_mocs[brw->gen],
                          &offset, surf_index,
                          I915_GEM_DOMAIN_RENDER,
                          I915_GEM_DOMAIN_RENDER);
   return offset;
}

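/**
 * Translate a GL texture target to the corresponding BRW_SURFACE_* type.
 */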
GLuint
translate_tex_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_1D:
   case GL_TEXTURE_1D_ARRAY_EXT:
      return BRW_SURFACE_1D;

   case GL_TEXTURE_RECTANGLE_NV:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_2D:
   case GL_TEXTURE_2D_ARRAY_EXT:
   case GL_TEXTURE_EXTERNAL_OES:
   case GL_TEXTURE_2D_MULTISAMPLE:
   case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
      return BRW_SURFACE_2D;

   case GL_TEXTURE_3D:
      return BRW_SURFACE_3D;

   case GL_TEXTURE_CUBE_MAP:
   case GL_TEXTURE_CUBE_MAP_ARRAY:
      return BRW_SURFACE_CUBE;

   default:
      unreachable("not reached");
   }
}

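/**
 * Translate an I915_TILING_* value to the SURFACE_STATE tiling bits.
 */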
uint32_t
brw_get_surface_tiling_bits(uint32_t tiling)
{
   switch (tiling) {
   case I915_TILING_X:
      return BRW_SURFACE_TILED;
   case I915_TILING_Y:
      return BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y;
   default:
      return 0;
   }
}


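/**
 * Translate a sample count to the SURFACE_STATE multisample count field.
 */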
uint32_t
brw_get_surface_num_multisamples(unsigned num_samples)
{
   if (num_samples > 1)
      return BRW_SURFACE_MULTISAMPLECOUNT_4;
   else
      return BRW_SURFACE_MULTISAMPLECOUNT_1;
}

/**
 * Compute the combination of DEPTH_TEXTURE_MODE and EXT_texture_swizzle
 * swizzling.
 */
int
brw_get_texture_swizzle(const struct gl_context *ctx,
                        const struct gl_texture_object *t)
{
   const struct gl_texture_image *img = t->Image[0][t->BaseLevel];

   int swizzles[SWIZZLE_NIL + 1] = {
      SWIZZLE_X,
      SWIZZLE_Y,
      SWIZZLE_Z,
      SWIZZLE_W,
      SWIZZLE_ZERO,
      SWIZZLE_ONE,
      SWIZZLE_NIL
   };

   if (img->_BaseFormat == GL_DEPTH_COMPONENT ||
       img->_BaseFormat == GL_DEPTH_STENCIL) {
      GLenum depth_mode = t->DepthMode;

      /* In ES 3.0, DEPTH_TEXTURE_MODE is expected to be GL_RED for textures
       * with depth component data specified with a sized internal format.
       * Otherwise, it's left at the old default, GL_LUMINANCE.
       */
      if (_mesa_is_gles3(ctx) &&
          img->InternalFormat != GL_DEPTH_COMPONENT &&
          img->InternalFormat != GL_DEPTH_STENCIL) {
         depth_mode = GL_RED;
      }

      switch (depth_mode) {
      case GL_ALPHA:
         swizzles[0] = SWIZZLE_ZERO;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_LUMINANCE:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
         break;
      case GL_INTENSITY:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
         break;
      case GL_RED:
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_ZERO;
         swizzles[2] = SWIZZLE_ZERO;
         swizzles[3] = SWIZZLE_ONE;
         break;
      }
   }

   GLenum datatype = _mesa_get_format_datatype(img->TexFormat);

   /* If the texture's format is alpha-only, force R, G, and B to
    * 0.0. Similarly, if the texture's format has no alpha channel,
    * force the alpha value read to 1.0. This allows for the
    * implementation to use an RGBA texture for any of these formats
    * without leaking any unexpected values.
    */
   switch (img->_BaseFormat) {
   case GL_ALPHA:
      swizzles[0] = SWIZZLE_ZERO;
      swizzles[1] = SWIZZLE_ZERO;
      swizzles[2] = SWIZZLE_ZERO;
      break;
   case GL_LUMINANCE:
      if (t->_IsIntegerFormat || datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_ONE;
      }
      break;
   case GL_LUMINANCE_ALPHA:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_W;
      }
      break;
   case GL_INTENSITY:
      if (datatype == GL_SIGNED_NORMALIZED) {
         swizzles[0] = SWIZZLE_X;
         swizzles[1] = SWIZZLE_X;
         swizzles[2] = SWIZZLE_X;
         swizzles[3] = SWIZZLE_X;
      }
      break;
   case GL_RED:
   case GL_RG:
   case GL_RGB:
      if (_mesa_get_format_bits(img->TexFormat, GL_ALPHA_BITS) > 0)
         swizzles[3] = SWIZZLE_ONE;
      break;
   }

   return MAKE_SWIZZLE4(swizzles[GET_SWZ(t->_Swizzle, 0)],
                        swizzles[GET_SWZ(t->_Swizzle, 1)],
                        swizzles[GET_SWZ(t->_Swizzle, 2)],
                        swizzles[GET_SWZ(t->_Swizzle, 3)]);
}

/**
 * Convert a swizzle enumeration (i.e. SWIZZLE_X) to one of the Gen7.5+
 * "Shader Channel Select" enumerations (i.e. HSW_SCS_RED).  The mappings are
 *
 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
 *         0          1          2          3             4            5
 *         4          5          6          7             0            1
 *   SCS_RED, SCS_GREEN,  SCS_BLUE, SCS_ALPHA,     SCS_ZERO,     SCS_ONE
 *
 * which is simply adding 4 then modding by 8 (or anding with 7).
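 * For example, SWIZZLE_W (3) becomes (3 + 4) & 7 = 7, which is SCS_ALPHA.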
 *
 * We then may need to apply workarounds for textureGather hardware bugs.
 */
static unsigned
swizzle_to_scs(GLenum swizzle, bool need_green_to_blue)
{
   unsigned scs = (swizzle + 4) & 7;

   return (need_green_to_blue && scs == HSW_SCS_GREEN) ? HSW_SCS_BLUE : scs;
}

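/**
 * Return the index of the color draw buffer whose miptree is mt, or
 * fb->_NumColorDrawBuffers if no draw buffer uses it.
 */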
static unsigned
brw_find_matching_rb(const struct gl_framebuffer *fb,
                     const struct intel_mipmap_tree *mt)
{
   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt == mt)
         return i;
   }

   return fb->_NumColorDrawBuffers;
}

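/**
 * Check (for the assert in brw_update_texture_surface) that sampling a
 * lossless-compressed miptree through this view is legal: either the view
 * format supports compression, the sampled range has already been resolved,
 * or aux usage was disabled for the matching draw buffer.
 */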
static inline bool
brw_texture_view_sane(const struct brw_context *brw,
                      const struct intel_mipmap_tree *mt,
                      const struct isl_view *view)
{
   /* There are special cases only for lossless compression. */
   if (!intel_miptree_is_lossless_compressed(brw, mt))
      return true;

   if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
                                                view->format))
      return true;

   /* Logic elsewhere needs to take care to resolve the color buffer prior
    * to sampling it as non-compressed.
    */
   if (intel_miptree_has_color_unresolved(mt, view->base_level, view->levels,
                                          view->base_array_layer,
                                          view->array_len))
      return false;

   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   const unsigned rb_index = brw_find_matching_rb(fb, mt);

   if (rb_index == fb->_NumColorDrawBuffers)
      return true;

   /* Underlying surface is compressed but it is sampled using a format that
    * the sampling engine doesn't support as compressed. Compression must be
    * disabled for both sampling engine and data port in case the same surface
    * is used also as render target.
    */
   return brw->draw_aux_buffer_disabled[rb_index];
}

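/**
 * Decide whether the MCS/CCS auxiliary buffer should be ignored when
 * sampling from this miptree with the given view.
 */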
static bool
brw_disable_aux_surface(const struct brw_context *brw,
                        const struct intel_mipmap_tree *mt,
                        const struct isl_view *view)
{
   /* Nothing to disable. */
   if (!mt->mcs_buf)
      return false;

   const bool is_unresolved = intel_miptree_has_color_unresolved(
                                 mt, view->base_level, view->levels,
                                 view->base_array_layer, view->array_len);

   /* There are special cases only for lossless compression. */
   if (!intel_miptree_is_lossless_compressed(brw, mt))
      return !is_unresolved;

   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   const unsigned rb_index = brw_find_matching_rb(fb, mt);

   /* If we are drawing into this with compression enabled, then we must also
    * enable compression when texturing from it regardless of
    * fast_clear_state.  If we don't then, after the first draw call with
    * this setup, there will be data in the CCS which won't get picked up by
    * subsequent texturing operations as required by ARB_texture_barrier.
    * Since we don't want to re-emit the binding table or do a resolve
    * operation every draw call, the easiest thing to do is just enable
    * compression on the texturing side.  This is completely safe to do
    * since, if compressed texturing weren't allowed, we would have disabled
    * compression of render targets in whatever_that_function_is_called().
    */
   if (rb_index < fb->_NumColorDrawBuffers) {
      if (brw->draw_aux_buffer_disabled[rb_index]) {
         assert(!is_unresolved);
      }

      return brw->draw_aux_buffer_disabled[rb_index];
   }

   return !is_unresolved;
}

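/**
 * Emit the SURFACE_STATE for the texture bound to the given unit, applying
 * depth-mode and format overrides, the gen6/gen7 gather workarounds, and
 * stencil-sampling redirects before handing off to brw_emit_surface_state().
 */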
5188c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlievoid
5198c7e30fb950c83f5e9e29e60735e999ac608145aDave Airliebrw_update_texture_surface(struct gl_context *ctx,
5208c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie                           unsigned unit,
521c3380ded10200f2df0cfba4abbe9a9eb892f7cbbAlex Deucher                           uint32_t *surf_offset,
5228c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie                           bool for_gather,
5238c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie                           uint32_t plane)
5248c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie{
525ef0720758e5ba34d0d163bdf5efc6e8dabd65aa8Daniel Lichtenberger   struct brw_context *brw = brw_context(ctx);
5268c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie   struct gl_texture_object *obj = ctx->Texture.Unit[unit]._Current;
527239c8bfb10d3cd61547ccc460f0b89062f3520bcDave Airlie
528239c8bfb10d3cd61547ccc460f0b89062f3520bcDave Airlie   if (obj->Target == GL_TEXTURE_BUFFER) {
5299954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt      brw_update_buffer_texture_surface(ctx, unit, surf_offset);
5309954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt
5319954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt   } else {
5329954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt      struct intel_texture_object *intel_obj = intel_texture_object(obj);
5339954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt      struct intel_mipmap_tree *mt = intel_obj->mt;
5349954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt
5359954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt      if (plane > 0) {
5369954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt         if (mt->plane[plane - 1] == NULL)
5379954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt            return;
5389954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt         mt = mt->plane[plane - 1];
5399954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt      }
5409954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt
5419954a93ab77e64b01b95837f90a567df9e8c94dfEric Anholt      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, unit);
542bdaa0341caffc353fd26bbd91865c2d86eed11c1Dave Airlie      /* If this is a view with restricted NumLayers, then our effective depth
5438c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie       * is not just the miptree depth.
544d81a48757a9bde35299fd7c3cfbe83885238409aDave Airlie       */
545d81a48757a9bde35299fd7c3cfbe83885238409aDave Airlie      const unsigned view_num_layers =
546d81a48757a9bde35299fd7c3cfbe83885238409aDave Airlie         (obj->Immutable && obj->Target != GL_TEXTURE_3D) ? obj->NumLayers :
5478c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie                                                            mt->logical_depth0;
548d81a48757a9bde35299fd7c3cfbe83885238409aDave Airlie
5498c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie      /* Handling GL_ALPHA as a surface format override breaks 1.30+ style
5508c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie       * texturing functions that return a float, as our code generation always
5518c7e30fb950c83f5e9e29e60735e999ac608145aDave Airlie       * selects the .x channel (which would always be 0).
55298bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld       */
55398bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld      struct gl_texture_image *firstImage = obj->Image[0][obj->BaseLevel];
55498bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld      const bool alpha_depth = obj->DepthMode == GL_ALPHA &&
55598bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld         (firstImage->_BaseFormat == GL_DEPTH_COMPONENT ||
55698bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld          firstImage->_BaseFormat == GL_DEPTH_STENCIL);
55798bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld      const unsigned swizzle = (unlikely(alpha_depth) ? SWIZZLE_XYZW :
55898bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld                                brw_get_texture_swizzle(&brw->ctx, obj));
55998bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld
56098bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld      mesa_format mesa_fmt = plane == 0 ? intel_obj->_Format : mt->format;
56198bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld      unsigned format = translate_tex_format(brw, mesa_fmt,
56298bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld                                             sampler->sRGBDecode);
56398bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld
56498bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld      /* Implement gen6 and gen7 gather work-around */
56598bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld      bool need_green_to_blue = false;
56698bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld      if (for_gather) {
56798bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld         if (brw->gen == 7 && (format == BRW_SURFACEFORMAT_R32G32_FLOAT ||
56898bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld                               format == BRW_SURFACEFORMAT_R32G32_SINT ||
56998bb5c610dc68d8e9a185216ce9d2dc6d278c114Joel Bosveld                               format == BRW_SURFACEFORMAT_R32G32_UINT)) {
57023d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie            format = BRW_SURFACEFORMAT_R32G32_FLOAT_LD;
571bdaa0341caffc353fd26bbd91865c2d86eed11c1Dave Airlie            need_green_to_blue = brw->is_haswell;
5725a63634a136caa905e7a1fa5da8fe5dc9f26add5Brian Paul         } else if (brw->gen == 6) {
57323d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie            /* Sandybridge's gather4 message is broken for integer formats.
57423d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie             * To work around this, we pretend the surface is UNORM for
57523d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie             * 8 or 16-bit formats, and emit shader instructions to recover
57623d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie             * the real INT/UINT value.  For 32-bit formats, we pretend
57723d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie             * the surface is FLOAT, and simply reinterpret the resulting
578d61f07318c8678901b948fdaa8ccdf37aa3203e9Kristian Høgsberg             * bits.
579d61f07318c8678901b948fdaa8ccdf37aa3203e9Kristian Høgsberg             */
580d61f07318c8678901b948fdaa8ccdf37aa3203e9Kristian Høgsberg            switch (format) {
58123d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie            case BRW_SURFACEFORMAT_R8_SINT:
58223d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie            case BRW_SURFACEFORMAT_R8_UINT:
5834ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie               format = BRW_SURFACEFORMAT_R8_UNORM;
584f9b5201dbdca4a612afdd694a583177fb4af8685Johann Rudloff               break;
58523d3559bd4ece1fcab5513ebdaa38600d6654374Dave Airlie
5864ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie            case BRW_SURFACEFORMAT_R16_SINT:
5874ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie            case BRW_SURFACEFORMAT_R16_UINT:
5884ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie               format = BRW_SURFACEFORMAT_R16_UNORM;
5894ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie               break;
5904ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie
5914ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie            case BRW_SURFACEFORMAT_R32_SINT:
5924ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie            case BRW_SURFACEFORMAT_R32_UINT:
5934ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie               format = BRW_SURFACEFORMAT_R32_FLOAT;
5944ed8a585840865550bb9acac38c5ef26670a1fb8Dave Airlie               break;

            default:
               break;
            }
         }
      }

      if (obj->StencilSampling && firstImage->_BaseFormat == GL_DEPTH_STENCIL) {
         if (brw->gen <= 7) {
            assert(mt->r8stencil_mt && !mt->stencil_mt->r8stencil_needs_update);
            mt = mt->r8stencil_mt;
         } else {
            mt = mt->stencil_mt;
         }
         format = BRW_SURFACEFORMAT_R8_UINT;
      } else if (brw->gen <= 7 && mt->format == MESA_FORMAT_S_UINT8) {
         assert(mt->r8stencil_mt && !mt->r8stencil_needs_update);
         mt = mt->r8stencil_mt;
         format = BRW_SURFACEFORMAT_R8_UINT;
      }

      const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];

      struct isl_view view = {
         .format = format,
         .base_level = obj->MinLevel + obj->BaseLevel,
         .levels = intel_obj->_MaxLevel - obj->BaseLevel + 1,
         .base_array_layer = obj->MinLayer,
         .array_len = view_num_layers,
         .swizzle = {
            .r = swizzle_to_scs(GET_SWZ(swizzle, 0), need_green_to_blue),
            .g = swizzle_to_scs(GET_SWZ(swizzle, 1), need_green_to_blue),
            .b = swizzle_to_scs(GET_SWZ(swizzle, 2), need_green_to_blue),
            .a = swizzle_to_scs(GET_SWZ(swizzle, 3), need_green_to_blue),
         },
         .usage = ISL_SURF_USAGE_TEXTURE_BIT,
      };

      if (obj->Target == GL_TEXTURE_CUBE_MAP ||
          obj->Target == GL_TEXTURE_CUBE_MAP_ARRAY)
         view.usage |= ISL_SURF_USAGE_CUBE_BIT;

      assert(brw_texture_view_sane(brw, mt, &view));

      const int flags = brw_disable_aux_surface(brw, mt, &view) ?
                           INTEL_AUX_BUFFER_DISABLED : 0;
      brw_emit_surface_state(brw, mt, flags, mt->target, view,
                             tex_mocs[brw->gen],
                             surf_offset, surf_index,
                             I915_GEM_DOMAIN_SAMPLER, 0);
   }
}

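/**
 * Emit SURFACE_STATE for a buffer surface.
 *
 * The state dwords are filled in by isl_buffer_fill_state() from the
 * caller-supplied format, size and stride, and a relocation for the buffer
 * address is emitted when a BO is given; "rw" only controls whether that
 * relocation carries a write domain.
 */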
void
brw_emit_buffer_surface_state(struct brw_context *brw,
                              uint32_t *out_offset,
                              drm_intel_bo *bo,
                              unsigned buffer_offset,
                              unsigned surface_format,
                              unsigned buffer_size,
                              unsigned pitch,
                              bool rw)
{
   uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                  brw->isl_dev.ss.size,
                                  brw->isl_dev.ss.align,
                                  out_offset);

   isl_buffer_fill_state(&brw->isl_dev, dw,
                         .address = (bo ? bo->offset64 : 0) + buffer_offset,
                         .size = buffer_size,
                         .format = surface_format,
                         .stride = pitch,
                         .mocs = tex_mocs[brw->gen]);

   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *out_offset + brw->isl_dev.ss.addr_offset,
                              bo, buffer_offset,
                              I915_GEM_DOMAIN_SAMPLER,
                              (rw ? I915_GEM_DOMAIN_SAMPLER : 0));
   }
}

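/**
 * Set up the surface for a buffer texture (GL_TEXTURE_BUFFER).
 *
 * The buffer object backing the texture is bound as a read-only surface in
 * the texture's buffer format, with the size clamped to the size of the
 * underlying buffer object.
 */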
void
brw_update_buffer_texture_surface(struct gl_context *ctx,
                                  unsigned unit,
                                  uint32_t *surf_offset)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_buffer_object *intel_obj =
      intel_buffer_object(tObj->BufferObject);
   uint32_t size = tObj->BufferSize;
   drm_intel_bo *bo = NULL;
   mesa_format format = tObj->_BufferObjectFormat;
   uint32_t brw_format = brw_format_for_mesa_format(format);
   int texel_size = _mesa_get_format_bytes(format);

   if (intel_obj) {
      size = MIN2(size, intel_obj->Base.Size);
      bo = intel_bufferobj_buffer(brw, intel_obj, tObj->BufferOffset, size);
   }

   if (brw_format == 0 && format != MESA_FORMAT_RGBA_FLOAT32) {
      _mesa_problem(NULL, "bad format %s for texture buffer\n",
                    _mesa_get_format_name(format));
   }

   brw_emit_buffer_surface_state(brw, surf_offset, bo,
                                 tObj->BufferOffset,
                                 brw_format,
                                 size,
                                 texel_size,
                                 false /* rw */);
}

/**
 * Create the constant buffer surface.  Vertex/fragment shader constants will be
 * read from this buffer with Data Port Read instructions/messages.
 */
void
brw_create_constant_surface(struct brw_context *brw,
                            drm_intel_bo *bo,
                            uint32_t offset,
                            uint32_t size,
                            uint32_t *out_offset)
{
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 BRW_SURFACEFORMAT_R32G32B32A32_FLOAT,
                                 size, 1, false);
}
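
/* For illustration: a typical caller turns a UBO binding into a pull-constant
 * surface, as brw_upload_ubo_surfaces() does further down:
 *
 *    brw_create_constant_surface(brw, bo, binding->Offset, size,
 *                                &ubo_surf_offsets[i]);
 */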

/**
 * Create the buffer surface.  Shader buffer variables will be read from and
 * written to this buffer with Data Port Read/Write instructions/messages.
 */
void
brw_create_buffer_surface(struct brw_context *brw,
                          drm_intel_bo *bo,
                          uint32_t offset,
                          uint32_t size,
                          uint32_t *out_offset)
{
   /* Use a raw surface so we can reuse existing untyped read/write/atomic
    * messages.  We need these specifically for the fragment shader since they
    * include a pixel mask header that is needed to ensure correct behavior
    * with helper invocations, which must not write to the buffer.
    */
   brw_emit_buffer_surface_state(brw, out_offset, bo, offset,
                                 BRW_SURFACEFORMAT_RAW,
                                 size, 1, true);
}

/**
 * Set up a binding table entry for use by stream output logic (transform
 * feedback).
 *
 * buffer_size_minus_1 must be less than BRW_MAX_NUM_BUFFER_ENTRIES.
 */
void
brw_update_sol_surface(struct brw_context *brw,
                       struct gl_buffer_object *buffer_obj,
                       uint32_t *out_offset, unsigned num_vector_components,
                       unsigned stride_dwords, unsigned offset_dwords)
{
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   uint32_t offset_bytes = 4 * offset_dwords;
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
                                             offset_bytes,
                                             buffer_obj->Size - offset_bytes);
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;
   size_t size_dwords = buffer_obj->Size / 4;
   uint32_t buffer_size_minus_1, width, height, depth, surface_format;

   /* FIXME: can we rely on core Mesa to ensure that the buffer isn't
    * too big to map using a single binding table entry?
    */
   assert((size_dwords - offset_dwords) / stride_dwords
          <= BRW_MAX_NUM_BUFFER_ENTRIES);

   if (size_dwords > offset_dwords + num_vector_components) {
      /* There is room for at least 1 transform feedback output in the buffer.
       * Compute the number of additional transform feedback outputs the
       * buffer has room for.
       */
      buffer_size_minus_1 =
         (size_dwords - offset_dwords - num_vector_components) / stride_dwords;
   } else {
      /* There isn't even room for a single transform feedback output in the
       * buffer.  We can't configure the binding table entry to prevent output
       * entirely; we'll have to rely on the geometry shader to detect
       * overflow.  But to minimize the damage in case of a bug, set up the
       * binding table entry to just allow a single output.
       */
      buffer_size_minus_1 = 0;
   }
   width = buffer_size_minus_1 & 0x7f;
   height = (buffer_size_minus_1 & 0xfff80) >> 7;
   depth = (buffer_size_minus_1 & 0x7f00000) >> 20;

   switch (num_vector_components) {
   case 1:
      surface_format = BRW_SURFACEFORMAT_R32_FLOAT;
      break;
   case 2:
      surface_format = BRW_SURFACEFORMAT_R32G32_FLOAT;
      break;
   case 3:
      surface_format = BRW_SURFACEFORMAT_R32G32B32_FLOAT;
      break;
   case 4:
      surface_format = BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
      break;
   default:
      unreachable("Invalid vector size for transform feedback output");
   }

   surf[0] = BRW_SURFACE_BUFFER << BRW_SURFACE_TYPE_SHIFT |
      BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
      surface_format << BRW_SURFACE_FORMAT_SHIFT |
      BRW_SURFACE_RC_READ_WRITE;
   surf[1] = bo->offset64 + offset_bytes; /* reloc */
   surf[2] = (width << BRW_SURFACE_WIDTH_SHIFT |
              height << BRW_SURFACE_HEIGHT_SHIFT);
   surf[3] = (depth << BRW_SURFACE_DEPTH_SHIFT |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = 0;
   surf[5] = 0;

   /* Emit relocation to surface contents. */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           *out_offset + 4,
                           bo, offset_bytes,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
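
/* Worked example for the width/height/depth packing in brw_update_sol_surface()
 * above: buffer_size_minus_1 is split across the Width, Height and Depth
 * fields, bits [6:0], [19:7] and [26:20] respectively.  E.g. a value of
 * 0x12345 yields width = 0x45, height = 0x246 and depth = 0.
 */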

/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_wm_pull_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->wm.base;
   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct brw_program *fp = (struct brw_program *) brw->fragment_program;
   /* BRW_NEW_FS_PROG_DATA */
   struct brw_stage_prog_data *prog_data = brw->wm.base.prog_data;

   _mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
   /* _NEW_PROGRAM_CONSTANTS */
   brw_upload_pull_constants(brw, BRW_NEW_SURFACES, &fp->program,
                             stage_state, prog_data);
}

const struct brw_tracked_state brw_wm_pull_constants = {
   .dirty = {
      .mesa = _NEW_PROGRAM_CONSTANTS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_pull_constants,
};

/**
 * Creates a null renderbuffer surface.
 *
 * This is used when the shader doesn't write to any color output.  An FB
 * write to target 0 will still be emitted, because that's how the thread is
 * terminated (and computed depth is returned), so we need to have the
 * hardware discard the target 0 color output.
 */
static void
brw_emit_null_surface_state(struct brw_context *brw,
                            unsigned width,
                            unsigned height,
                            unsigned samples,
                            uint32_t *out_offset)
{
   /* From the Sandy Bridge PRM, Vol4 Part1 p71 (Surface Type: Programming
    * Notes):
    *
    *     A null surface will be used in instances where an actual surface is
    *     not bound. When a write message is generated to a null surface, no
    *     actual surface is written to. When a read message (including any
    *     sampling engine message) is generated to a null surface, the result
    *     is all zeros. Note that a null surface type is allowed to be used
    *     with all messages, even if it is not specifically indicated as
    *     supported. All of the remaining fields in surface state are ignored
    *     for null surfaces, with the following exceptions:
    *
    *     - [DevSNB+]: Width, Height, Depth, and LOD fields must match the
    *       depth buffer’s corresponding state for all render target surfaces,
    *       including null.
    *
    *     - Surface Format must be R8G8B8A8_UNORM.
    */
   unsigned surface_type = BRW_SURFACE_NULL;
   drm_intel_bo *bo = NULL;
   unsigned pitch_minus_1 = 0;
   uint32_t multisampling_state = 0;
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);

   if (samples > 1) {
      /* On Gen6, null render targets seem to cause GPU hangs when
       * multisampling.  So work around this problem by rendering into a dummy
       * color buffer.
       *
       * To decrease the amount of memory needed by the workaround buffer, we
       * set its pitch to 128 bytes (the width of a Y tile).  This means that
       * the amount of memory needed for the workaround buffer is
       * (width_in_tiles + height_in_tiles - 1) tiles.
       *
       * Note that since the workaround buffer will be interpreted by the
       * hardware as an interleaved multisampled buffer, we need to compute
       * width_in_tiles and height_in_tiles by dividing the width and height
       * by 16 rather than the normal Y-tile size of 32.
       */
      unsigned width_in_tiles = ALIGN(width, 16) / 16;
      unsigned height_in_tiles = ALIGN(height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;
      pitch_minus_1 = 127;
      multisampling_state = brw_get_surface_num_multisamples(samples);
   }

   surf[0] = (surface_type << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACEFORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT);
   if (brw->gen < 6) {
      surf[0] |= (1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT |
                  1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT);
   }
   surf[1] = bo ? bo->offset64 : 0;
   surf[2] = ((width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   /* From the Sandy Bridge PRM, Vol4 Part1 p82 (Tiled Surface: Programming
    * Notes):
    *
    *     If Surface Type is SURFTYPE_NULL, this field must be TRUE
    */
   surf[3] = (BRW_SURFACE_TILED | BRW_SURFACE_TILED_Y |
              pitch_minus_1 << BRW_SURFACE_PITCH_SHIFT);
   surf[4] = multisampling_state;
   surf[5] = 0;

   if (bo) {
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              *out_offset + 4,
                              bo, 0,
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
   }
}
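
/* Worked example for the gen6 multisampled-null workaround sizing above: for
 * a 256x128 multisampled framebuffer, width_in_tiles = 16 and height_in_tiles
 * = 8, so the scratch BO only needs (16 + 8 - 1) * 4096 = 94208 bytes, far
 * less than a full-size dummy color buffer would need.
 */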

/**
 * Sets up a surface state structure to point at the given region.
 * While it is currently only used for the front/back buffer, it should be
 * usable for further buffers when doing ARB_draw_buffers support.
 */
static uint32_t
gen4_update_renderbuffer_surface(struct brw_context *brw,
                                 struct gl_renderbuffer *rb,
                                 uint32_t flags, unsigned unit,
                                 uint32_t surf_index)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt = irb->mt;
   uint32_t *surf;
   uint32_t tile_x, tile_y;
   uint32_t format = 0;
   uint32_t offset;
   /* _NEW_BUFFERS */
   mesa_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
   /* BRW_NEW_FS_PROG_DATA */

   assert(!(flags & INTEL_RENDERBUFFER_LAYERED));
   assert(!(flags & INTEL_AUX_BUFFER_DISABLED));

   if (rb->TexImage && !brw->has_surface_tile_offset) {
      intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y);

      if (tile_x != 0 || tile_y != 0) {
         /* Original gen4 hardware couldn't draw to a non-tile-aligned
          * destination in a miptree unless you actually set up your
          * renderbuffer as a miptree and used the fragile
          * lod/array_index/etc. controls to select the image.  So, instead,
          * we just make a new single-level miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(brw, irb, false);
         mt = irb->mt;
      }
   }

   surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);

   format = brw->render_target_format[rb_format];
   if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
      _mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
                    __func__, _mesa_get_format_name(rb_format));
   }

   surf[0] = (BRW_SURFACE_2D << BRW_SURFACE_TYPE_SHIFT |
              format << BRW_SURFACE_FORMAT_SHIFT);

   /* reloc */
   assert(mt->offset % mt->cpp == 0);
   surf[1] = (intel_renderbuffer_get_tile_offsets(irb, &tile_x, &tile_y) +
              mt->bo->offset64 + mt->offset);

   surf[2] = ((rb->Width - 1) << BRW_SURFACE_WIDTH_SHIFT |
              (rb->Height - 1) << BRW_SURFACE_HEIGHT_SHIFT);

   surf[3] = (brw_get_surface_tiling_bits(mt->tiling) |
              (mt->pitch - 1) << BRW_SURFACE_PITCH_SHIFT);

   surf[4] = brw_get_surface_num_multisamples(mt->num_samples);

   assert(brw->has_surface_tile_offset || (tile_x == 0 && tile_y == 0));
   /* Note that the low bits of these fields are missing, so
    * there's the possibility of getting in trouble.
    */
   assert(tile_x % 4 == 0);
   assert(tile_y % 2 == 0);
   surf[5] = ((tile_x / 4) << BRW_SURFACE_X_OFFSET_SHIFT |
              (tile_y / 2) << BRW_SURFACE_Y_OFFSET_SHIFT |
              (mt->valign == 4 ? BRW_SURFACE_VERTICAL_ALIGN_ENABLE : 0));

   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (!ctx->Color.ColorLogicOpEnabled && !ctx->Color._AdvancedBlendMode &&
          (ctx->Color.BlendEnabled & (1 << unit)))
         surf[0] |= BRW_SURFACE_BLEND_ENABLED;

      if (!ctx->Color.ColorMask[unit][0])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_R_SHIFT;
      if (!ctx->Color.ColorMask[unit][1])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_G_SHIFT;
      if (!ctx->Color.ColorMask[unit][2])
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_B_SHIFT;

      /* Disable writes to the alpha component when the renderbuffer is
       * XRGB.
       */
      if (ctx->DrawBuffer->Visual.alphaBits == 0 ||
          !ctx->Color.ColorMask[unit][3]) {
         surf[0] |= 1 << BRW_SURFACE_WRITEDISABLE_A_SHIFT;
      }
   }

   drm_intel_bo_emit_reloc(brw->batch.bo,
                           offset + 4,
                           mt->bo,
                           surf[1] - mt->bo->offset64,
                           I915_GEM_DOMAIN_RENDER,
                           I915_GEM_DOMAIN_RENDER);

   return offset;
}

/**
 * Construct SURFACE_STATE objects for renderbuffers/draw buffers.
 */
void
brw_update_renderbuffer_surfaces(struct brw_context *brw,
                                 const struct gl_framebuffer *fb,
                                 uint32_t render_target_start,
                                 uint32_t *surf_offset)
{
   GLuint i;
   const unsigned int w = _mesa_geometric_width(fb);
   const unsigned int h = _mesa_geometric_height(fb);
   const unsigned int s = _mesa_geometric_samples(fb);

   /* Update surfaces for drawing buffers */
   if (fb->_NumColorDrawBuffers >= 1) {
      for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const uint32_t surf_index = render_target_start + i;
         const int flags = (_mesa_geometric_layers(fb) > 0 ?
                              INTEL_RENDERBUFFER_LAYERED : 0) |
                           (brw->draw_aux_buffer_disabled[i] ?
                              INTEL_AUX_BUFFER_DISABLED : 0);

         if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
            surf_offset[surf_index] =
               brw->vtbl.update_renderbuffer_surface(
                  brw, fb->_ColorDrawBuffers[i], flags, i, surf_index);
         } else {
            brw->vtbl.emit_null_surface_state(brw, w, h, s,
               &surf_offset[surf_index]);
         }
      }
   } else {
      const uint32_t surf_index = render_target_start;
      brw->vtbl.emit_null_surface_state(brw, w, h, s,
         &surf_offset[surf_index]);
   }
}

static void
update_renderbuffer_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   /* _NEW_BUFFERS | _NEW_COLOR */
   const struct gl_framebuffer *fb = ctx->DrawBuffer;
   brw_update_renderbuffer_surfaces(
      brw, fb,
      wm_prog_data->binding_table.render_target_start,
      brw->wm.base.surf_offset);
   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_surfaces,
};

const struct brw_tracked_state gen6_renderbuffer_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = update_renderbuffer_surfaces,
};

static void
update_renderbuffer_read_surfaces(struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;

   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
       brw->fragment_program && brw->fragment_program->info.outputs_read) {
      /* _NEW_BUFFERS */
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];
         const struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         const unsigned surf_index =
            wm_prog_data->binding_table.render_target_read_start + i;
         uint32_t *surf_offset = &brw->wm.base.surf_offset[surf_index];

         if (irb) {
            const unsigned format = brw->render_target_format[
               _mesa_get_render_format(ctx, intel_rb_format(irb))];
            assert(isl_format_supports_sampling(&brw->screen->devinfo,
                                                format));

            /* Override the target of the texture if the render buffer is a
             * single slice of a 3D texture (since the minimum array element
             * field of the surface state structure is ignored by the sampler
             * unit for 3D textures on some hardware), or if the render buffer
             * is a 1D array (since shaders always provide the array index
             * coordinate at the Z component to avoid state-dependent
             * recompiles when changing the texture target of the
             * framebuffer).
             */
            const GLenum target =
               (irb->mt->target == GL_TEXTURE_3D &&
                irb->layer_count == 1) ? GL_TEXTURE_2D :
               irb->mt->target == GL_TEXTURE_1D_ARRAY ? GL_TEXTURE_2D_ARRAY :
               irb->mt->target;

            /* intel_renderbuffer::mt_layer is expressed in sample units for
             * the UMS and CMS multisample layouts, but
             * intel_renderbuffer::layer_count is expressed in units of whole
             * logical layers regardless of the multisample layout.
             */
            const unsigned mt_layer_unit =
               (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
                irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) ?
               MAX2(irb->mt->num_samples, 1) : 1;

            const struct isl_view view = {
               .format = format,
               .base_level = irb->mt_level - irb->mt->first_level,
               .levels = 1,
               .base_array_layer = irb->mt_layer / mt_layer_unit,
               .array_len = irb->layer_count,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_TEXTURE_BIT,
            };

            const int flags = brw->draw_aux_buffer_disabled[i] ?
                                 INTEL_AUX_BUFFER_DISABLED : 0;
            brw_emit_surface_state(brw, irb->mt, flags, target, view,
                                   tex_mocs[brw->gen],
                                   surf_offset, surf_index,
                                   I915_GEM_DOMAIN_SAMPLER, 0);

         } else {
            brw->vtbl.emit_null_surface_state(
               brw, _mesa_geometric_width(fb), _mesa_geometric_height(fb),
               _mesa_geometric_samples(fb), surf_offset);
         }
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_renderbuffer_read_surfaces = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = update_renderbuffer_read_surfaces,
};

static void
update_stage_texture_surfaces(struct brw_context *brw,
                              const struct gl_program *prog,
                              struct brw_stage_state *stage_state,
                              bool for_gather, uint32_t plane)
{
   if (!prog)
      return;

   struct gl_context *ctx = &brw->ctx;

   uint32_t *surf_offset = stage_state->surf_offset;

   /* BRW_NEW_*_PROG_DATA */
   if (for_gather)
      surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
   else
      surf_offset += stage_state->prog_data->binding_table.plane_start[plane];

   unsigned num_samplers = util_last_bit(prog->SamplersUsed);
   for (unsigned s = 0; s < num_samplers; s++) {
      surf_offset[s] = 0;

      if (prog->SamplersUsed & (1 << s)) {
         const unsigned unit = prog->SamplerUnits[s];

         /* _NEW_TEXTURE */
         if (ctx->Texture.Unit[unit]._Current) {
            brw_update_texture_surface(ctx, unit, surf_offset + s, for_gather, plane);
         }
      }
   }
}
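
/* Note: each stage's binding table reserves separate ranges for regular
 * sampling (plane_start[0]), the extra planes of multi-planar (e.g. YUV)
 * textures (plane_start[1..2]), and, on gen < 8, gather4 sampling
 * (gather_texture_start); update_stage_texture_surfaces() above advances
 * surf_offset to the start of the requested range before the per-sampler
 * loop.
 */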


/**
 * Construct SURFACE_STATE objects for enabled textures.
 */
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *vs = (struct gl_program *) brw->vertex_program;

   /* BRW_NEW_TESS_PROGRAMS */
   struct gl_program *tcs = (struct gl_program *) brw->tess_ctrl_program;
   struct gl_program *tes = (struct gl_program *) brw->tess_eval_program;

   /* BRW_NEW_GEOMETRY_PROGRAM */
   struct gl_program *gs = (struct gl_program *) brw->geometry_program;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   struct gl_program *fs = (struct gl_program *) brw->fragment_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, vs, &brw->vs.base, false, 0);
   update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, false, 0);
   update_stage_texture_surfaces(brw, tes, &brw->tes.base, false, 0);
   update_stage_texture_surfaces(brw, gs, &brw->gs.base, false, 0);
   update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (brw->gen < 8) {
      if (vs && vs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, vs, &brw->vs.base, true, 0);
      if (tcs && tcs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, tcs, &brw->tcs.base, true, 0);
      if (tes && tes->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, tes, &brw->tes.base, true, 0);
      if (gs && gs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, gs, &brw->gs.base, true, 0);
      if (fs && fs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, fs, &brw->wm.base, true, 0);
   }

   if (fs) {
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 1);
      update_stage_texture_surfaces(brw, fs, &brw->wm.base, false, 2);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_GEOMETRY_PROGRAM |
             BRW_NEW_GS_PROG_DATA |
             BRW_NEW_TESS_PROGRAMS |
             BRW_NEW_TCS_PROG_DATA |
             BRW_NEW_TES_PROG_DATA |
             BRW_NEW_TEXTURE_BUFFER |
             BRW_NEW_VERTEX_PROGRAM |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_update_texture_surfaces,
};

static void
brw_update_cs_texture_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   struct gl_program *cs = (struct gl_program *) brw->compute_program;

   /* _NEW_TEXTURE */
   update_stage_texture_surfaces(brw, cs, &brw->cs.base, false, 0);

   /* Emit an alternate set of surface states for gather.  This allows the
    * surface format to be overridden for only the gather4 messages.
    */
   if (brw->gen < 8) {
      if (cs && cs->nir->info->uses_texture_gather)
         update_stage_texture_surfaces(brw, cs, &brw->cs.base, true, 0);
   }

   brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}

const struct brw_tracked_state brw_cs_texture_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_COMPUTE_PROGRAM,
   },
   .emit = brw_update_cs_texture_surfaces,
};


void
brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;

   if (!prog)
      return;

   uint32_t *ubo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ubo_start];

   for (int i = 0; i < prog->info.num_ubos; i++) {
      struct gl_uniform_buffer_binding *binding =
         &ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ubo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_constant_surface(brw, bo, binding->Offset,
                                     size,
                                     &ubo_surf_offsets[i]);
      }
   }

   uint32_t *ssbo_surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.ssbo_start];

   for (int i = 0; i < prog->info.num_ssbos; i++) {
      struct gl_shader_storage_buffer_binding *binding =
         &ctx->ShaderStorageBufferBindings[prog->sh.ShaderStorageBlocks[i]->Binding];

      if (binding->BufferObject == ctx->Shared->NullBufferObj) {
         brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, &ssbo_surf_offsets[i]);
      } else {
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
         if (!binding->AutomaticSize)
            size = MIN2(size, binding->Size);
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_bo,
                                   binding->Offset,
                                   size);
         brw_create_buffer_surface(brw, bo, binding->Offset,
                                   size,
                                   &ssbo_surf_offsets[i]);
      }
   }

   if (prog->info.num_ubos || prog->info.num_ssbos)
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
}
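
/* Note the asymmetry above: UBOs go through brw_create_constant_surface()
 * (R32G32B32A32_FLOAT, read-only), matching the Data Port Read path used for
 * pull constants, while SSBOs go through brw_create_buffer_surface() (RAW,
 * read-write) so the untyped read/write/atomic messages can be reused.
 */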

static void
brw_upload_wm_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_program *prog = ctx->_Shader->_CurrentFragmentProgram;

   /* BRW_NEW_FS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog, &brw->wm.base, brw->wm.base.prog_data);
}

const struct brw_tracked_state brw_wm_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_wm_ubo_surfaces,
};

static void
brw_upload_cs_ubo_surfaces(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];

   if (!prog || !prog->_LinkedShaders[MESA_SHADER_COMPUTE])
      return;

   /* BRW_NEW_CS_PROG_DATA */
   brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_COMPUTE]->Program,
                           &brw->cs.base, brw->cs.base.prog_data);
}

const struct brw_tracked_state brw_cs_ubo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_UNIFORM_BUFFER,
   },
   .emit = brw_upload_cs_ubo_surfaces,
};

void
brw_upload_abo_surfaces(struct brw_context *brw,
                        const struct gl_program *prog,
                        struct brw_stage_state *stage_state,
                        struct brw_stage_prog_data *prog_data)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t *surf_offsets =
      &stage_state->surf_offset[prog_data->binding_table.abo_start];

   if (prog->info.num_abos) {
      for (unsigned i = 0; i < prog->info.num_abos; i++) {
         struct gl_atomic_buffer_binding *binding =
            &ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
         struct intel_buffer_object *intel_bo =
            intel_buffer_object(binding->BufferObject);
         drm_intel_bo *bo = intel_bufferobj_buffer(
            brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);

         brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
                                       binding->Offset, BRW_SURFACEFORMAT_RAW,
                                       bo->size - binding->Offset, 1, true);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

static void
brw_upload_wm_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *wm = brw->fragment_program;

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA */
      brw_upload_abo_surfaces(brw, wm, &brw->wm.base, brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = brw_upload_wm_abo_surfaces,
};

static void
brw_upload_cs_abo_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->compute_program;

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA */
      brw_upload_abo_surfaces(brw, cp, &brw->cs.base, brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_abo_surfaces = {
   .dirty = {
      .mesa = _NEW_PROGRAM,
      .brw = BRW_NEW_ATOMIC_BUFFER |
             BRW_NEW_BLORP |
             BRW_NEW_BATCH |
             BRW_NEW_CS_PROG_DATA,
   },
   .emit = brw_upload_cs_abo_surfaces,
};

static void
brw_upload_cs_image_surfaces(struct brw_context *brw)
{
   /* _NEW_PROGRAM */
   const struct gl_program *cp = brw->compute_program;

   if (cp) {
      /* BRW_NEW_CS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, cp, &brw->cs.base,
                                brw->cs.base.prog_data);
   }
}

const struct brw_tracked_state brw_cs_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE | _NEW_PROGRAM,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_cs_image_surfaces,
};

static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   uint32_t hw_format = brw_format_for_mesa_format(format);
   if (access == GL_WRITE_ONLY) {
      return hw_format;
   } else if (isl_has_matching_typed_storage_image_format(devinfo, hw_format)) {
      /* Typed surface reads support a very limited subset of the shader
       * image formats.  Translate it into the closest format the
       * hardware supports.
       */
      return isl_lower_storage_image_format(devinfo, hw_format);
   } else {
      /* The hardware doesn't actually support a typed format that we can
       * use, so we have to fall back to untyped read/write messages.
       */
      return BRW_SURFACEFORMAT_RAW;
   }
}
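
/* In short: write-only access keeps the hardware format returned by
 * brw_format_for_mesa_format(), readable access is lowered to the closest
 * format that typed reads support, and formats with no usable typed
 * equivalent fall back to RAW and untyped messages.
 */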

static void
update_default_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   param->surface_idx = surface_idx;
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
update_buffer_image_param(struct brw_context *brw,
                          struct gl_image_unit *u,
                          unsigned surface_idx,
                          struct brw_image_param *param)
{
   struct gl_buffer_object *obj = u->TexObj->BufferObject;
   const uint32_t size = MIN2((uint32_t)u->TexObj->BufferSize, obj->Size);
   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = size / _mesa_get_format_bytes(u->_ActualFormat);
   param->stride[0] = _mesa_get_format_bytes(u->_ActualFormat);
}

static void
update_texture_image_param(struct brw_context *brw,
                           struct gl_image_unit *u,
                           unsigned surface_idx,
                           struct brw_image_param *param)
{
   struct intel_mipmap_tree *mt = intel_texture_object(u->TexObj)->mt;

   update_default_image_param(brw, u, surface_idx, param);

   param->size[0] = minify(mt->logical_width0, u->Level);
   param->size[1] = minify(mt->logical_height0, u->Level);
   param->size[2] = (!u->Layered ? 1 :
                     u->TexObj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                     u->TexObj->Target == GL_TEXTURE_3D ?
                     minify(mt->logical_depth0, u->Level) :
                     mt->logical_depth0);

   intel_miptree_get_image_offset(mt, u->Level, u->_Layer,
                                  &param->offset[0],
                                  &param->offset[1]);

   param->stride[0] = mt->cpp;
   param->stride[1] = mt->pitch / mt->cpp;
   param->stride[2] =
      brw_miptree_get_horizontal_slice_pitch(brw, mt, u->Level);
   param->stride[3] =
      brw_miptree_get_vertical_slice_pitch(brw, mt, u->Level);

   if (mt->tiling == I915_TILING_X) {
      /* An X tile is a rectangular block of 512x8 bytes. */
      param->tiling[0] = _mesa_logbase2(512 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(8);

      if (brw->has_swizzling) {
         /* Right shifts required to swizzle bits 9 and 10 of the memory
          * address with bit 6.
          */
         param->swizzling[0] = 3;
         param->swizzling[1] = 4;
      }
   } else if (mt->tiling == I915_TILING_Y) {
      /* The layout of a Y-tiled surface in memory isn't really fundamentally
       * different from the layout of an X-tiled surface; we simply pretend
       * that the surface is broken up into a number of smaller 16Bx32 tiles,
       * each one arranged in X-major order just as is the case for X-tiling.
       */
      param->tiling[0] = _mesa_logbase2(16 / mt->cpp);
      param->tiling[1] = _mesa_logbase2(32);

      if (brw->has_swizzling) {
         /* Right shift required to swizzle bit 9 of the memory address with
          * bit 6.
          */
         param->swizzling[0] = 3;
      }
   }

   /* 3D textures are arranged in 2D in memory with 2^lod slices per row.  The
    * address calculation algorithm (emit_address_calculation() in
    * brw_fs_surface_builder.cpp) handles this as a sort of tiling with
    * modulus equal to the LOD.
    */
   param->tiling[2] = (u->TexObj->Target == GL_TEXTURE_3D ? u->Level :
                       0);
}
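
/* Worked example for the tiling parameters above, assuming a 4 cpp format:
 * X-tiling gives tiling[0] = log2(512 / 4) = 7 and tiling[1] = log2(8) = 3
 * (128x8-pixel tiles), while Y-tiling gives tiling[0] = log2(16 / 4) = 2 and
 * tiling[1] = log2(32) = 5 (4x32-pixel tiles).
 */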

static void
update_image_surface(struct brw_context *brw,
                     struct gl_image_unit *u,
                     GLenum access,
                     unsigned surface_idx,
                     uint32_t *surf_offset,
                     struct brw_image_param *param)
{
   if (_mesa_is_image_unit_valid(&brw->ctx, u)) {
      struct gl_texture_object *obj = u->TexObj;
      const unsigned format = get_image_format(brw, u->_ActualFormat, access);

      if (obj->Target == GL_TEXTURE_BUFFER) {
         struct intel_buffer_object *intel_obj =
            intel_buffer_object(obj->BufferObject);
         const unsigned texel_size = (format == BRW_SURFACEFORMAT_RAW ? 1 :
                                      _mesa_get_format_bytes(u->_ActualFormat));

         brw_emit_buffer_surface_state(
            brw, surf_offset, intel_obj->buffer, obj->BufferOffset,
            format, intel_obj->Base.Size, texel_size,
            access != GL_READ_ONLY);

         update_buffer_image_param(brw, u, surface_idx, param);

      } else {
         struct intel_texture_object *intel_obj = intel_texture_object(obj);
         struct intel_mipmap_tree *mt = intel_obj->mt;

         if (format == BRW_SURFACEFORMAT_RAW) {
            brw_emit_buffer_surface_state(
               brw, surf_offset, mt->bo, mt->offset,
               format, mt->bo->size - mt->offset, 1 /* pitch */,
               access != GL_READ_ONLY);

         } else {
            const unsigned num_layers = (!u->Layered ? 1 :
                                         obj->Target == GL_TEXTURE_CUBE_MAP ? 6 :
                                         mt->logical_depth0);

            struct isl_view view = {
               .format = format,
               .base_level = obj->MinLevel + u->Level,
               .levels = 1,
               .base_array_layer = obj->MinLayer + u->_Layer,
               .array_len = num_layers,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_STORAGE_BIT,
            };

            const int surf_index = surf_offset - &brw->wm.base.surf_offset[0];
            const bool unresolved = intel_miptree_has_color_unresolved(
                                       mt, view.base_level, view.levels,
                                       view.base_array_layer, view.array_len);
            const int flags = unresolved ? 0 : INTEL_AUX_BUFFER_DISABLED;
            brw_emit_surface_state(brw, mt, flags, mt->target, view,
                                   tex_mocs[brw->gen],
                                   surf_offset, surf_index,
                                   I915_GEM_DOMAIN_SAMPLER,
                                   access == GL_READ_ONLY ? 0 :
                                             I915_GEM_DOMAIN_SAMPLER);
         }

         update_texture_image_param(brw, u, surface_idx, param);
      }

   } else {
      brw->vtbl.emit_null_surface_state(brw, 1, 1, 1, surf_offset);
      update_default_image_param(brw, u, surface_idx, param);
   }
}

void
brw_upload_image_surfaces(struct brw_context *brw,
                          const struct gl_program *prog,
                          struct brw_stage_state *stage_state,
                          struct brw_stage_prog_data *prog_data)
{
   assert(prog);
   struct gl_context *ctx = &brw->ctx;

   if (prog->info.num_images) {
      for (unsigned i = 0; i < prog->info.num_images; i++) {
         struct gl_image_unit *u = &ctx->ImageUnits[prog->sh.ImageUnits[i]];
         const unsigned surf_idx = prog_data->binding_table.image_start + i;

         update_image_surface(brw, u, prog->sh.ImageAccess[i],
                              surf_idx,
                              &stage_state->surf_offset[surf_idx],
                              &prog_data->image_param[i]);
      }

      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
      /* This may have changed the image metadata that depends on the context
       * image unit state and is passed to the program as uniforms; make sure
       * that push and pull constants are re-uploaded.
       */
      brw->NewGLState |= _NEW_PROGRAM_CONSTANTS;
   }
}

static void
brw_upload_wm_image_surfaces(struct brw_context *brw)
{
   /* BRW_NEW_FRAGMENT_PROGRAM */
   const struct gl_program *wm = brw->fragment_program;

   if (wm) {
      /* BRW_NEW_FS_PROG_DATA, BRW_NEW_IMAGE_UNITS, _NEW_TEXTURE */
      brw_upload_image_surfaces(brw, wm, &brw->wm.base,
                                brw->wm.base.prog_data);
   }
}

const struct brw_tracked_state brw_wm_image_surfaces = {
   .dirty = {
      .mesa = _NEW_TEXTURE,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FRAGMENT_PROGRAM |
             BRW_NEW_FS_PROG_DATA |
             BRW_NEW_IMAGE_UNITS
   },
   .emit = brw_upload_wm_image_surfaces,
};

void
gen4_init_vtable_surface_functions(struct brw_context *brw)
{
   brw->vtbl.update_renderbuffer_surface = gen4_update_renderbuffer_surface;
   brw->vtbl.emit_null_surface_state = brw_emit_null_surface_state;
}

void
gen6_init_vtable_surface_functions(struct brw_context *brw)
{
   gen4_init_vtable_surface_functions(brw);
   brw->vtbl.update_renderbuffer_surface = brw_update_renderbuffer_surface;
}

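/**
 * Bind the buffer holding the compute shader's work-group count
 * (gl_NumWorkGroups).
 *
 * If the counts already live in a BO (e.g. for an indirect dispatch), that
 * BO is used directly; otherwise the three GLuint counts recorded on the
 * context are uploaded first.  Either way the shader reads them through a
 * small RAW surface.
 */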
static void
brw_upload_cs_work_groups_surface(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* _NEW_PROGRAM */
   struct gl_shader_program *prog =
      ctx->_Shader->CurrentProgram[MESA_SHADER_COMPUTE];
   /* BRW_NEW_CS_PROG_DATA */
   const struct brw_cs_prog_data *cs_prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   if (prog && cs_prog_data->uses_num_work_groups) {
      const unsigned surf_idx =
         cs_prog_data->binding_table.work_groups_start;
      uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
      drm_intel_bo *bo;
      uint32_t bo_offset;

      if (brw->compute.num_work_groups_bo == NULL) {
         bo = NULL;
         intel_upload_data(brw,
                           (void *)brw->compute.num_work_groups,
                           3 * sizeof(GLuint),
                           sizeof(GLuint),
                           &bo,
                           &bo_offset);
      } else {
         bo = brw->compute.num_work_groups_bo;
         bo_offset = brw->compute.num_work_groups_offset;
      }

      brw_emit_buffer_surface_state(brw, surf_offset,
                                    bo, bo_offset,
                                    BRW_SURFACEFORMAT_RAW,
                                    3 * sizeof(GLuint), 1, true);
      brw->ctx.NewDriverState |= BRW_NEW_SURFACES;
   }
}

const struct brw_tracked_state brw_cs_work_groups_surface = {
   .dirty = {
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CS_PROG_DATA |
             BRW_NEW_CS_WORK_GROUPS
   },
   .emit = brw_upload_cs_work_groups_surface,
};