/* r600_texture.c — revision 8698a3b85dd89c5d2fa473e7942b7dc8d25f3c8f */
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include "r600_formats.h"
#include "r600d.h"

#include <errno.h>
#include "util/u_format_s3tc.h"
#include "util/u_memory.h"

/* Copy from a full GPU texture to a transfer's staging one.
*/ 352b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattnerstatic void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer) 362b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner{ 372b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer; 382b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner struct pipe_resource *texture = transfer->resource; 392b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 402b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner ctx->resource_copy_region(ctx, &rtransfer->staging->b.b, 412b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 0, 0, 0, 0, texture, transfer->level, 422b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner &transfer->box); 432b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner} 442b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 452b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 462b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner/* Copy from a transfer's staging texture to a full GPU one. 
*/ 472b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattnerstatic void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer) 482b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner{ 492b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer; 502b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner struct pipe_resource *texture = transfer->resource; 512b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner struct pipe_box sbox; 522b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 532b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner u_box_origin_2d(transfer->box.width, transfer->box.height, &sbox); 542b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 552b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner ctx->resource_copy_region(ctx, texture, transfer->level, 5604df049014396fe97a31bf3fa8951201b2ed8ffeChris Lattner transfer->box.x, transfer->box.y, transfer->box.z, 572b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner &rtransfer->staging->b.b, 582b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 0, &sbox); 592b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner} 602b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 612b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattnerunsigned r600_texture_get_offset(struct r600_texture *rtex, 622b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner unsigned level, unsigned layer) 632b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner{ 642b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner return rtex->surface.level[level].offset + 652b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner layer * rtex->surface.level[level].slice_size; 662b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner} 67438208e8cb29c67b2177619a339b84291729b6b7Frits van Bommel 68438208e8cb29c67b2177619a339b84291729b6b7Frits van Bommelstatic int r600_init_surface(struct r600_screen *rscreen, 69438208e8cb29c67b2177619a339b84291729b6b7Frits van Bommel 
struct radeon_surface *surface, 70438208e8cb29c67b2177619a339b84291729b6b7Frits van Bommel const struct pipe_resource *ptex, 712b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner unsigned array_mode, 722b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner bool is_transfer, bool is_flushed_depth) 732b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner{ 742b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner const struct util_format_description *desc = 752b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner util_format_description(ptex->format); 762b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner bool is_depth, is_stencil; 772b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 782b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner is_depth = util_format_has_depth(desc); 7904b2f0d99feb9cdf87eb8f35483816d757d170ddChris Lattner is_stencil = util_format_has_stencil(desc); 802b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 812b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->npix_x = ptex->width0; 82878ad7afa512ef300d5df4e7ca0189775342dfc2Chris Lattner surface->npix_y = ptex->height0; 83878ad7afa512ef300d5df4e7ca0189775342dfc2Chris Lattner surface->npix_z = ptex->depth0; 8404b2f0d99feb9cdf87eb8f35483816d757d170ddChris Lattner surface->blk_w = util_format_get_blockwidth(ptex->format); 852b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->blk_h = util_format_get_blockheight(ptex->format); 862b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->blk_d = 1; 872b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->array_size = 1; 882b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->last_level = ptex->last_level; 892b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 902b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner if (rscreen->chip_class >= EVERGREEN && 912b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner !is_transfer && !is_flushed_depth && 922b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 
ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) { 9304df049014396fe97a31bf3fa8951201b2ed8ffeChris Lattner surface->bpe = 4; /* stencil is allocated separately on evergreen */ 942b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner } else { 952b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->bpe = util_format_get_blocksize(ptex->format); 962b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner /* align byte per element on dword */ 972b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner if (surface->bpe == 3) { 982b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->bpe = 4; 992b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner } 1002b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner } 1012b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 1022b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1; 10304df049014396fe97a31bf3fa8951201b2ed8ffeChris Lattner surface->flags = 0; 1042b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner 1052b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner switch (array_mode) { 1062b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner case V_038000_ARRAY_1D_TILED_THIN1: 1072b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE); 1082b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner break; 1092b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner case V_038000_ARRAY_2D_TILED_THIN1: 1102b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE); 1112b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner break; 1122b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner case V_038000_ARRAY_LINEAR_ALIGNED: 1132b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR_ALIGNED, MODE); 1142b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner break; 1152b9bc422a5e6840f5b925316bc06d5943deb610aChris 
Lattner case V_038000_ARRAY_LINEAR_GENERAL: 1162b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner default: 1172b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR, MODE); 1182b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner break; 1192b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner } 1202b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner switch (ptex->target) { 1212b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner case PIPE_TEXTURE_1D: 1222b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE); 1232b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner break; 1242b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner case PIPE_TEXTURE_RECT: 1252b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner case PIPE_TEXTURE_2D: 1262b9bc422a5e6840f5b925316bc06d5943deb610aChris Lattner surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE); 127 break; 128 case PIPE_TEXTURE_3D: 129 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE); 130 break; 131 case PIPE_TEXTURE_1D_ARRAY: 132 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE); 133 surface->array_size = ptex->array_size; 134 break; 135 case PIPE_TEXTURE_2D_ARRAY: 136 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE); 137 surface->array_size = ptex->array_size; 138 break; 139 case PIPE_TEXTURE_CUBE: 140 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE); 141 break; 142 case PIPE_BUFFER: 143 default: 144 return -EINVAL; 145 } 146 if (ptex->bind & PIPE_BIND_SCANOUT) { 147 surface->flags |= RADEON_SURF_SCANOUT; 148 } 149 150 if (!is_transfer && !is_flushed_depth && is_depth) { 151 surface->flags |= RADEON_SURF_ZBUFFER; 152 153 if (is_stencil) { 154 surface->flags |= RADEON_SURF_SBUFFER; 155 } 156 } 157 return 0; 158} 159 160static int r600_setup_surface(struct pipe_screen *screen, 161 struct r600_texture *rtex, 162 unsigned 
pitch_in_bytes_override) 163{ 164 struct pipe_resource *ptex = &rtex->resource.b.b; 165 struct r600_screen *rscreen = (struct r600_screen*)screen; 166 unsigned i; 167 int r; 168 169 r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface); 170 if (r) { 171 return r; 172 } 173 rtex->size = rtex->surface.bo_size; 174 if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) { 175 /* old ddx on evergreen over estimate alignment for 1d, only 1 level 176 * for those 177 */ 178 rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe; 179 rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override; 180 rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y; 181 if (rtex->surface.flags & RADEON_SURF_SBUFFER) { 182 rtex->surface.stencil_offset = rtex->surface.level[0].slice_size; 183 } 184 } 185 for (i = 0; i <= ptex->last_level; i++) { 186 switch (rtex->surface.level[i].mode) { 187 case RADEON_SURF_MODE_LINEAR_ALIGNED: 188 rtex->array_mode[i] = V_038000_ARRAY_LINEAR_ALIGNED; 189 break; 190 case RADEON_SURF_MODE_1D: 191 rtex->array_mode[i] = V_038000_ARRAY_1D_TILED_THIN1; 192 break; 193 case RADEON_SURF_MODE_2D: 194 rtex->array_mode[i] = V_038000_ARRAY_2D_TILED_THIN1; 195 break; 196 default: 197 case RADEON_SURF_MODE_LINEAR: 198 rtex->array_mode[i] = 0; 199 break; 200 } 201 } 202 return 0; 203} 204 205static boolean r600_texture_get_handle(struct pipe_screen* screen, 206 struct pipe_resource *ptex, 207 struct winsys_handle *whandle) 208{ 209 struct r600_texture *rtex = (struct r600_texture*)ptex; 210 struct r600_resource *resource = &rtex->resource; 211 struct radeon_surface *surface = &rtex->surface; 212 struct r600_screen *rscreen = (struct r600_screen*)screen; 213 214 rscreen->ws->buffer_set_tiling(resource->buf, 215 NULL, 216 surface->level[0].mode >= RADEON_SURF_MODE_1D ? 217 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR, 218 surface->level[0].mode >= RADEON_SURF_MODE_2D ? 
219 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR, 220 surface->bankw, surface->bankh, 221 surface->tile_split, 222 surface->stencil_tile_split, 223 surface->mtilea, 224 rtex->surface.level[0].pitch_bytes); 225 226 return rscreen->ws->buffer_get_handle(resource->buf, 227 rtex->surface.level[0].pitch_bytes, whandle); 228} 229 230static void r600_texture_destroy(struct pipe_screen *screen, 231 struct pipe_resource *ptex) 232{ 233 struct r600_texture *rtex = (struct r600_texture*)ptex; 234 struct r600_resource *resource = &rtex->resource; 235 236 if (rtex->flushed_depth_texture) 237 pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL); 238 239 pb_reference(&resource->buf, NULL); 240 FREE(rtex); 241} 242 243static const struct u_resource_vtbl r600_texture_vtbl = 244{ 245 r600_texture_get_handle, /* get_handle */ 246 r600_texture_destroy, /* resource_destroy */ 247 r600_texture_get_transfer, /* get_transfer */ 248 r600_texture_transfer_destroy, /* transfer_destroy */ 249 r600_texture_transfer_map, /* transfer_map */ 250 NULL, /* transfer_flush_region */ 251 r600_texture_transfer_unmap, /* transfer_unmap */ 252 NULL /* transfer_inline_write */ 253}; 254 255static void r600_texture_allocate_fmask(struct r600_screen *rscreen, 256 struct r600_texture *rtex) 257{ 258 /* FMASK is allocated pretty much like an ordinary texture. 259 * Here we use bpe in the units of bits, not bytes. */ 260 struct radeon_surface fmask = rtex->surface; 261 unsigned nr_samples = rtex->resource.b.b.nr_samples; 262 263 switch (nr_samples) { 264 case 2: 265 /* This should be 8,1, but we should set nsamples > 1 266 * for the allocator to treat it as a multisample surface. 267 * Let's set 4,2 then. 
*/ 268 case 4: 269 fmask.bpe = 4; 270 fmask.nsamples = 2; 271 break; 272 case 8: 273 fmask.bpe = 8; 274 fmask.nsamples = 4; 275 break; 276 case 16: 277 fmask.bpe = 16; 278 fmask.nsamples = 4; 279 break; 280 default: 281 R600_ERR("Invalid sample count for FMASK allocation.\n"); 282 return; 283 } 284 285 /* R600-R700 errata? Anyway, this fixes colorbuffer corruption. */ 286 if (rscreen->chip_class <= R700) { 287 fmask.bpe *= 2; 288 } 289 290 if (rscreen->chip_class >= EVERGREEN) { 291 fmask.bankh = nr_samples <= 4 ? 4 : 1; 292 } 293 294 if (rscreen->ws->surface_init(rscreen->ws, &fmask)) { 295 R600_ERR("Got error in surface_init while allocating FMASK.\n"); 296 return; 297 } 298 assert(fmask.level[0].mode == RADEON_SURF_MODE_2D); 299 300 /* Reserve space for FMASK while converting bits back to bytes. */ 301 rtex->fmask_bank_height = fmask.bankh; 302 rtex->fmask_offset = align(rtex->size, MAX2(256, fmask.bo_alignment)); 303 rtex->fmask_size = (fmask.bo_size + 7) / 8; 304 rtex->size = rtex->fmask_offset + rtex->fmask_size; 305#if 0 306 printf("FMASK width=%u, height=%i, bits=%u, size=%u\n", 307 fmask.npix_x, fmask.npix_y, fmask.bpe * fmask.nsamples, rtex->fmask_size); 308#endif 309} 310 311static void r600_texture_allocate_cmask(struct r600_screen *rscreen, 312 struct r600_texture *rtex) 313{ 314 unsigned cmask_tile_width = 8; 315 unsigned cmask_tile_height = 8; 316 unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height; 317 unsigned element_bits = 4; 318 unsigned cmask_cache_bits = 1024; 319 unsigned num_pipes = rscreen->tiling_info.num_channels; 320 unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes; 321 322 unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes; 323 unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements; 324 unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile); 325 unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile); 326 unsigned 
macro_tile_height = pixels_per_macro_tile / macro_tile_width; 327 328 unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width); 329 unsigned height = align(rtex->surface.npix_y, macro_tile_height); 330 331 unsigned base_align = num_pipes * pipe_interleave_bytes; 332 unsigned slice_bytes = 333 ((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements; 334 unsigned size = rtex->surface.array_size * align(slice_bytes, base_align); 335 336 assert(macro_tile_width % 128 == 0); 337 assert(macro_tile_height % 128 == 0); 338 339 rtex->cmask_slice_tile_max = ((pitch_elements * height) / (128*128)) - 1; 340 rtex->cmask_offset = align(rtex->size, MAX2(256, base_align)); 341 rtex->cmask_size = size; 342 rtex->size = rtex->cmask_offset + rtex->cmask_size; 343#if 0 344 printf("CMASK: macro tile width = %u, macro tile height = %u, " 345 "pitch elements = %u, height = %u, slice tile max = %u\n", 346 macro_tile_width, macro_tile_height, pitch_elements, height, 347 rtex->cmask_slice_tile_max); 348#endif 349} 350 351static struct r600_texture * 352r600_texture_create_object(struct pipe_screen *screen, 353 const struct pipe_resource *base, 354 unsigned pitch_in_bytes_override, 355 struct pb_buffer *buf, 356 boolean alloc_bo, 357 struct radeon_surface *surface) 358{ 359 struct r600_texture *rtex; 360 struct r600_resource *resource; 361 struct r600_screen *rscreen = (struct r600_screen*)screen; 362 int r; 363 364 rtex = CALLOC_STRUCT(r600_texture); 365 if (rtex == NULL) 366 return NULL; 367 368 resource = &rtex->resource; 369 resource->b.b = *base; 370 resource->b.vtbl = &r600_texture_vtbl; 371 pipe_reference_init(&resource->b.b.reference, 1); 372 resource->b.b.screen = screen; 373 rtex->pitch_override = pitch_in_bytes_override; 374 375 /* don't include stencil-only formats which we don't support for rendering */ 376 rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format)); 377 378 rtex->surface = *surface; 379 r = 
r600_setup_surface(screen, rtex, 380 pitch_in_bytes_override); 381 if (r) { 382 FREE(rtex); 383 return NULL; 384 } 385 386 if (base->nr_samples > 1 && !rtex->is_depth && alloc_bo) { 387 r600_texture_allocate_cmask(rscreen, rtex); 388 r600_texture_allocate_fmask(rscreen, rtex); 389 } 390 391 if (!rtex->is_depth && base->nr_samples > 1 && 392 (!rtex->fmask_size || !rtex->cmask_size)) { 393 FREE(rtex); 394 return NULL; 395 } 396 397 /* Now create the backing buffer. */ 398 if (!buf && alloc_bo) { 399 unsigned base_align = rtex->surface.bo_alignment; 400 unsigned usage = R600_TEX_IS_TILED(rtex, 0) ? PIPE_USAGE_STATIC : base->usage; 401 402 if (!r600_init_resource(rscreen, resource, rtex->size, base_align, base->bind, usage)) { 403 FREE(rtex); 404 return NULL; 405 } 406 } else if (buf) { 407 resource->buf = buf; 408 resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf); 409 resource->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM; 410 } 411 412 if (rtex->cmask_size) { 413 /* Initialize the cmask to 0xCC (= compressed state). 
*/ 414 char *ptr = rscreen->ws->buffer_map(resource->cs_buf, NULL, PIPE_TRANSFER_WRITE); 415 memset(ptr + rtex->cmask_offset, 0xCC, rtex->cmask_size); 416 rscreen->ws->buffer_unmap(resource->cs_buf); 417 } 418 return rtex; 419} 420 421struct pipe_resource *r600_texture_create(struct pipe_screen *screen, 422 const struct pipe_resource *templ) 423{ 424 struct r600_screen *rscreen = (struct r600_screen*)screen; 425 struct radeon_surface surface; 426 unsigned array_mode = 0; 427 int r; 428 429 if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER)) { 430 if (!(templ->bind & PIPE_BIND_SCANOUT) && 431 templ->usage != PIPE_USAGE_STAGING && 432 templ->usage != PIPE_USAGE_STREAM) { 433 array_mode = V_038000_ARRAY_2D_TILED_THIN1; 434 } else if (util_format_is_compressed(templ->format)) { 435 array_mode = V_038000_ARRAY_1D_TILED_THIN1; 436 } 437 } 438 439 /* XXX tiling is broken for the 422 formats */ 440 if (util_format_description(templ->format)->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) 441 array_mode = V_038000_ARRAY_LINEAR_ALIGNED; 442 443 r = r600_init_surface(rscreen, &surface, templ, array_mode, 444 templ->flags & R600_RESOURCE_FLAG_TRANSFER, 445 templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH); 446 if (r) { 447 return NULL; 448 } 449 r = rscreen->ws->surface_best(rscreen->ws, &surface); 450 if (r) { 451 return NULL; 452 } 453 return (struct pipe_resource *)r600_texture_create_object(screen, templ, 454 0, NULL, TRUE, &surface); 455} 456 457static struct pipe_surface *r600_create_surface(struct pipe_context *pipe, 458 struct pipe_resource *texture, 459 const struct pipe_surface *templ) 460{ 461 struct r600_texture *rtex = (struct r600_texture*)texture; 462 struct r600_surface *surface = CALLOC_STRUCT(r600_surface); 463 unsigned level = templ->u.tex.level; 464 465 assert(templ->u.tex.first_layer == templ->u.tex.last_layer); 466 if (surface == NULL) 467 return NULL; 468 pipe_reference_init(&surface->base.reference, 1); 469 pipe_resource_reference(&surface->base.texture, 
texture); 470 surface->base.context = pipe; 471 surface->base.format = templ->format; 472 surface->base.width = rtex->surface.level[level].npix_x; 473 surface->base.height = rtex->surface.level[level].npix_y; 474 surface->base.usage = templ->usage; 475 surface->base.u = templ->u; 476 return &surface->base; 477} 478 479static void r600_surface_destroy(struct pipe_context *pipe, 480 struct pipe_surface *surface) 481{ 482 pipe_resource_reference(&surface->texture, NULL); 483 FREE(surface); 484} 485 486struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen, 487 const struct pipe_resource *templ, 488 struct winsys_handle *whandle) 489{ 490 struct r600_screen *rscreen = (struct r600_screen*)screen; 491 struct pb_buffer *buf = NULL; 492 unsigned stride = 0; 493 unsigned array_mode = 0; 494 enum radeon_bo_layout micro, macro; 495 struct radeon_surface surface; 496 int r; 497 498 /* Support only 2D textures without mipmaps */ 499 if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) || 500 templ->depth0 != 1 || templ->last_level != 0) 501 return NULL; 502 503 buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride); 504 if (!buf) 505 return NULL; 506 507 rscreen->ws->buffer_get_tiling(buf, µ, ¯o, 508 &surface.bankw, &surface.bankh, 509 &surface.tile_split, 510 &surface.stencil_tile_split, 511 &surface.mtilea); 512 513 if (macro == RADEON_LAYOUT_TILED) 514 array_mode = V_0280A0_ARRAY_2D_TILED_THIN1; 515 else if (micro == RADEON_LAYOUT_TILED) 516 array_mode = V_0280A0_ARRAY_1D_TILED_THIN1; 517 else 518 array_mode = 0; 519 520 r = r600_init_surface(rscreen, &surface, templ, array_mode, false, false); 521 if (r) { 522 return NULL; 523 } 524 return (struct pipe_resource *)r600_texture_create_object(screen, templ, 525 stride, buf, FALSE, &surface); 526} 527 528bool r600_init_flushed_depth_texture(struct pipe_context *ctx, 529 struct pipe_resource *texture, 530 struct r600_texture **staging) 531{ 532 struct r600_texture *rtex = 
(struct r600_texture*)texture; 533 struct pipe_resource resource; 534 struct r600_texture **flushed_depth_texture = staging ? 535 staging : &rtex->flushed_depth_texture; 536 537 if (!staging && rtex->flushed_depth_texture) 538 return true; /* it's ready */ 539 540 resource.target = texture->target; 541 resource.format = texture->format; 542 resource.width0 = texture->width0; 543 resource.height0 = texture->height0; 544 resource.depth0 = texture->depth0; 545 resource.array_size = texture->array_size; 546 resource.last_level = texture->last_level; 547 resource.nr_samples = texture->nr_samples; 548 resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_STATIC; 549 resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL; 550 resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH; 551 552 if (staging) 553 resource.flags |= R600_RESOURCE_FLAG_TRANSFER; 554 555 *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource); 556 if (*flushed_depth_texture == NULL) { 557 R600_ERR("failed to create temporary texture to hold flushed depth\n"); 558 return false; 559 } 560 561 (*flushed_depth_texture)->is_flushing_texture = TRUE; 562 return true; 563} 564 565/* Needs adjustment for pixelformat: 566 */ 567static INLINE unsigned u_box_volume( const struct pipe_box *box ) 568{ 569 return box->width * box->depth * box->height; 570} 571 572struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx, 573 struct pipe_resource *texture, 574 unsigned level, 575 unsigned usage, 576 const struct pipe_box *box) 577{ 578 struct r600_context *rctx = (struct r600_context*)ctx; 579 struct r600_texture *rtex = (struct r600_texture*)texture; 580 struct pipe_resource resource; 581 struct r600_transfer *trans; 582 boolean use_staging_texture = FALSE; 583 584 /* We cannot map a tiled texture directly because the data is 585 * in a different order, therefore we do detiling using a blit. 
586 * 587 * Also, use a temporary in GTT memory for read transfers, as 588 * the CPU is much happier reading out of cached system memory 589 * than uncached VRAM. 590 */ 591 if (R600_TEX_IS_TILED(rtex, level)) { 592 use_staging_texture = TRUE; 593 } 594 595 if ((usage & PIPE_TRANSFER_READ) && u_box_volume(box) > 1024) 596 use_staging_texture = TRUE; 597 598 /* Use a staging texture for uploads if the underlying BO is busy. */ 599 if (!(usage & PIPE_TRANSFER_READ) && 600 (rctx->ws->cs_is_buffer_referenced(rctx->cs, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) || 601 rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) { 602 use_staging_texture = TRUE; 603 } 604 605 if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) { 606 use_staging_texture = FALSE; 607 } 608 609 if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) { 610 return NULL; 611 } 612 613 trans = CALLOC_STRUCT(r600_transfer); 614 if (trans == NULL) 615 return NULL; 616 pipe_resource_reference(&trans->transfer.resource, texture); 617 trans->transfer.level = level; 618 trans->transfer.usage = usage; 619 trans->transfer.box = *box; 620 if (rtex->is_depth) { 621 /* XXX: only readback the rectangle which is being mapped? 
622 */ 623 /* XXX: when discard is true, no need to read back from depth texture 624 */ 625 struct r600_texture *staging_depth; 626 627 if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) { 628 R600_ERR("failed to create temporary texture to hold untiled copy\n"); 629 pipe_resource_reference(&trans->transfer.resource, NULL); 630 FREE(trans); 631 return NULL; 632 } 633 634 r600_blit_decompress_depth(ctx, rtex, staging_depth, 635 level, level, 636 box->z, box->z + box->depth - 1, 637 0, 0); 638 639 trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes; 640 trans->offset = r600_texture_get_offset(staging_depth, level, box->z); 641 trans->staging = (struct r600_resource*)staging_depth; 642 return &trans->transfer; 643 } else if (use_staging_texture) { 644 resource.target = PIPE_TEXTURE_2D; 645 resource.format = texture->format; 646 resource.width0 = box->width; 647 resource.height0 = box->height; 648 resource.depth0 = 1; 649 resource.array_size = 1; 650 resource.last_level = 0; 651 resource.nr_samples = 0; 652 resource.usage = PIPE_USAGE_STAGING; 653 resource.bind = 0; 654 resource.flags = R600_RESOURCE_FLAG_TRANSFER; 655 /* For texture reading, the temporary (detiled) texture is used as 656 * a render target when blitting from a tiled texture. */ 657 if (usage & PIPE_TRANSFER_READ) { 658 resource.bind |= PIPE_BIND_RENDER_TARGET; 659 } 660 /* For texture writing, the temporary texture is used as a sampler 661 * when blitting into a tiled texture. */ 662 if (usage & PIPE_TRANSFER_WRITE) { 663 resource.bind |= PIPE_BIND_SAMPLER_VIEW; 664 } 665 /* Create the temporary texture. 
 */
		trans->staging = (struct r600_resource*)ctx->screen->resource_create(ctx->screen, &resource);
		if (trans->staging == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		/* The CPU sees the staging texture's linear layout, so expose
		 * its stride rather than the real (tiled) texture's. */
		trans->transfer.stride =
			((struct r600_texture *)trans->staging)->surface.level[0].pitch_bytes;
		if (usage & PIPE_TRANSFER_READ) {
			/* Populate the staging copy from the real texture
			 * before the caller maps it. */
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			r600_flush(ctx, NULL, 0);
		}
		return &trans->transfer;
	}
	/* Direct (non-staging) path: map the texture in place. */
	trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
	trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
	trans->offset = r600_texture_get_offset(rtex, level, box->z);
	return &trans->transfer;
}

/* Tear down a texture transfer.  If the transfer was a write through a
 * staging texture, the staged data is copied back into the real texture
 * first; then every reference held by the transfer is dropped and the
 * transfer itself is freed. */
void r600_texture_transfer_destroy(struct pipe_context *ctx,
				   struct pipe_transfer *transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth) {
			/* Depth: write the staged box back with a plain
			 * region copy. */
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			/* Color: copy the staging texture back into the
			 * real (tiled) one. */
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}

/* Map a texture transfer for CPU access.
 * Returns a pointer to the first texel of the transfer box inside the
 * mapped buffer, or NULL if the winsys mapping fails. */
void* r600_texture_transfer_map(struct pipe_context *ctx,
				struct pipe_transfer* transfer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct radeon_winsys_cs_handle *buf;
	struct r600_texture *rtex =
			(struct r600_texture*)transfer->resource;
	enum pipe_format format = transfer->resource->format;
	unsigned offset = 0;
	char *map;

	/* OpenCL-style global buffers have their own mapping path. */
	if ((transfer->resource->bind & PIPE_BIND_GLOBAL) && transfer->resource->target == PIPE_BUFFER) {
		return r600_compute_global_transfer_map(ctx, transfer);
	}

	/* Map the staging texture if there is one, else the resource itself. */
	if (rtransfer->staging) {
		buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
	} else {
		buf = ((struct r600_resource *)transfer->resource)->cs_buf;
	}

	/* When mapping the resource directly, or a depth staging copy, add
	 * the offset of the requested box (in block units) within the level.
	 * NOTE(review): a color staging texture is presumably allocated to
	 * hold just the transfer box, hence offset 0 — confirm against the
	 * staging-resource setup in r600_texture_get_transfer. */
	if (rtex->is_depth || !rtransfer->staging)
		offset = rtransfer->offset +
			transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);

	if (!(map = rctx->ws->buffer_map(buf, rctx->cs, transfer->usage))) {
		return NULL;
	}

	return map + offset;
}

/* Unmap a texture transfer previously mapped with
 * r600_texture_transfer_map.  Unmaps whichever buffer was mapped
 * (staging copy or the resource itself). */
void r600_texture_transfer_unmap(struct pipe_context *ctx,
				 struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct radeon_winsys_cs_handle *buf;

	/* Global compute buffers are unmapped by their own path, mirroring
	 * the map side. */
	if ((transfer->resource->bind & PIPE_BIND_GLOBAL) && transfer->resource->target == PIPE_BUFFER) {
		return r600_compute_global_transfer_unmap(ctx, transfer);
	}

	if (rtransfer->staging) {
		buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
	} else {
		buf = ((struct r600_resource *)transfer->resource)->cs_buf;
	}
	rctx->ws->buffer_unmap(buf);
}

/* Plug the surface create/destroy hooks into the context vtable. */
void r600_init_surface_functions(struct r600_context *r600)
{
	r600->context.create_surface = r600_create_surface;
	r600->context.surface_destroy = r600_surface_destroy;
}

/* Pack a 4-channel destination-select swizzle into the texture resource
 * word.  Each channel's selector is placed at swizzle_shift[i]
 * (bits 16/19/22/25 — the DST_SEL fields).  If swizzle_view is
 * non-NULL it is composed with the format swizzle first. */
static unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
					  const unsigned char *swizzle_view)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};

	if (swizzle_view) {
		/* Compose the view swizzle on top of the format swizzle. */
		util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case UTIL_FORMAT_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* UTIL_FORMAT_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
		}
	}
	return result;
}

/* texture format translate:
 * Map a gallium pipe_format onto the hardware FMT_* texture format.
 * Returns the FMT_* code, or ~0 if the format is unsupported (including
 * sRGB formats the hardware cannot degamma).  On success only, the
 * resource-word flags (swizzle, component sign, number format, degamma)
 * are stored through word4_p and the YUV flag word through
 * yuv_format_p when those pointers are non-NULL. */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p)
{
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	/* Cached R600_ENABLE_S3TC decision; -1 means "not probed yet".
	 * NOTE(review): lazy init of a function-local static is not
	 * thread-safe — confirm all callers run under one context lock. */
	static int r600_enable_s3tc = -1;
	/* Set only for formats where the sRGB variant is supported. */
	bool is_srgb_valid = FALSE;

	int i;
	/* Per-channel FORMAT_COMP "signed" bits for the resource word. */
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};
	desc = util_format_description(format);

	word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view);

	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
	/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		case PIPE_FORMAT_Z16_UNORM:
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_X24S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_S8_UINT:
			result = FMT_8;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT:
			result = FMT_32_FLOAT;
			goto out_word4;
		case PIPE_FORMAT_X32_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* XXX */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}

	/* Probe once whether S3TC/RGTC sampling may be used: always on
	 * new-enough kernels, otherwise only if the user opted in. */
	if (r600_enable_s3tc == -1) {
		struct r600_screen *rscreen = (struct r600_screen *)screen;
		if (rscreen->info.drm_minor >= 9)
			r600_enable_s3tc = 1;
		else
			r600_enable_s3tc = debug_get_bool_option("R600_ENABLE_S3TC", FALSE);
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		if (!r600_enable_s3tc)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_RGTC1_SNORM:
		case PIPE_FORMAT_LATC1_SNORM:
			word4 |= sign_bit[0];
			/* fall through */
		case PIPE_FORMAT_RGTC1_UNORM:
		case PIPE_FORMAT_LATC1_UNORM:
			result = FMT_BC4;
			goto out_word4;
		case PIPE_FORMAT_RGTC2_SNORM:
		case PIPE_FORMAT_LATC2_SNORM:
			word4 |= sign_bit[0] | sign_bit[1];
			/* fall through */
		case PIPE_FORMAT_RGTC2_UNORM:
		case PIPE_FORMAT_LATC2_UNORM:
			result = FMT_BC5;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {

		if (!r600_enable_s3tc)
			goto out_unknown;

		/* Also require the software S3TC support to be present. */
		if (!util_format_s3tc_enabled) {
			goto out_unknown;
		}

		switch (format) {
		case PIPE_FORMAT_DXT1_RGB:
		case PIPE_FORMAT_DXT1_RGBA:
		case PIPE_FORMAT_DXT1_SRGB:
		case PIPE_FORMAT_DXT1_SRGBA:
			result = FMT_BC1;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT3_RGBA:
		case PIPE_FORMAT_DXT3_SRGBA:
			result = FMT_BC2;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT5_RGBA:
		case PIPE_FORMAT_DXT5_SRGBA:
			result = FMT_BC3;
			is_srgb_valid = TRUE;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
		switch (format) {
		case PIPE_FORMAT_R8G8_B8G8_UNORM:
		case PIPE_FORMAT_G8R8_B8R8_UNORM:
			result = FMT_GB_GR;
			goto out_word4;
		case PIPE_FORMAT_G8R8_G8B8_UNORM:
		case PIPE_FORMAT_R8G8_R8B8_UNORM:
			result = FMT_BG_RG;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	/* Packed float formats have dedicated hardware encodings. */
	if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
		result = FMT_5_9_9_9_SHAREDEXP;
		goto out_word4;
	} else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
		result = FMT_10_11_11_FLOAT;
		goto out_word4;
	}


	/* Record which channels are signed. */
	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}

	/* R8G8Bx_SNORM - XXX CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}

	/* Non-uniform formats. */
	if (!uniform) {
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[0].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
		switch(desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;

	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
#if 0
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}
#endif
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[i].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);

		/* Select FMT_* by channel size and count. */
		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				/* 8-bit RGBA is the one uniform format with
				 * working sRGB degamma. */
				is_srgb_valid = TRUE;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
		}
		goto out_unknown;
	}

out_word4:

	/* Reject sRGB variants of formats whose degamma path is unproven. */
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid)
		return ~0;
	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;
out_unknown:
	/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
	return ~0;
}