/* intel_fbo.c revision e0d67a3a8b4ec73df7e6f818989480a3dd1ee706 */
1d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien/************************************************************************** 25270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * 3d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. 45270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * All Rights Reserved. 5d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien * 6d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien * Permission is hereby granted, free of charge, to any person obtaining a 7d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien * copy of this software and associated documentation files (the 8d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien * "Software"), to deal in the Software without restriction, including 9d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien * without limitation the rights to use, copy, modify, merge, publish, 10d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien * distribute, sub license, and/or sell copies of the Software, and to 115270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * permit persons to whom the Software is furnished to do so, subject to 125270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * the following conditions: 135270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * 145270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * The above copyright notice and this permission notice (including the 155270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * next paragraph) shall be included in all copies or substantial portions 16d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien * of the Software. 
175270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * 185270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 195270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 202e5a5ee613fae456a8ed4282acede56860682f4fLogan Chien * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 215270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 225270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 235270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 245270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 252e5a5ee613fae456a8ed4282acede56860682f4fLogan Chien * 262e5a5ee613fae456a8ed4282acede56860682f4fLogan Chien **************************************************************************/ 272e5a5ee613fae456a8ed4282acede56860682f4fLogan Chien 288019aac390baf43b3907d92928bad7fbe62588c6Stephen Hines 292e5a5ee613fae456a8ed4282acede56860682f4fLogan Chien#include "main/enums.h" 302e5a5ee613fae456a8ed4282acede56860682f4fLogan Chien#include "main/imports.h" 315270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#include "main/macros.h" 325270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#include "main/mfeatures.h" 335270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#include "main/mtypes.h" 345270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#include "main/fbobject.h" 35d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "main/framebuffer.h" 36d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "main/renderbuffer.h" 37d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "main/context.h" 38d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include 
"main/teximage.h" 39d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "main/image.h" 40d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien 41d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "swrast/swrast.h" 42d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "drivers/common/meta.h" 43d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien 44d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "intel_context.h" 45d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "intel_batchbuffer.h" 46d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "intel_buffers.h" 47d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "intel_blit.h" 48d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "intel_fbo.h" 495270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#include "intel_mipmap_tree.h" 505270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#include "intel_regions.h" 51d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#include "intel_tex.h" 525270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#include "intel_span.h" 535270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#ifndef I915 545270e6c7832a375c46ad7a7d820ffdd3431f5d8eLogan Chien#include "brw_context.h" 55d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#endif 56d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien 57d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien#define FILE_DEBUG_FLAG DEBUG_FBO 58d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien 59d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien 60d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chienbool 61d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chienintel_framebuffer_has_hiz(struct gl_framebuffer *fb) 62d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien{ 63d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien struct intel_renderbuffer *rb = NULL; 64d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien if (fb) 65d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien rb = intel_get_renderbuffer(fb, 
BUFFER_DEPTH); 66d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien return rb && rb->mt && rb->mt->hiz_mt; 67d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien} 68d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien 69d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chienstruct intel_region* 7048559a3be6ba305b80242908d5e05baeef6420f6Shih-wei Liaointel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex) 7148559a3be6ba305b80242908d5e05baeef6420f6Shih-wei Liao{ 7248559a3be6ba305b80242908d5e05baeef6420f6Shih-wei Liao struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex); 73d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien if (irb && irb->mt) 74d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien return irb->mt->region; 75d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien else 76d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien return NULL; 77d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien} 78d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien 79d2351e5c088147b5d71d5745cf07b5085a7f0073Logan Chien/** 80 * Create a new framebuffer object. 81 */ 82static struct gl_framebuffer * 83intel_new_framebuffer(struct gl_context * ctx, GLuint name) 84{ 85 /* Only drawable state in intel_framebuffer at this time, just use Mesa's 86 * class 87 */ 88 return _mesa_new_framebuffer(ctx, name); 89} 90 91 92/** Called by gl_renderbuffer::Delete() */ 93static void 94intel_delete_renderbuffer(struct gl_renderbuffer *rb) 95{ 96 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 97 98 ASSERT(irb); 99 100 intel_miptree_release(&irb->mt); 101 102 _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL); 103 _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL); 104 105 free(irb); 106} 107 108/** 109 * \brief Map a renderbuffer through the GTT. 
110 * 111 * \see intel_map_renderbuffer() 112 */ 113static void 114intel_map_renderbuffer_gtt(struct gl_context *ctx, 115 struct gl_renderbuffer *rb, 116 GLuint x, GLuint y, GLuint w, GLuint h, 117 GLbitfield mode, 118 GLubyte **out_map, 119 GLint *out_stride) 120{ 121 struct intel_context *intel = intel_context(ctx); 122 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 123 GLubyte *map; 124 int stride, flip_stride; 125 126 assert(irb->mt); 127 128 intel_renderbuffer_resolve_depth(intel, irb); 129 if (mode & GL_MAP_WRITE_BIT) { 130 intel_renderbuffer_set_needs_hiz_resolve(irb); 131 } 132 133 irb->map_mode = mode; 134 irb->map_x = x; 135 irb->map_y = y; 136 irb->map_w = w; 137 irb->map_h = h; 138 139 stride = irb->mt->region->pitch * irb->mt->region->cpp; 140 141 if (rb->Name == 0) { 142 y = irb->mt->region->height - 1 - y; 143 flip_stride = -stride; 144 } else { 145 x += irb->draw_x; 146 y += irb->draw_y; 147 flip_stride = stride; 148 } 149 150 if (drm_intel_bo_references(intel->batch.bo, irb->mt->region->bo)) { 151 intel_batchbuffer_flush(intel); 152 } 153 154 drm_intel_gem_bo_map_gtt(irb->mt->region->bo); 155 156 map = irb->mt->region->bo->virtual; 157 map += x * irb->mt->region->cpp; 158 map += (int)y * stride; 159 160 *out_map = map; 161 *out_stride = flip_stride; 162 163 DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n", 164 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 165 x, y, w, h, *out_map, *out_stride); 166} 167 168/** 169 * \brief Map a renderbuffer by blitting it to a temporary gem buffer. 170 * 171 * On gen6+, we have LLC sharing, which means we can get high-performance 172 * access to linear-mapped buffers. 173 * 174 * This function allocates a temporary gem buffer at 175 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and 176 * returns a map of that. (Note: Only X tiled buffers can be blitted). 
177 * 178 * \see intel_renderbuffer::map_bo 179 * \see intel_map_renderbuffer() 180 */ 181static void 182intel_map_renderbuffer_blit(struct gl_context *ctx, 183 struct gl_renderbuffer *rb, 184 GLuint x, GLuint y, GLuint w, GLuint h, 185 GLbitfield mode, 186 GLubyte **out_map, 187 GLint *out_stride) 188{ 189 struct intel_context *intel = intel_context(ctx); 190 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 191 192 int src_x, src_y; 193 int dst_stride; 194 195 assert(irb->mt->region); 196 assert(intel->gen >= 6); 197 assert(!(mode & GL_MAP_WRITE_BIT)); 198 assert(irb->mt->region->tiling == I915_TILING_X); 199 200 irb->map_mode = mode; 201 irb->map_x = x; 202 irb->map_y = y; 203 irb->map_w = w; 204 irb->map_h = h; 205 206 dst_stride = ALIGN(w * irb->mt->region->cpp, 4); 207 208 if (rb->Name) { 209 src_x = x + irb->draw_x; 210 src_y = y + irb->draw_y; 211 } else { 212 src_x = x; 213 src_y = irb->mt->region->height - y - h; 214 } 215 216 irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp", 217 dst_stride * h, 4096); 218 219 /* We don't do the flip in the blit, because it's always so tricky to get 220 * right. 221 */ 222 if (irb->map_bo && 223 intelEmitCopyBlit(intel, 224 irb->mt->region->cpp, 225 irb->mt->region->pitch, irb->mt->region->bo, 226 0, irb->mt->region->tiling, 227 dst_stride / irb->mt->region->cpp, irb->map_bo, 228 0, I915_TILING_NONE, 229 src_x, src_y, 230 0, 0, 231 w, h, 232 GL_COPY)) { 233 intel_batchbuffer_flush(intel); 234 drm_intel_bo_map(irb->map_bo, false); 235 236 if (rb->Name) { 237 *out_map = irb->map_bo->virtual; 238 *out_stride = dst_stride; 239 } else { 240 *out_map = irb->map_bo->virtual + (h - 1) * dst_stride; 241 *out_stride = -dst_stride; 242 } 243 244 DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n", 245 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 246 src_x, src_y, w, h, *out_map, *out_stride); 247 } else { 248 /* Fallback to GTT mapping. 
*/ 249 drm_intel_bo_unreference(irb->map_bo); 250 irb->map_bo = NULL; 251 intel_map_renderbuffer_gtt(ctx, rb, 252 x, y, w, h, 253 mode, 254 out_map, out_stride); 255 } 256} 257 258/** 259 * \brief Map a depthstencil buffer with separate stencil. 260 * 261 * A depthstencil renderbuffer, if using separate stencil, consists of a depth 262 * renderbuffer and a hidden stencil renderbuffer. This function maps the 263 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and 264 * returns that as the mapped pointer. The caller need not be aware of the 265 * hidden stencil buffer and may safely assume that the mapped pointer points 266 * to a MESA_FORMAT_S8_Z24 buffer 267 * 268 * The consistency between the depth buffer's S8 bits and the hidden stencil 269 * buffer is managed within intel_map_renderbuffer() and 270 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits 271 * according to the map mode. 272 * 273 * \see intel_map_renderbuffer() 274 * \see intel_unmap_renderbuffer_separate_s8z24() 275 */ 276static void 277intel_map_renderbuffer_separate_s8z24(struct gl_context *ctx, 278 struct gl_renderbuffer *rb, 279 GLuint x, GLuint y, GLuint w, GLuint h, 280 GLbitfield mode, 281 GLubyte **out_map, 282 GLint *out_stride) 283{ 284 struct intel_context *intel = intel_context(ctx); 285 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 286 287 uint8_t *s8z24_map; 288 int32_t s8z24_stride; 289 290 struct intel_renderbuffer *s8_irb; 291 uint8_t *s8_map; 292 293 assert(rb->Name != 0); 294 assert(rb->Format == MESA_FORMAT_S8_Z24); 295 assert(irb->wrapped_depth != NULL); 296 assert(irb->wrapped_stencil != NULL); 297 298 irb->map_mode = mode; 299 irb->map_x = x; 300 irb->map_y = y; 301 irb->map_w = w; 302 irb->map_h = h; 303 304 /* Map with write mode for the gather below. 
*/ 305 intel_map_renderbuffer_gtt(ctx, irb->wrapped_depth, 306 x, y, w, h, mode | GL_MAP_WRITE_BIT, 307 &s8z24_map, &s8z24_stride); 308 309 s8_irb = intel_renderbuffer(irb->wrapped_stencil); 310 s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_READ_BIT); 311 312 /* Gather the stencil buffer into the depth buffer. */ 313 for (uint32_t pix_y = 0; pix_y < h; ++pix_y) { 314 for (uint32_t pix_x = 0; pix_x < w; ++pix_x) { 315 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch, 316 x + pix_x, 317 y + pix_y); 318 ptrdiff_t s8z24_offset = pix_y * s8z24_stride 319 + pix_x * 4 320 + 3; 321 s8z24_map[s8z24_offset] = s8_map[s8_offset]; 322 } 323 } 324 325 intel_region_unmap(intel, s8_irb->mt->region); 326 327 *out_map = s8z24_map; 328 *out_stride = s8z24_stride; 329} 330 331/** 332 * \see dd_function_table::MapRenderbuffer 333 */ 334static void 335intel_map_renderbuffer(struct gl_context *ctx, 336 struct gl_renderbuffer *rb, 337 GLuint x, GLuint y, GLuint w, GLuint h, 338 GLbitfield mode, 339 GLubyte **out_map, 340 GLint *out_stride) 341{ 342 struct intel_context *intel = intel_context(ctx); 343 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 344 345 /* We sometimes get called with this by our intel_span.c usage. */ 346 if (!irb->mt && !irb->wrapped_depth) { 347 *out_map = NULL; 348 *out_stride = 0; 349 return; 350 } 351 352 if (rb->Format == MESA_FORMAT_S8) { 353 void *map; 354 int stride; 355 356 /* For a window-system renderbuffer, we need to flip the mapping we 357 * receive upside-down. So we need to ask for a rectangle on flipped 358 * vertically, and we then return a pointer to the bottom of it with a 359 * negative stride. 
360 */ 361 if (rb->Name == 0) { 362 y = rb->Height - y - h; 363 } 364 365 intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer, 366 x, y, w, h, mode, &map, &stride); 367 368 if (rb->Name == 0) { 369 map += (h - 1) * stride; 370 stride = -stride; 371 } 372 373 DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n", 374 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 375 x, y, w, h, *out_map, *out_stride); 376 377 *out_map = map; 378 *out_stride = stride; 379 } else if (irb->wrapped_depth) { 380 intel_map_renderbuffer_separate_s8z24(ctx, rb, x, y, w, h, mode, 381 out_map, out_stride); 382 } else if (intel->gen >= 6 && 383 !(mode & GL_MAP_WRITE_BIT) && 384 irb->mt->region->tiling == I915_TILING_X) { 385 intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode, 386 out_map, out_stride); 387 } else { 388 intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode, 389 out_map, out_stride); 390 } 391} 392 393/** 394 * \brief Unmap a depthstencil renderbuffer with separate stencil. 395 * 396 * \see intel_map_renderbuffer_separate_s8z24() 397 * \see intel_unmap_renderbuffer() 398 */ 399static void 400intel_unmap_renderbuffer_separate_s8z24(struct gl_context *ctx, 401 struct gl_renderbuffer *rb) 402{ 403 struct intel_context *intel = intel_context(ctx); 404 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 405 struct intel_renderbuffer *s8z24_irb; 406 407 assert(rb->Name != 0); 408 assert(rb->Format == MESA_FORMAT_S8_Z24); 409 assert(irb->wrapped_depth != NULL); 410 assert(irb->wrapped_stencil != NULL); 411 412 s8z24_irb = intel_renderbuffer(irb->wrapped_depth); 413 414 if (irb->map_mode & GL_MAP_WRITE_BIT) { 415 /* Copy the stencil bits from the depth buffer into the stencil buffer. 
416 */ 417 uint32_t map_x = irb->map_x; 418 uint32_t map_y = irb->map_y; 419 uint32_t map_w = irb->map_w; 420 uint32_t map_h = irb->map_h; 421 422 struct intel_renderbuffer *s8_irb; 423 uint8_t *s8_map; 424 425 s8_irb = intel_renderbuffer(irb->wrapped_stencil); 426 s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_WRITE_BIT); 427 428 int32_t s8z24_stride = 4 * s8z24_irb->mt->region->pitch; 429 uint8_t *s8z24_map = s8z24_irb->mt->region->bo->virtual 430 + map_y * s8z24_stride 431 + map_x * 4; 432 433 for (uint32_t pix_y = 0; pix_y < map_h; ++pix_y) { 434 for (uint32_t pix_x = 0; pix_x < map_w; ++pix_x) { 435 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch, 436 map_x + pix_x, 437 map_y + pix_y); 438 ptrdiff_t s8z24_offset = pix_y * s8z24_stride 439 + pix_x * 4 440 + 3; 441 s8_map[s8_offset] = s8z24_map[s8z24_offset]; 442 } 443 } 444 445 intel_region_unmap(intel, s8_irb->mt->region); 446 } 447 448 drm_intel_gem_bo_unmap_gtt(s8z24_irb->mt->region->bo); 449} 450 451/** 452 * \see dd_function_table::UnmapRenderbuffer 453 */ 454static void 455intel_unmap_renderbuffer(struct gl_context *ctx, 456 struct gl_renderbuffer *rb) 457{ 458 struct intel_context *intel = intel_context(ctx); 459 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 460 461 DBG("%s: rb %d (%s)\n", __FUNCTION__, 462 rb->Name, _mesa_get_format_name(rb->Format)); 463 464 if (rb->Format == MESA_FORMAT_S8) { 465 intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer); 466 } else if (irb->wrapped_depth) { 467 intel_unmap_renderbuffer_separate_s8z24(ctx, rb); 468 } else if (irb->map_bo) { 469 /* Paired with intel_map_renderbuffer_blit(). */ 470 drm_intel_bo_unmap(irb->map_bo); 471 drm_intel_bo_unreference(irb->map_bo); 472 irb->map_bo = 0; 473 } else { 474 /* Paired with intel_map_renderbuffer_gtt(). */ 475 if (irb->mt) { 476 /* The miptree may be null when intel_map_renderbuffer() is 477 * called from intel_span.c. 
478 */ 479 drm_intel_gem_bo_unmap_gtt(irb->mt->region->bo); 480 } 481 } 482} 483 484/** 485 * Return a pointer to a specific pixel in a renderbuffer. 486 */ 487static void * 488intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb, 489 GLint x, GLint y) 490{ 491 /* By returning NULL we force all software rendering to go through 492 * the span routines. 493 */ 494 return NULL; 495} 496 497 498/** 499 * Called via glRenderbufferStorageEXT() to set the format and allocate 500 * storage for a user-created renderbuffer. 501 */ 502GLboolean 503intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 504 GLenum internalFormat, 505 GLuint width, GLuint height) 506{ 507 struct intel_context *intel = intel_context(ctx); 508 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 509 int cpp, tiling; 510 511 ASSERT(rb->Name != 0); 512 513 switch (internalFormat) { 514 default: 515 /* Use the same format-choice logic as for textures. 516 * Renderbuffers aren't any different from textures for us, 517 * except they're less useful because you can't texture with 518 * them. 519 */ 520 rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat, 521 GL_NONE, GL_NONE); 522 break; 523 case GL_STENCIL_INDEX: 524 case GL_STENCIL_INDEX1_EXT: 525 case GL_STENCIL_INDEX4_EXT: 526 case GL_STENCIL_INDEX8_EXT: 527 case GL_STENCIL_INDEX16_EXT: 528 /* These aren't actual texture formats, so force them here. 
*/ 529 if (intel->has_separate_stencil) { 530 rb->Format = MESA_FORMAT_S8; 531 } else { 532 assert(!intel->must_use_separate_stencil); 533 rb->Format = MESA_FORMAT_S8_Z24; 534 } 535 break; 536 } 537 538 rb->Width = width; 539 rb->Height = height; 540 rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat); 541 rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format); 542 cpp = _mesa_get_format_bytes(rb->Format); 543 544 intel_flush(ctx); 545 546 intel_miptree_release(&irb->mt); 547 548 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__, 549 _mesa_lookup_enum_by_nr(internalFormat), 550 _mesa_get_format_name(rb->Format), width, height); 551 552 tiling = I915_TILING_NONE; 553 if (intel->use_texture_tiling) { 554 GLenum base_format = _mesa_get_format_base_format(rb->Format); 555 556 if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT || 557 base_format == GL_STENCIL_INDEX || 558 base_format == GL_DEPTH_STENCIL)) 559 tiling = I915_TILING_Y; 560 else 561 tiling = I915_TILING_X; 562 } 563 564 if (irb->Base.Format == MESA_FORMAT_S8) { 565 /* 566 * The stencil buffer is W tiled. However, we request from the kernel a 567 * non-tiled buffer because the GTT is incapable of W fencing. 568 * 569 * The stencil buffer has quirky pitch requirements. From Vol 2a, 570 * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch": 571 * The pitch must be set to 2x the value computed based on width, as 572 * the stencil buffer is stored with two rows interleaved. 573 * To accomplish this, we resort to the nasty hack of doubling the drm 574 * region's cpp and halving its height. 575 * 576 * If we neglect to double the pitch, then render corruption occurs. 
577 */ 578 irb->mt = intel_miptree_create_for_renderbuffer( 579 intel, 580 rb->Format, 581 I915_TILING_NONE, 582 cpp * 2, 583 ALIGN(width, 64), 584 ALIGN((height + 1) / 2, 64)); 585 if (!irb->mt) 586 return false; 587 588 } else if (irb->Base.Format == MESA_FORMAT_S8_Z24 589 && intel->has_separate_stencil) { 590 591 bool ok = true; 592 struct gl_renderbuffer *depth_rb; 593 struct gl_renderbuffer *stencil_rb; 594 struct intel_renderbuffer *depth_irb, *stencil_irb; 595 596 depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height, 597 MESA_FORMAT_X8_Z24); 598 stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height, 599 MESA_FORMAT_S8); 600 ok = depth_rb && stencil_rb; 601 ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb, 602 depth_rb->InternalFormat, 603 width, height); 604 ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb, 605 stencil_rb->InternalFormat, 606 width, height); 607 608 if (!ok) { 609 if (depth_rb) { 610 intel_delete_renderbuffer(depth_rb); 611 } 612 if (stencil_rb) { 613 intel_delete_renderbuffer(stencil_rb); 614 } 615 return false; 616 } 617 618 depth_irb = intel_renderbuffer(depth_rb); 619 stencil_irb = intel_renderbuffer(stencil_rb); 620 621 intel_miptree_reference(&depth_irb->mt->stencil_mt, stencil_irb->mt); 622 intel_miptree_reference(&irb->mt, depth_irb->mt); 623 624 depth_rb->Wrapped = rb; 625 stencil_rb->Wrapped = rb; 626 _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb); 627 _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb); 628 629 } else { 630 irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format, 631 tiling, cpp, 632 width, height); 633 if (!irb->mt) 634 return false; 635 636 if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 637 bool ok = intel_miptree_alloc_hiz(intel, irb->mt); 638 if (!ok) { 639 intel_miptree_release(&irb->mt); 640 return false; 641 } 642 } 643 } 644 645 return true; 646} 647 648 649#if FEATURE_OES_EGL_image 650static void 
651intel_image_target_renderbuffer_storage(struct gl_context *ctx, 652 struct gl_renderbuffer *rb, 653 void *image_handle) 654{ 655 struct intel_context *intel = intel_context(ctx); 656 struct intel_renderbuffer *irb; 657 __DRIscreen *screen; 658 __DRIimage *image; 659 660 screen = intel->intelScreen->driScrnPriv; 661 image = screen->dri2.image->lookupEGLImage(screen, image_handle, 662 screen->loaderPrivate); 663 if (image == NULL) 664 return; 665 666 /* __DRIimage is opaque to the core so it has to be checked here */ 667 switch (image->format) { 668 case MESA_FORMAT_RGBA8888_REV: 669 _mesa_error(&intel->ctx, GL_INVALID_OPERATION, 670 "glEGLImageTargetRenderbufferStorage(unsupported image format"); 671 return; 672 break; 673 default: 674 break; 675 } 676 677 irb = intel_renderbuffer(rb); 678 intel_miptree_release(&irb->mt); 679 irb->mt = intel_miptree_create_for_region(intel, 680 GL_TEXTURE_2D, 681 image->format, 682 image->region); 683 if (!irb->mt) 684 return; 685 686 rb->InternalFormat = image->internal_format; 687 rb->Width = image->region->width; 688 rb->Height = image->region->height; 689 rb->Format = image->format; 690 rb->DataType = image->data_type; 691 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx, 692 image->internal_format); 693} 694#endif 695 696/** 697 * Called for each hardware renderbuffer when a _window_ is resized. 698 * Just update fields. 699 * Not used for user-created renderbuffers! 
700 */ 701static GLboolean 702intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 703 GLenum internalFormat, GLuint width, GLuint height) 704{ 705 ASSERT(rb->Name == 0); 706 rb->Width = width; 707 rb->Height = height; 708 rb->InternalFormat = internalFormat; 709 710 return true; 711} 712 713 714static void 715intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb, 716 GLuint width, GLuint height) 717{ 718 int i; 719 720 _mesa_resize_framebuffer(ctx, fb, width, height); 721 722 fb->Initialized = true; /* XXX remove someday */ 723 724 if (fb->Name != 0) { 725 return; 726 } 727 728 729 /* Make sure all window system renderbuffers are up to date */ 730 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) { 731 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer; 732 733 /* only resize if size is changing */ 734 if (rb && (rb->Width != width || rb->Height != height)) { 735 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height); 736 } 737 } 738} 739 740 741/** Dummy function for gl_renderbuffer::AllocStorage() */ 742static GLboolean 743intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 744 GLenum internalFormat, GLuint width, GLuint height) 745{ 746 _mesa_problem(ctx, "intel_op_alloc_storage should never be called."); 747 return false; 748} 749 750/** 751 * Create a new intel_renderbuffer which corresponds to an on-screen window, 752 * not a user-created renderbuffer. 
753 */ 754struct intel_renderbuffer * 755intel_create_renderbuffer(gl_format format) 756{ 757 GET_CURRENT_CONTEXT(ctx); 758 759 struct intel_renderbuffer *irb; 760 761 irb = CALLOC_STRUCT(intel_renderbuffer); 762 if (!irb) { 763 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 764 return NULL; 765 } 766 767 _mesa_init_renderbuffer(&irb->Base, 0); 768 irb->Base.ClassID = INTEL_RB_CLASS; 769 irb->Base._BaseFormat = _mesa_get_format_base_format(format); 770 irb->Base.Format = format; 771 irb->Base.InternalFormat = irb->Base._BaseFormat; 772 irb->Base.DataType = intel_mesa_format_to_rb_datatype(format); 773 774 /* intel-specific methods */ 775 irb->Base.Delete = intel_delete_renderbuffer; 776 irb->Base.AllocStorage = intel_alloc_window_storage; 777 irb->Base.GetPointer = intel_get_pointer; 778 779 return irb; 780} 781 782 783struct gl_renderbuffer* 784intel_create_wrapped_renderbuffer(struct gl_context * ctx, 785 int width, int height, 786 gl_format format) 787{ 788 /* 789 * The name here is irrelevant, as long as its nonzero, because the 790 * renderbuffer never gets entered into Mesa's renderbuffer hash table. 791 */ 792 GLuint name = ~0; 793 794 struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer); 795 if (!irb) { 796 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 797 return NULL; 798 } 799 800 struct gl_renderbuffer *rb = &irb->Base; 801 _mesa_init_renderbuffer(rb, name); 802 rb->ClassID = INTEL_RB_CLASS; 803 rb->_BaseFormat = _mesa_get_format_base_format(format); 804 rb->Format = format; 805 rb->InternalFormat = rb->_BaseFormat; 806 rb->DataType = intel_mesa_format_to_rb_datatype(format); 807 rb->Width = width; 808 rb->Height = height; 809 810 return rb; 811} 812 813 814/** 815 * Create a new renderbuffer object. 816 * Typically called via glBindRenderbufferEXT(). 
817 */ 818static struct gl_renderbuffer * 819intel_new_renderbuffer(struct gl_context * ctx, GLuint name) 820{ 821 /*struct intel_context *intel = intel_context(ctx); */ 822 struct intel_renderbuffer *irb; 823 824 irb = CALLOC_STRUCT(intel_renderbuffer); 825 if (!irb) { 826 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 827 return NULL; 828 } 829 830 _mesa_init_renderbuffer(&irb->Base, name); 831 irb->Base.ClassID = INTEL_RB_CLASS; 832 833 /* intel-specific methods */ 834 irb->Base.Delete = intel_delete_renderbuffer; 835 irb->Base.AllocStorage = intel_alloc_renderbuffer_storage; 836 irb->Base.GetPointer = intel_get_pointer; 837 /* span routines set in alloc_storage function */ 838 839 return &irb->Base; 840} 841 842 843/** 844 * Called via glBindFramebufferEXT(). 845 */ 846static void 847intel_bind_framebuffer(struct gl_context * ctx, GLenum target, 848 struct gl_framebuffer *fb, struct gl_framebuffer *fbread) 849{ 850 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) { 851 intel_draw_buffer(ctx); 852 } 853 else { 854 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */ 855 } 856} 857 858 859/** 860 * Called via glFramebufferRenderbufferEXT(). 861 */ 862static void 863intel_framebuffer_renderbuffer(struct gl_context * ctx, 864 struct gl_framebuffer *fb, 865 GLenum attachment, struct gl_renderbuffer *rb) 866{ 867 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? 
rb->Name : 0); 868 869 intel_flush(ctx); 870 871 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb); 872 intel_draw_buffer(ctx); 873} 874 875static struct intel_renderbuffer* 876intel_renderbuffer_wrap_miptree(struct intel_context *intel, 877 struct intel_mipmap_tree *mt, 878 uint32_t level, 879 uint32_t layer, 880 gl_format format, 881 GLenum internal_format); 882 883/** 884 * \par Special case for separate stencil 885 * 886 * When wrapping a depthstencil texture that uses separate stencil, this 887 * function is recursively called twice: once to create \c 888 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the 889 * call to create \c irb->wrapped_depth, the \c format and \c 890 * internal_format parameters do not match \c mt->format. In that case, \c 891 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c 892 * MESA_FORMAT_X8_Z24. 893 * 894 * @return true on success 895 */ 896static bool 897intel_renderbuffer_update_wrapper(struct intel_context *intel, 898 struct intel_renderbuffer *irb, 899 struct intel_mipmap_tree *mt, 900 uint32_t level, 901 uint32_t layer, 902 gl_format format, 903 GLenum internal_format) 904{ 905 struct gl_renderbuffer *rb = &irb->Base; 906 907 rb->Format = format; 908 rb->InternalFormat = internal_format; 909 rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format); 910 rb->_BaseFormat = _mesa_get_format_base_format(rb->Format); 911 rb->Width = mt->level[level].width; 912 rb->Height = mt->level[level].height; 913 914 irb->Base.Delete = intel_delete_renderbuffer; 915 irb->Base.AllocStorage = intel_nop_alloc_storage; 916 917 intel_miptree_check_level_layer(mt, level, layer); 918 irb->mt_level = level; 919 irb->mt_layer = layer; 920 921 if (mt->stencil_mt && _mesa_is_depthstencil_format(rb->InternalFormat)) { 922 assert((irb->wrapped_depth == NULL) == (irb->wrapped_stencil == NULL)); 923 924 struct intel_renderbuffer *depth_irb; 925 struct intel_renderbuffer *stencil_irb; 926 927 if (!irb->wrapped_depth) { 928 
depth_irb = intel_renderbuffer_wrap_miptree(intel, 929 mt, level, layer, 930 MESA_FORMAT_X8_Z24, 931 GL_DEPTH_COMPONENT24); 932 stencil_irb = intel_renderbuffer_wrap_miptree(intel, 933 mt->stencil_mt, 934 level, layer, 935 MESA_FORMAT_S8, 936 GL_STENCIL_INDEX8); 937 _mesa_reference_renderbuffer(&irb->wrapped_depth, &depth_irb->Base); 938 _mesa_reference_renderbuffer(&irb->wrapped_stencil, &stencil_irb->Base); 939 940 if (!irb->wrapped_depth || !irb->wrapped_stencil) 941 return false; 942 } else { 943 bool ok = true; 944 945 depth_irb = intel_renderbuffer(irb->wrapped_depth); 946 stencil_irb = intel_renderbuffer(irb->wrapped_stencil); 947 948 ok &= intel_renderbuffer_update_wrapper(intel, 949 depth_irb, 950 mt, 951 level, layer, 952 MESA_FORMAT_X8_Z24, 953 GL_DEPTH_COMPONENT24); 954 ok &= intel_renderbuffer_update_wrapper(intel, 955 stencil_irb, 956 mt->stencil_mt, 957 level, layer, 958 MESA_FORMAT_S8, 959 GL_STENCIL_INDEX8); 960 if (!ok) 961 return false; 962 } 963 964 intel_miptree_reference(&depth_irb->mt->stencil_mt, stencil_irb->mt); 965 intel_miptree_reference(&irb->mt, depth_irb->mt); 966 } else { 967 intel_miptree_reference(&irb->mt, mt); 968 intel_renderbuffer_set_draw_offset(irb); 969 970 if (mt->hiz_mt == NULL && 971 intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 972 intel_miptree_alloc_hiz(intel, mt); 973 if (!mt->hiz_mt) 974 return false; 975 } 976 } 977 978 return true; 979} 980 981/** 982 * \brief Wrap a renderbuffer around a single slice of a miptree. 983 * 984 * Called by glFramebufferTexture*(). This just allocates a 985 * ``struct intel_renderbuffer`` then calls 986 * intel_renderbuffer_update_wrapper() to do the real work. 
987 * 988 * \see intel_renderbuffer_update_wrapper() 989 */ 990static struct intel_renderbuffer* 991intel_renderbuffer_wrap_miptree(struct intel_context *intel, 992 struct intel_mipmap_tree *mt, 993 uint32_t level, 994 uint32_t layer, 995 gl_format format, 996 GLenum internal_format) 997 998{ 999 const GLuint name = ~0; /* not significant, but distinct for debugging */ 1000 struct gl_context *ctx = &intel->ctx; 1001 struct intel_renderbuffer *irb; 1002 1003 intel_miptree_check_level_layer(mt, level, layer); 1004 1005 irb = CALLOC_STRUCT(intel_renderbuffer); 1006 if (!irb) { 1007 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture"); 1008 return NULL; 1009 } 1010 1011 _mesa_init_renderbuffer(&irb->Base, name); 1012 irb->Base.ClassID = INTEL_RB_CLASS; 1013 1014 if (!intel_renderbuffer_update_wrapper(intel, irb, 1015 mt, level, layer, 1016 format, internal_format)) { 1017 free(irb); 1018 return NULL; 1019 } 1020 1021 return irb; 1022} 1023 1024void 1025intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb) 1026{ 1027 unsigned int dst_x, dst_y; 1028 1029 /* compute offset of the particular 2D image within the texture region */ 1030 intel_miptree_get_image_offset(irb->mt, 1031 irb->mt_level, 1032 0, /* face, which we ignore */ 1033 irb->mt_layer, 1034 &dst_x, &dst_y); 1035 1036 irb->draw_x = dst_x; 1037 irb->draw_y = dst_y; 1038} 1039 1040/** 1041 * Rendering to tiled buffers requires that the base address of the 1042 * buffer be aligned to a page boundary. We generally render to 1043 * textures by pointing the surface at the mipmap image level, which 1044 * may not be aligned to a tile boundary. 1045 * 1046 * This function returns an appropriately-aligned base offset 1047 * according to the tiling restrictions, plus any required x/y offset 1048 * from there. 
1049 */ 1050uint32_t 1051intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb, 1052 uint32_t *tile_x, 1053 uint32_t *tile_y) 1054{ 1055 struct intel_region *region = irb->mt->region; 1056 int cpp = region->cpp; 1057 uint32_t pitch = region->pitch * cpp; 1058 1059 if (region->tiling == I915_TILING_NONE) { 1060 *tile_x = 0; 1061 *tile_y = 0; 1062 return irb->draw_x * cpp + irb->draw_y * pitch; 1063 } else if (region->tiling == I915_TILING_X) { 1064 *tile_x = irb->draw_x % (512 / cpp); 1065 *tile_y = irb->draw_y % 8; 1066 return ((irb->draw_y / 8) * (8 * pitch) + 1067 (irb->draw_x - *tile_x) / (512 / cpp) * 4096); 1068 } else { 1069 assert(region->tiling == I915_TILING_Y); 1070 *tile_x = irb->draw_x % (128 / cpp); 1071 *tile_y = irb->draw_y % 32; 1072 return ((irb->draw_y / 32) * (32 * pitch) + 1073 (irb->draw_x - *tile_x) / (128 / cpp) * 4096); 1074 } 1075} 1076 1077#ifndef I915 1078static bool 1079need_tile_offset_workaround(struct brw_context *brw, 1080 struct intel_renderbuffer *irb) 1081{ 1082 uint32_t tile_x, tile_y; 1083 1084 if (brw->has_surface_tile_offset) 1085 return false; 1086 1087 intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y); 1088 1089 return tile_x != 0 || tile_y != 0; 1090} 1091#endif 1092 1093/** 1094 * Called by glFramebufferTexture[123]DEXT() (and other places) to 1095 * prepare for rendering into texture memory. This might be called 1096 * many times to choose different texture levels, cube faces, etc 1097 * before intel_finish_render_texture() is ever called. 
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;

   (void) fb;

   /* Cube faces and array/3D Z offsets share the "layer" slot; a
    * nonzero cube face implies no Z offset and vice versa.
    */
   int layer;
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      /* No wrapper yet for this attachment: create one around the
       * requested miptree slice.
       */
      irb = intel_renderbuffer_wrap_miptree(intel,
                                            mt,
                                            att->TextureLevel,
                                            layer,
                                            image->TexFormat,
                                            image->InternalFormat);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   /* Re-point the (new or pre-existing) wrapper at the currently
    * selected level/layer; fall back to swrast on failure.
    */
   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, att->TextureLevel, layer,
                                          image->TexFormat,
                                          image->InternalFormat)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _swrast_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      /* NOTE(review): this declaration shadows the function-scope
       * `intel` variable above; both refer to intel_context(ctx).
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
				    intel_image->base.Base.TexFormat,
				    intel_image->base.Base.Level,
				    intel_image->base.Base.Level,
				    width, height, depth,
				    true);

      /* NOTE(review): presumably intel_miptree_copy_teximage() re-points
       * intel_image->mt at new_mt, making the reference below pick up
       * the replacement tree — TODO confirm against its implementation.
       */
      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   int i;

   /*
    * The depth and stencil renderbuffers are the same renderbuffer or wrap
    * the same texture.
    */
   if (depthRb && stencilRb) {
      bool depth_stencil_are_same;
      if (depthRb == stencilRb)
	 depth_stencil_are_same = true;
      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
	       (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
	       (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
		fb->Attachment[BUFFER_STENCIL].Texture->Name))
	 depth_stencil_are_same = true;
      else
	 depth_stencil_are_same = false;

      /* Without separate-stencil hardware, distinct depth and stencil
       * buffers can't be supported.
       */
      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
	 continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
	 DBG("attachment without renderbuffer\n");
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
	 continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
	 DBG("software rendering renderbuffer\n");
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
	 continue;
      }

      if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
	 DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
	     _mesa_get_format_name(irb->Base.Format));
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }

#ifdef I915
      if (!intel_span_supports_format(irb->Base.Format)) {
	 DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
	     _mesa_get_format_name(irb->Base.Format));
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
#endif
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture and there is no scissor then we can use
         glCopyTexSubimage2D to implement the blit. This will end
         up as a fast hardware blit on some drivers */
      if (drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         /* On success, the color bit has been handled; clear it from
          * the mask returned to the caller.
          */
         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   /* Anything left over goes through the generic meta blit path. */
   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

/* Mark this renderbuffer's slice (or, for a separate-stencil wrapper,
 * its wrapped depth buffer's slice) as needing a HiZ resolve.
 */
void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   } else if (irb->wrapped_depth) {
      intel_renderbuffer_set_needs_hiz_resolve(
	    intel_renderbuffer(irb->wrapped_depth));
   } else {
      return;
   }
}

/* Same as above, but for the depth resolve flag. */
void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   } else if (irb->wrapped_depth) {
      intel_renderbuffer_set_needs_depth_resolve(
	    intel_renderbuffer(irb->wrapped_depth));
   } else {
      return;
   }
}

/* Perform a pending HiZ resolve for this renderbuffer's slice,
 * recursing into the wrapped depth buffer if needed.
 * Returns false when there is nothing to resolve.
 */
bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
			       struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);
   if (irb->wrapped_depth)
      return intel_renderbuffer_resolve_hiz(intel,
                                            intel_renderbuffer(irb->wrapped_depth));

   return false;
}

/* Perform a pending depth resolve for this renderbuffer's slice,
 * recursing into the wrapped depth buffer if needed.
 * Returns false when there is nothing to resolve.
 */
bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
				 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   if (irb->wrapped_depth)
      return intel_renderbuffer_resolve_depth(intel,
                                              intel_renderbuffer(irb->wrapped_depth));

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
1442 * Hook in device driver functions. 1443 */ 1444void 1445intel_fbo_init(struct intel_context *intel) 1446{ 1447 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer; 1448 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer; 1449 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer; 1450 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer; 1451 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer; 1452 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer; 1453 intel->ctx.Driver.RenderTexture = intel_render_texture; 1454 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture; 1455 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers; 1456 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer; 1457 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer; 1458 1459#if FEATURE_OES_EGL_image 1460 intel->ctx.Driver.EGLImageTargetRenderbufferStorage = 1461 intel_image_target_renderbuffer_storage; 1462#endif 1463} 1464