intel_fbo.c revision 24da7335b22432ef4c2d57cab86e4b8fbe8733d5
1/************************************************************************** 2 * 3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 **************************************************************************/

#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO


/**
 * \brief Return true if \c fb's depth attachment is HiZ-enabled.
 *
 * True only when \c fb is non-NULL, has a BUFFER_DEPTH renderbuffer,
 * that renderbuffer has a miptree, and the miptree carries a hiz_region.
 */
bool
intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
{
   struct intel_renderbuffer *rb = NULL;
   if (fb)
      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   return rb && rb->mt && rb->mt->hiz_region;
}

/**
 * \brief Return the intel_region backing attachment \c attIndex.
 *
 * Returns NULL when the attachment is absent or has no miptree.
 */
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt)
      return irb->mt->region;
   else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   /* Drop the backing miptree and any wrapped separate depth/stencil
    * renderbuffers before freeing the wrapper struct itself.
    */
   intel_miptree_release(&irb->mt);

   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
   _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);

   free(irb);
}

/**
 * \brief Map a renderbuffer through the GTT.
 *
 * Records the mapped window in irb->map_* so the matching unmap can find
 * it, flushes the batchbuffer if it references the region's BO, then
 * GTT-maps the BO and returns a pointer offset to (x, y).
 *
 * \see intel_map_renderbuffer()
 */
static void
intel_map_renderbuffer_gtt(struct gl_context *ctx,
                           struct gl_renderbuffer *rb,
                           GLuint x, GLuint y, GLuint w, GLuint h,
                           GLbitfield mode,
                           GLubyte **out_map,
                           GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   GLubyte *map;
   int stride, flip_stride;

   assert(irb->mt);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   stride = irb->mt->region->pitch * irb->mt->region->cpp;

   if (rb->Name == 0) {
      /* Window system buffer (Name == 0): GL's origin is the bottom-left,
       * the BO's is the top-left, so start at the flipped row and walk
       * upward with a negative stride.
       */
      y = irb->mt->region->height - 1 - y;
      flip_stride = -stride;
   } else {
      /* User renderbuffer: offset into its slice within the miptree. */
      x += irb->draw_x;
      y += irb->draw_y;
      flip_stride = stride;
   }

   /* Flush pending GPU work that touches this BO before CPU access. */
   if (drm_intel_bo_references(intel->batch.bo, irb->mt->region->bo)) {
      intel_batchbuffer_flush(intel);
   }

   drm_intel_gem_bo_map_gtt(irb->mt->region->bo);

   map = irb->mt->region->bo->virtual;
   map += x * irb->mt->region->cpp;
   map += (int)y * stride;

   *out_map = map;
   *out_stride = flip_stride;

   DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, *out_map, *out_stride);
}

/**
 * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
 *
 * On gen6+, we have LLC sharing, which means we can get high-performance
 * access to linear-mapped buffers.
 *
 * This function allocates a temporary gem buffer at
 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
 * returns a map of that. (Note: Only X tiled buffers can be blitted).
 *
 * \see intel_renderbuffer::map_bo
 * \see intel_map_renderbuffer()
 */
static void
intel_map_renderbuffer_blit(struct gl_context *ctx,
                            struct gl_renderbuffer *rb,
                            GLuint x, GLuint y, GLuint w, GLuint h,
                            GLbitfield mode,
                            GLubyte **out_map,
                            GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   int src_x, src_y;
   int dst_stride;

   /* Only a read-only map of an X-tiled buffer can use this path. */
   assert(irb->mt->region);
   assert(intel->gen >= 6);
   assert(!(mode & GL_MAP_WRITE_BIT));
   assert(irb->mt->region->tiling == I915_TILING_X);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   dst_stride = ALIGN(w * irb->mt->region->cpp, 4);

   if (rb->Name) {
      src_x = x + irb->draw_x;
      src_y = y + irb->draw_y;
   } else {
      /* Window system buffer: select the bottom-up source window. */
      src_x = x;
      src_y = irb->mt->region->height - y - h;
   }

   irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
                                    dst_stride * h, 4096);

   /* We don't do the flip in the blit, because it's always so tricky to get
    * right.
    */
   if (irb->map_bo &&
       intelEmitCopyBlit(intel,
                         irb->mt->region->cpp,
                         irb->mt->region->pitch, irb->mt->region->bo,
                         0, irb->mt->region->tiling,
                         dst_stride / irb->mt->region->cpp, irb->map_bo,
                         0, I915_TILING_NONE,
                         src_x, src_y,
                         0, 0,
                         w, h,
                         GL_COPY)) {
      intel_batchbuffer_flush(intel);
      drm_intel_bo_map(irb->map_bo, false);

      if (rb->Name) {
         *out_map = irb->map_bo->virtual;
         *out_stride = dst_stride;
      } else {
         /* Flip here instead: point at the last row, negative stride. */
         *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
         *out_stride = -dst_stride;
      }

      DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
          __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
          src_x, src_y, w, h, *out_map, *out_stride);
   } else {
      /* Fallback to GTT mapping. */
      drm_intel_bo_unreference(irb->map_bo);
      irb->map_bo = NULL;
      intel_map_renderbuffer_gtt(ctx, rb,
                                 x, y, w, h,
                                 mode,
                                 out_map, out_stride);
   }
}

/**
 * \brief Map a stencil renderbuffer.
 *
 * Stencil buffers are W-tiled. Since the GTT has no W fence, we must detile
 * the buffer in software.
 *
 * This function allocates a temporary malloc'd buffer at
 * intel_renderbuffer::map_buffer, detiles the stencil buffer into it, then
 * returns the temporary buffer as the map.
260 * 261 * \see intel_renderbuffer::map_buffer 262 * \see intel_map_renderbuffer() 263 * \see intel_unmap_renderbuffer_s8() 264 */ 265static void 266intel_map_renderbuffer_s8(struct gl_context *ctx, 267 struct gl_renderbuffer *rb, 268 GLuint x, GLuint y, GLuint w, GLuint h, 269 GLbitfield mode, 270 GLubyte **out_map, 271 GLint *out_stride) 272{ 273 struct intel_context *intel = intel_context(ctx); 274 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 275 uint8_t *tiled_s8_map; 276 uint8_t *untiled_s8_map; 277 278 assert(rb->Format == MESA_FORMAT_S8); 279 assert(irb->mt); 280 281 irb->map_mode = mode; 282 irb->map_x = x; 283 irb->map_y = y; 284 irb->map_w = w; 285 irb->map_h = h; 286 287 /* Flip the Y axis for the default framebuffer. */ 288 int y_flip = (rb->Name == 0) ? -1 : 1; 289 int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0; 290 291 irb->map_buffer = malloc(w * h); 292 untiled_s8_map = irb->map_buffer; 293 tiled_s8_map = intel_region_map(intel, irb->mt->region, mode); 294 295 for (uint32_t pix_y = 0; pix_y < h; pix_y++) { 296 for (uint32_t pix_x = 0; pix_x < w; pix_x++) { 297 uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias; 298 ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch, 299 x + pix_x, 300 flipped_y); 301 untiled_s8_map[pix_y * w + pix_x] = tiled_s8_map[offset]; 302 } 303 } 304 305 *out_map = untiled_s8_map; 306 *out_stride = w; 307 308 DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n", 309 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 310 x, y, w, h, *out_map, *out_stride); 311} 312 313/** 314 * \brief Map a depthstencil buffer with separate stencil. 315 * 316 * A depthstencil renderbuffer, if using separate stencil, consists of a depth 317 * renderbuffer and a hidden stencil renderbuffer. This function maps the 318 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and 319 * returns that as the mapped pointer. 
The caller need not be aware of the 320 * hidden stencil buffer and may safely assume that the mapped pointer points 321 * to a MESA_FORMAT_S8_Z24 buffer 322 * 323 * The consistency between the depth buffer's S8 bits and the hidden stencil 324 * buffer is managed within intel_map_renderbuffer() and 325 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits 326 * according to the map mode. 327 * 328 * \see intel_map_renderbuffer() 329 * \see intel_unmap_renderbuffer_separate_s8z24() 330 */ 331static void 332intel_map_renderbuffer_separate_s8z24(struct gl_context *ctx, 333 struct gl_renderbuffer *rb, 334 GLuint x, GLuint y, GLuint w, GLuint h, 335 GLbitfield mode, 336 GLubyte **out_map, 337 GLint *out_stride) 338{ 339 struct intel_context *intel = intel_context(ctx); 340 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 341 342 uint8_t *s8z24_map; 343 int32_t s8z24_stride; 344 345 struct intel_renderbuffer *s8_irb; 346 uint8_t *s8_map; 347 348 assert(rb->Name != 0); 349 assert(rb->Format == MESA_FORMAT_S8_Z24); 350 assert(irb->wrapped_depth != NULL); 351 assert(irb->wrapped_stencil != NULL); 352 353 irb->map_mode = mode; 354 irb->map_x = x; 355 irb->map_y = y; 356 irb->map_w = w; 357 irb->map_h = h; 358 359 /* Map with write mode for the gather below. */ 360 intel_map_renderbuffer_gtt(ctx, irb->wrapped_depth, 361 x, y, w, h, mode | GL_MAP_WRITE_BIT, 362 &s8z24_map, &s8z24_stride); 363 364 s8_irb = intel_renderbuffer(irb->wrapped_stencil); 365 s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_READ_BIT); 366 367 /* Gather the stencil buffer into the depth buffer. 
*/ 368 for (uint32_t pix_y = 0; pix_y < h; ++pix_y) { 369 for (uint32_t pix_x = 0; pix_x < w; ++pix_x) { 370 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch, 371 x + pix_x, 372 y + pix_y); 373 ptrdiff_t s8z24_offset = pix_y * s8z24_stride 374 + pix_x * 4 375 + 3; 376 s8z24_map[s8z24_offset] = s8_map[s8_offset]; 377 } 378 } 379 380 intel_region_unmap(intel, s8_irb->mt->region); 381 382 *out_map = s8z24_map; 383 *out_stride = s8z24_stride; 384} 385 386/** 387 * \see dd_function_table::MapRenderbuffer 388 */ 389static void 390intel_map_renderbuffer(struct gl_context *ctx, 391 struct gl_renderbuffer *rb, 392 GLuint x, GLuint y, GLuint w, GLuint h, 393 GLbitfield mode, 394 GLubyte **out_map, 395 GLint *out_stride) 396{ 397 struct intel_context *intel = intel_context(ctx); 398 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 399 400 /* We sometimes get called with this by our intel_span.c usage. */ 401 if (!irb->mt && !irb->wrapped_depth) { 402 *out_map = NULL; 403 *out_stride = 0; 404 return; 405 } 406 407 if (rb->Format == MESA_FORMAT_S8) { 408 intel_map_renderbuffer_s8(ctx, rb, x, y, w, h, mode, 409 out_map, out_stride); 410 } else if (irb->wrapped_depth) { 411 intel_map_renderbuffer_separate_s8z24(ctx, rb, x, y, w, h, mode, 412 out_map, out_stride); 413 } else if (intel->gen >= 6 && 414 !(mode & GL_MAP_WRITE_BIT) && 415 irb->mt->region->tiling == I915_TILING_X) { 416 intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode, 417 out_map, out_stride); 418 } else { 419 intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode, 420 out_map, out_stride); 421 } 422} 423 424/** 425 * \see intel_map_renderbuffer_s8() 426 */ 427static void 428intel_unmap_renderbuffer_s8(struct gl_context *ctx, 429 struct gl_renderbuffer *rb) 430{ 431 struct intel_context *intel = intel_context(ctx); 432 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 433 434 DBG("%s: rb %d (%s)\n", __FUNCTION__, 435 rb->Name, _mesa_get_format_name(rb->Format)); 436 437 
assert(rb->Format == MESA_FORMAT_S8); 438 439 if (!irb->map_buffer) 440 return; 441 442 if (irb->map_mode & GL_MAP_WRITE_BIT) { 443 /* The temporary buffer was written to, so we must copy its pixels into 444 * the real buffer. 445 */ 446 uint8_t *untiled_s8_map = irb->map_buffer; 447 uint8_t *tiled_s8_map = irb->mt->region->bo->virtual; 448 449 /* Flip the Y axis for the default framebuffer. */ 450 int y_flip = (rb->Name == 0) ? -1 : 1; 451 int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0; 452 453 for (uint32_t pix_y = 0; pix_y < irb->map_h; pix_y++) { 454 for (uint32_t pix_x = 0; pix_x < irb->map_w; pix_x++) { 455 uint32_t flipped_y = y_flip * (int32_t)(pix_y + irb->map_y) + y_bias; 456 ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch, 457 pix_x + irb->map_x, 458 flipped_y); 459 tiled_s8_map[offset] = 460 untiled_s8_map[pix_y * irb->map_w + pix_x]; 461 } 462 } 463 } 464 465 intel_region_unmap(intel, irb->mt->region); 466 free(irb->map_buffer); 467 irb->map_buffer = NULL; 468} 469 470/** 471 * \brief Unmap a depthstencil renderbuffer with separate stencil. 472 * 473 * \see intel_map_renderbuffer_separate_s8z24() 474 * \see intel_unmap_renderbuffer() 475 */ 476static void 477intel_unmap_renderbuffer_separate_s8z24(struct gl_context *ctx, 478 struct gl_renderbuffer *rb) 479{ 480 struct intel_context *intel = intel_context(ctx); 481 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 482 struct intel_renderbuffer *s8z24_irb; 483 484 assert(rb->Name != 0); 485 assert(rb->Format == MESA_FORMAT_S8_Z24); 486 assert(irb->wrapped_depth != NULL); 487 assert(irb->wrapped_stencil != NULL); 488 489 s8z24_irb = intel_renderbuffer(irb->wrapped_depth); 490 491 if (irb->map_mode & GL_MAP_WRITE_BIT) { 492 /* Copy the stencil bits from the depth buffer into the stencil buffer. 
493 */ 494 uint32_t map_x = irb->map_x; 495 uint32_t map_y = irb->map_y; 496 uint32_t map_w = irb->map_w; 497 uint32_t map_h = irb->map_h; 498 499 struct intel_renderbuffer *s8_irb; 500 uint8_t *s8_map; 501 502 s8_irb = intel_renderbuffer(irb->wrapped_stencil); 503 s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_WRITE_BIT); 504 505 int32_t s8z24_stride = 4 * s8z24_irb->mt->region->pitch; 506 uint8_t *s8z24_map = s8z24_irb->mt->region->bo->virtual 507 + map_y * s8z24_stride 508 + map_x * 4; 509 510 for (uint32_t pix_y = 0; pix_y < map_h; ++pix_y) { 511 for (uint32_t pix_x = 0; pix_x < map_w; ++pix_x) { 512 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch, 513 map_x + pix_x, 514 map_y + pix_y); 515 ptrdiff_t s8z24_offset = pix_y * s8z24_stride 516 + pix_x * 4 517 + 3; 518 s8_map[s8_offset] = s8z24_map[s8z24_offset]; 519 } 520 } 521 522 intel_region_unmap(intel, s8_irb->mt->region); 523 } 524 525 drm_intel_gem_bo_unmap_gtt(s8z24_irb->mt->region->bo); 526} 527 528/** 529 * \see dd_function_table::UnmapRenderbuffer 530 */ 531static void 532intel_unmap_renderbuffer(struct gl_context *ctx, 533 struct gl_renderbuffer *rb) 534{ 535 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 536 537 DBG("%s: rb %d (%s)\n", __FUNCTION__, 538 rb->Name, _mesa_get_format_name(rb->Format)); 539 540 if (rb->Format == MESA_FORMAT_S8) { 541 intel_unmap_renderbuffer_s8(ctx, rb); 542 } else if (irb->wrapped_depth) { 543 intel_unmap_renderbuffer_separate_s8z24(ctx, rb); 544 } else if (irb->map_bo) { 545 /* Paired with intel_map_renderbuffer_blit(). */ 546 drm_intel_bo_unmap(irb->map_bo); 547 drm_intel_bo_unreference(irb->map_bo); 548 irb->map_bo = 0; 549 } else { 550 /* Paired with intel_map_renderbuffer_gtt(). */ 551 if (irb->mt) { 552 /* The miptree may be null when intel_map_renderbuffer() is 553 * called from intel_span.c. 
554 */ 555 drm_intel_gem_bo_unmap_gtt(irb->mt->region->bo); 556 } 557 } 558} 559 560/** 561 * Return a pointer to a specific pixel in a renderbuffer. 562 */ 563static void * 564intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb, 565 GLint x, GLint y) 566{ 567 /* By returning NULL we force all software rendering to go through 568 * the span routines. 569 */ 570 return NULL; 571} 572 573 574/** 575 * Called via glRenderbufferStorageEXT() to set the format and allocate 576 * storage for a user-created renderbuffer. 577 */ 578GLboolean 579intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 580 GLenum internalFormat, 581 GLuint width, GLuint height) 582{ 583 struct intel_context *intel = intel_context(ctx); 584 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 585 int cpp, tiling; 586 587 ASSERT(rb->Name != 0); 588 589 switch (internalFormat) { 590 default: 591 /* Use the same format-choice logic as for textures. 592 * Renderbuffers aren't any different from textures for us, 593 * except they're less useful because you can't texture with 594 * them. 595 */ 596 rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat, 597 GL_NONE, GL_NONE); 598 break; 599 case GL_STENCIL_INDEX: 600 case GL_STENCIL_INDEX1_EXT: 601 case GL_STENCIL_INDEX4_EXT: 602 case GL_STENCIL_INDEX8_EXT: 603 case GL_STENCIL_INDEX16_EXT: 604 /* These aren't actual texture formats, so force them here. 
*/ 605 if (intel->has_separate_stencil) { 606 rb->Format = MESA_FORMAT_S8; 607 } else { 608 assert(!intel->must_use_separate_stencil); 609 rb->Format = MESA_FORMAT_S8_Z24; 610 } 611 break; 612 } 613 614 rb->Width = width; 615 rb->Height = height; 616 rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat); 617 rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format); 618 cpp = _mesa_get_format_bytes(rb->Format); 619 620 intel_flush(ctx); 621 622 intel_miptree_release(&irb->mt); 623 624 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__, 625 _mesa_lookup_enum_by_nr(internalFormat), 626 _mesa_get_format_name(rb->Format), width, height); 627 628 tiling = I915_TILING_NONE; 629 if (intel->use_texture_tiling) { 630 GLenum base_format = _mesa_get_format_base_format(rb->Format); 631 632 if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT || 633 base_format == GL_STENCIL_INDEX || 634 base_format == GL_DEPTH_STENCIL)) 635 tiling = I915_TILING_Y; 636 else 637 tiling = I915_TILING_X; 638 } 639 640 if (irb->Base.Format == MESA_FORMAT_S8) { 641 /* 642 * The stencil buffer is W tiled. However, we request from the kernel a 643 * non-tiled buffer because the GTT is incapable of W fencing. 644 * 645 * The stencil buffer has quirky pitch requirements. From Vol 2a, 646 * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch": 647 * The pitch must be set to 2x the value computed based on width, as 648 * the stencil buffer is stored with two rows interleaved. 649 * To accomplish this, we resort to the nasty hack of doubling the drm 650 * region's cpp and halving its height. 651 * 652 * If we neglect to double the pitch, then render corruption occurs. 
653 */ 654 irb->mt = intel_miptree_create_for_renderbuffer( 655 intel, 656 rb->Format, 657 I915_TILING_NONE, 658 cpp * 2, 659 ALIGN(width, 64), 660 ALIGN((height + 1) / 2, 64)); 661 if (!irb->mt) 662 return false; 663 664 } else if (irb->Base.Format == MESA_FORMAT_S8_Z24 665 && intel->must_use_separate_stencil) { 666 667 bool ok = true; 668 struct gl_renderbuffer *depth_rb; 669 struct gl_renderbuffer *stencil_rb; 670 671 depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height, 672 MESA_FORMAT_X8_Z24); 673 stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height, 674 MESA_FORMAT_S8); 675 ok = depth_rb && stencil_rb; 676 ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb, 677 depth_rb->InternalFormat, 678 width, height); 679 ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb, 680 stencil_rb->InternalFormat, 681 width, height); 682 683 if (!ok) { 684 if (depth_rb) { 685 intel_delete_renderbuffer(depth_rb); 686 } 687 if (stencil_rb) { 688 intel_delete_renderbuffer(stencil_rb); 689 } 690 return false; 691 } 692 693 depth_rb->Wrapped = rb; 694 stencil_rb->Wrapped = rb; 695 _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb); 696 _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb); 697 698 } else { 699 irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format, 700 tiling, cpp, 701 width, height); 702 if (!irb->mt) 703 return false; 704 705 if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 706 irb->mt->hiz_region = intel_region_alloc(intel->intelScreen, 707 I915_TILING_Y, 708 cpp, 709 rb->Width, 710 rb->Height, 711 true); 712 if (!irb->mt->hiz_region) { 713 intel_miptree_release(&irb->mt); 714 return false; 715 } 716 } 717 } 718 719 return true; 720} 721 722 723#if FEATURE_OES_EGL_image 724static void 725intel_image_target_renderbuffer_storage(struct gl_context *ctx, 726 struct gl_renderbuffer *rb, 727 void *image_handle) 728{ 729 struct intel_context *intel = intel_context(ctx); 730 struct 
intel_renderbuffer *irb; 731 __DRIscreen *screen; 732 __DRIimage *image; 733 734 screen = intel->intelScreen->driScrnPriv; 735 image = screen->dri2.image->lookupEGLImage(screen, image_handle, 736 screen->loaderPrivate); 737 if (image == NULL) 738 return; 739 740 /* __DRIimage is opaque to the core so it has to be checked here */ 741 switch (image->format) { 742 case MESA_FORMAT_RGBA8888_REV: 743 _mesa_error(&intel->ctx, GL_INVALID_OPERATION, 744 "glEGLImageTargetRenderbufferStorage(unsupported image format"); 745 return; 746 break; 747 default: 748 break; 749 } 750 751 irb = intel_renderbuffer(rb); 752 intel_miptree_release(&irb->mt); 753 irb->mt = intel_miptree_create_for_region(intel, 754 GL_TEXTURE_2D, 755 image->format, 756 image->region); 757 if (!irb->mt) 758 return; 759 760 rb->InternalFormat = image->internal_format; 761 rb->Width = image->region->width; 762 rb->Height = image->region->height; 763 rb->Format = image->format; 764 rb->DataType = image->data_type; 765 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx, 766 image->internal_format); 767} 768#endif 769 770/** 771 * Called for each hardware renderbuffer when a _window_ is resized. 772 * Just update fields. 773 * Not used for user-created renderbuffers! 
774 */ 775static GLboolean 776intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 777 GLenum internalFormat, GLuint width, GLuint height) 778{ 779 ASSERT(rb->Name == 0); 780 rb->Width = width; 781 rb->Height = height; 782 rb->InternalFormat = internalFormat; 783 784 return true; 785} 786 787 788static void 789intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb, 790 GLuint width, GLuint height) 791{ 792 int i; 793 794 _mesa_resize_framebuffer(ctx, fb, width, height); 795 796 fb->Initialized = true; /* XXX remove someday */ 797 798 if (fb->Name != 0) { 799 return; 800 } 801 802 803 /* Make sure all window system renderbuffers are up to date */ 804 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) { 805 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer; 806 807 /* only resize if size is changing */ 808 if (rb && (rb->Width != width || rb->Height != height)) { 809 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height); 810 } 811 } 812} 813 814 815/** Dummy function for gl_renderbuffer::AllocStorage() */ 816static GLboolean 817intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 818 GLenum internalFormat, GLuint width, GLuint height) 819{ 820 _mesa_problem(ctx, "intel_op_alloc_storage should never be called."); 821 return false; 822} 823 824/** 825 * Create a new intel_renderbuffer which corresponds to an on-screen window, 826 * not a user-created renderbuffer. 
827 */ 828struct intel_renderbuffer * 829intel_create_renderbuffer(gl_format format) 830{ 831 GET_CURRENT_CONTEXT(ctx); 832 833 struct intel_renderbuffer *irb; 834 835 irb = CALLOC_STRUCT(intel_renderbuffer); 836 if (!irb) { 837 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 838 return NULL; 839 } 840 841 _mesa_init_renderbuffer(&irb->Base, 0); 842 irb->Base.ClassID = INTEL_RB_CLASS; 843 irb->Base._BaseFormat = _mesa_get_format_base_format(format); 844 irb->Base.Format = format; 845 irb->Base.InternalFormat = irb->Base._BaseFormat; 846 irb->Base.DataType = intel_mesa_format_to_rb_datatype(format); 847 848 /* intel-specific methods */ 849 irb->Base.Delete = intel_delete_renderbuffer; 850 irb->Base.AllocStorage = intel_alloc_window_storage; 851 irb->Base.GetPointer = intel_get_pointer; 852 853 return irb; 854} 855 856 857struct gl_renderbuffer* 858intel_create_wrapped_renderbuffer(struct gl_context * ctx, 859 int width, int height, 860 gl_format format) 861{ 862 /* 863 * The name here is irrelevant, as long as its nonzero, because the 864 * renderbuffer never gets entered into Mesa's renderbuffer hash table. 865 */ 866 GLuint name = ~0; 867 868 struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer); 869 if (!irb) { 870 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 871 return NULL; 872 } 873 874 struct gl_renderbuffer *rb = &irb->Base; 875 _mesa_init_renderbuffer(rb, name); 876 rb->ClassID = INTEL_RB_CLASS; 877 rb->_BaseFormat = _mesa_get_format_base_format(format); 878 rb->Format = format; 879 rb->InternalFormat = rb->_BaseFormat; 880 rb->DataType = intel_mesa_format_to_rb_datatype(format); 881 rb->Width = width; 882 rb->Height = height; 883 884 return rb; 885} 886 887 888/** 889 * Create a new renderbuffer object. 890 * Typically called via glBindRenderbufferEXT(). 
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
   irb->Base.GetPointer = intel_get_pointer;
   /* span routines set in alloc_storage function */

   return &irb->Base;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   intel_flush(ctx);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

/**
 * \brief Refresh a texture-wrapping renderbuffer from its attachment.
 *
 * Copies format/size state from the attached texture image into \c irb and
 * points irb at the image's miptree (or, for separate-stencil depthstencil
 * textures, at the image's embedded depth/stencil renderbuffers).
 *
 * Returns false if the span code cannot render to the texture's format.
 */
static bool
intel_update_wrapper(struct gl_context *ctx, struct intel_renderbuffer *irb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *texImage = _mesa_get_attachment_teximage(att);
   struct intel_texture_image *intel_image = intel_texture_image(texImage);
   int width, height, depth;

   if (!intel_span_supports_format(texImage->TexFormat)) {
      DBG("Render to texture BAD FORMAT %s\n",
          _mesa_get_format_name(texImage->TexFormat));
      return false;
   } else {
      DBG("Render to texture %s\n", _mesa_get_format_name(texImage->TexFormat));
   }

   intel_miptree_get_dimensions_for_image(texImage, &width, &height, &depth);

   irb->Base.Format = texImage->TexFormat;
   irb->Base.DataType = intel_mesa_format_to_rb_datatype(texImage->TexFormat);
   irb->Base.InternalFormat = texImage->InternalFormat;
   irb->Base._BaseFormat = _mesa_base_tex_format(ctx, irb->Base.InternalFormat);
   irb->Base.Width = width;
   irb->Base.Height = height;

   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   /* Select the miptree slice this wrapper renders into. */
   irb->mt_level = att->TextureLevel;
   if (texImage->TexObject->Target == GL_TEXTURE_CUBE_MAP) {
      assert(att->Zoffset == 0);
      irb->mt_layer = att->CubeMapFace;
   } else {
      assert(att->CubeMapFace == 0);
      irb->mt_layer = att->Zoffset;
   }

   if (intel_image->stencil_rb) {
      /* The tex image has packed depth/stencil format, but is using separate
       * stencil. It shares its embedded depth and stencil renderbuffers with
       * the renderbuffer wrapper.
       *
       * FIXME: glFramebufferTexture*() is broken for depthstencil textures
       * FIXME: with separate stencil. To fix this, we must create a separate
       * FIXME: pair of depth/stencil renderbuffers for each attached slice
       * FIXME: of the miptree.
       */
      struct intel_renderbuffer *depth_irb;
      struct intel_renderbuffer *stencil_irb;

      _mesa_reference_renderbuffer(&irb->wrapped_depth,
                                   intel_image->depth_rb);
      _mesa_reference_renderbuffer(&irb->wrapped_stencil,
                                   intel_image->stencil_rb);

      depth_irb = intel_renderbuffer(intel_image->depth_rb);
      depth_irb->mt_level = irb->mt_level;
      depth_irb->mt_layer = irb->mt_layer;

      stencil_irb = intel_renderbuffer(intel_image->stencil_rb);
      stencil_irb->mt_level = irb->mt_level;
      stencil_irb->mt_layer = irb->mt_layer;
   } else {
      intel_miptree_reference(&irb->mt, intel_image->mt);
   }
   return true;
}

/**
 * When glFramebufferTexture[123]D is called this function sets up the
 * gl_renderbuffer wrapper around the texture image.
 * This will have the region info needed for hardware rendering.
1021 */ 1022static struct intel_renderbuffer * 1023intel_wrap_texture(struct gl_context * ctx, 1024 struct gl_renderbuffer_attachment *att) 1025{ 1026 const GLuint name = ~0; /* not significant, but distinct for debugging */ 1027 struct intel_renderbuffer *irb; 1028 1029 /* make an intel_renderbuffer to wrap the texture image */ 1030 irb = CALLOC_STRUCT(intel_renderbuffer); 1031 if (!irb) { 1032 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture"); 1033 return NULL; 1034 } 1035 1036 _mesa_init_renderbuffer(&irb->Base, name); 1037 irb->Base.ClassID = INTEL_RB_CLASS; 1038 1039 if (!intel_update_wrapper(ctx, irb, att)) { 1040 free(irb); 1041 return NULL; 1042 } 1043 1044 return irb; 1045} 1046 1047void 1048intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb, 1049 struct intel_texture_image *intel_image, 1050 int zoffset) 1051{ 1052 unsigned int dst_x, dst_y; 1053 1054 /* compute offset of the particular 2D image within the texture region */ 1055 intel_miptree_get_image_offset(intel_image->mt, 1056 intel_image->base.Base.Level, 1057 intel_image->base.Base.Face, 1058 zoffset, 1059 &dst_x, &dst_y); 1060 1061 irb->draw_x = dst_x; 1062 irb->draw_y = dst_y; 1063} 1064 1065/** 1066 * Rendering to tiled buffers requires that the base address of the 1067 * buffer be aligned to a page boundary. We generally render to 1068 * textures by pointing the surface at the mipmap image level, which 1069 * may not be aligned to a tile boundary. 1070 * 1071 * This function returns an appropriately-aligned base offset 1072 * according to the tiling restrictions, plus any required x/y offset 1073 * from there. 
1074 */ 1075uint32_t 1076intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb, 1077 uint32_t *tile_x, 1078 uint32_t *tile_y) 1079{ 1080 struct intel_region *region = irb->mt->region; 1081 int cpp = region->cpp; 1082 uint32_t pitch = region->pitch * cpp; 1083 1084 if (region->tiling == I915_TILING_NONE) { 1085 *tile_x = 0; 1086 *tile_y = 0; 1087 return irb->draw_x * cpp + irb->draw_y * pitch; 1088 } else if (region->tiling == I915_TILING_X) { 1089 *tile_x = irb->draw_x % (512 / cpp); 1090 *tile_y = irb->draw_y % 8; 1091 return ((irb->draw_y / 8) * (8 * pitch) + 1092 (irb->draw_x - *tile_x) / (512 / cpp) * 4096); 1093 } else { 1094 assert(region->tiling == I915_TILING_Y); 1095 *tile_x = irb->draw_x % (128 / cpp); 1096 *tile_y = irb->draw_y % 32; 1097 return ((irb->draw_y / 32) * (32 * pitch) + 1098 (irb->draw_x - *tile_x) / (128 / cpp) * 4096); 1099 } 1100} 1101 1102#ifndef I915 1103static bool 1104need_tile_offset_workaround(struct brw_context *brw, 1105 struct intel_renderbuffer *irb) 1106{ 1107 uint32_t tile_x, tile_y; 1108 1109 if (brw->has_surface_tile_offset) 1110 return false; 1111 1112 intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y); 1113 1114 return tile_x != 0 || tile_y != 0; 1115} 1116#endif 1117 1118/** 1119 * Called by glFramebufferTexture[123]DEXT() (and other places) to 1120 * prepare for rendering into texture memory. This might be called 1121 * many times to choose different texture levels, cube faces, etc 1122 * before intel_finish_render_texture() is ever called. 
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);

   (void) fb;

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      /* First time rendering to this texture attachment: create the
       * intel_renderbuffer wrapper around the texture image.
       */
      irb = intel_wrap_texture(ctx, att);
      if (irb) {
	 /* bind the wrapper to the attachment point */
	 _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
	 /* fallback to software rendering */
	 _swrast_render_texture(ctx, fb, att);
	 return;
      }
   }

   /* Refresh the wrapper's view of the texture image -- this function may
    * be called repeatedly to select different levels/faces (see the header
    * comment above).  On failure, drop the wrapper and fall back to
    * software rendering.
    */
   if (!intel_update_wrapper(ctx, irb, att)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _swrast_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   /* Remember where the selected 2D image lives inside the miptree and
    * flag the image so later texture validation knows it was drawn to.
    */
   intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);
   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      /* Single-level tree: first and last level are both the image's
       * own level.
       */
      new_mt = intel_miptree_create(intel, image->TexObject->Target,
				    intel_image->base.Base.TexFormat,
				    intel_image->base.Base.Level,
				    intel_image->base.Base.Level,
				    width, height, depth,
				    true);

      /* NOTE(review): this presumably re-points intel_image->mt at new_mt,
       * so the draw offset and the reference taken below pick up the new
       * tree -- confirm against intel_miptree_copy_teximage().
       */
      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render texture tid %lx tex=%u\n",
       _glthread_GetID(), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   int i;

   /*
    * The depth and stencil renderbuffers are the same renderbuffer or wrap
    * the same texture.
    */
   if (depthRb && stencilRb) {
      bool depth_stencil_are_same;
      if (depthRb == stencilRb)
	 depth_stencil_are_same = true;
      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
	       (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
	       (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
		fb->Attachment[BUFFER_STENCIL].Texture->Name))
	 depth_stencil_are_same = true;
      else
	 depth_stencil_are_same = false;

      /* Without separate-stencil hardware, depth and stencil must share
       * storage; otherwise mark the FBO unsupported.
       */
      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }

   /* Walk every attachment point (color, depth, stencil, ...). */
   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
	 continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
	 DBG("attachment without renderbuffer\n");
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
	 continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
	 DBG("software rendering renderbuffer\n");
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
	 continue;
      }

      /* Both the software spans code and the hardware render target
       * must handle the attachment's format.
       */
      if (!intel_span_supports_format(irb->Base.Format) ||
	  !intel->vtbl.render_target_supported(irb->Base.Format)) {
	 DBG("Unsupported texture/renderbuffer format attached: %s\n",
	     _mesa_get_format_name(irb->Base.Format));
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      /* NOTE(review): only the first color draw buffer is considered, and
       * _ColorDrawBufferIndexes[0] can be -1 when the draw buffer is
       * GL_NONE -- confirm callers never reach here in that state.
       */
      const struct gl_renderbuffer_attachment *drawAtt =
	 &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture and there is no scissor then we can use
         glCopyTexSubimage2D to implement the blit. This will end
         up as a fast hardware blit on some drivers */
      if (drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         /* "filter" can be ignored here: the size checks above guarantee
          * a 1:1 copy, for which NEAREST and LINEAR filtering give
          * identical results.
          */
         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   /* Whatever the fast path couldn't handle (depth/stencil bits, or a
    * color blit it rejected) goes through the meta-op implementation.
    */
   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
1380 */ 1381void 1382intel_fbo_init(struct intel_context *intel) 1383{ 1384 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer; 1385 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer; 1386 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer; 1387 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer; 1388 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer; 1389 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer; 1390 intel->ctx.Driver.RenderTexture = intel_render_texture; 1391 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture; 1392 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers; 1393 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer; 1394 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer; 1395 1396#if FEATURE_OES_EGL_image 1397 intel->ctx.Driver.EGLImageTargetRenderbufferStorage = 1398 intel_image_target_renderbuffer_storage; 1399#endif 1400} 1401