/* intel_fbo.c — revision dc4c3a31c64aae2c3d76ccbd5bf54d04a1d5d041 */
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * 26 **************************************************************************/ 27 28 29#include "main/enums.h" 30#include "main/imports.h" 31#include "main/macros.h" 32#include "main/mfeatures.h" 33#include "main/mtypes.h" 34#include "main/fbobject.h" 35#include "main/framebuffer.h" 36#include "main/renderbuffer.h" 37#include "main/context.h" 38#include "main/teximage.h" 39#include "swrast/swrast.h" 40#include "drivers/common/meta.h" 41 42#include "intel_context.h" 43#include "intel_batchbuffer.h" 44#include "intel_buffers.h" 45#include "intel_blit.h" 46#include "intel_fbo.h" 47#include "intel_mipmap_tree.h" 48#include "intel_regions.h" 49#include "intel_tex.h" 50#include "intel_span.h" 51#ifndef I915 52#include "brw_context.h" 53#endif 54 55#define FILE_DEBUG_FLAG DEBUG_FBO 56 57 58/** 59 * Create a new framebuffer object. 60 */ 61static struct gl_framebuffer * 62intel_new_framebuffer(struct gl_context * ctx, GLuint name) 63{ 64 /* Only drawable state in intel_framebuffer at this time, just use Mesa's 65 * class 66 */ 67 return _mesa_new_framebuffer(ctx, name); 68} 69 70 71/** Called by gl_renderbuffer::Delete() */ 72static void 73intel_delete_renderbuffer(struct gl_renderbuffer *rb) 74{ 75 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 76 77 ASSERT(irb); 78 79 intel_region_release(&irb->region); 80 intel_region_release(&irb->hiz_region); 81 82 _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL); 83 _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL); 84 85 free(irb); 86} 87 88/** 89 * \brief Map a renderbuffer through the GTT. 
90 * 91 * \see intel_map_renderbuffer() 92 */ 93static void 94intel_map_renderbuffer_gtt(struct gl_context *ctx, 95 struct gl_renderbuffer *rb, 96 GLuint x, GLuint y, GLuint w, GLuint h, 97 GLbitfield mode, 98 GLubyte **out_map, 99 GLint *out_stride) 100{ 101 struct intel_context *intel = intel_context(ctx); 102 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 103 GLubyte *map; 104 int stride, flip_stride; 105 106 assert(irb->region); 107 108 irb->map_mode = mode; 109 irb->map_x = x; 110 irb->map_y = y; 111 irb->map_w = w; 112 irb->map_h = h; 113 114 stride = irb->region->pitch * irb->region->cpp; 115 116 if (rb->Name == 0) { 117 y = irb->region->height - 1 - y; 118 flip_stride = -stride; 119 } else { 120 x += irb->draw_x; 121 y += irb->draw_y; 122 flip_stride = stride; 123 } 124 125 if (drm_intel_bo_references(intel->batch.bo, irb->region->bo)) { 126 intel_batchbuffer_flush(intel); 127 } 128 129 drm_intel_gem_bo_map_gtt(irb->region->bo); 130 131 map = irb->region->bo->virtual; 132 map += x * irb->region->cpp; 133 map += (int)y * stride; 134 135 *out_map = map; 136 *out_stride = flip_stride; 137 138 DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n", 139 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 140 x, y, w, h, *out_map, *out_stride); 141} 142 143/** 144 * \brief Map a renderbuffer by blitting it to a temporary gem buffer. 145 * 146 * On gen6+, we have LLC sharing, which means we can get high-performance 147 * access to linear-mapped buffers. 148 * 149 * This function allocates a temporary gem buffer at 150 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and 151 * returns a map of that. (Note: Only X tiled buffers can be blitted). 
152 * 153 * \see intel_renderbuffer::map_bo 154 * \see intel_map_renderbuffer() 155 */ 156static void 157intel_map_renderbuffer_blit(struct gl_context *ctx, 158 struct gl_renderbuffer *rb, 159 GLuint x, GLuint y, GLuint w, GLuint h, 160 GLbitfield mode, 161 GLubyte **out_map, 162 GLint *out_stride) 163{ 164 struct intel_context *intel = intel_context(ctx); 165 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 166 167 int src_x, src_y; 168 int dst_stride; 169 170 assert(irb->region); 171 assert(intel->gen >= 6); 172 assert(!(mode & GL_MAP_WRITE_BIT)); 173 assert(irb->region->tiling == I915_TILING_X); 174 175 irb->map_mode = mode; 176 irb->map_x = x; 177 irb->map_y = y; 178 irb->map_w = w; 179 irb->map_h = h; 180 181 dst_stride = ALIGN(w * irb->region->cpp, 4); 182 183 if (rb->Name) { 184 src_x = x + irb->draw_x; 185 src_y = y + irb->draw_y; 186 } else { 187 src_x = x; 188 src_y = irb->region->height - y - h; 189 } 190 191 irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp", 192 dst_stride * h, 4096); 193 194 /* We don't do the flip in the blit, because it's always so tricky to get 195 * right. 196 */ 197 if (irb->map_bo && 198 intelEmitCopyBlit(intel, 199 irb->region->cpp, 200 irb->region->pitch, irb->region->bo, 201 0, irb->region->tiling, 202 dst_stride / irb->region->cpp, irb->map_bo, 203 0, I915_TILING_NONE, 204 src_x, src_y, 205 0, 0, 206 w, h, 207 GL_COPY)) { 208 intel_batchbuffer_flush(intel); 209 drm_intel_bo_map(irb->map_bo, false); 210 211 if (rb->Name) { 212 *out_map = irb->map_bo->virtual; 213 *out_stride = dst_stride; 214 } else { 215 *out_map = irb->map_bo->virtual + (h - 1) * dst_stride; 216 *out_stride = -dst_stride; 217 } 218 219 DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n", 220 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 221 src_x, src_y, w, h, *out_map, *out_stride); 222 } else { 223 /* Fallback to GTT mapping. 
*/ 224 drm_intel_bo_unreference(irb->map_bo); 225 irb->map_bo = NULL; 226 intel_map_renderbuffer_gtt(ctx, rb, 227 x, y, w, h, 228 mode, 229 out_map, out_stride); 230 } 231} 232 233/** 234 * \brief Map a stencil renderbuffer. 235 * 236 * Stencil buffers are W-tiled. Since the GTT has no W fence, we must detile 237 * the buffer in software. 238 * 239 * This function allocates a temporary malloc'd buffer at 240 * intel_renderbuffer::map_buffer, detiles the stencil buffer into it, then 241 * returns the temporary buffer as the map. 242 * 243 * \see intel_renderbuffer::map_buffer 244 * \see intel_map_renderbuffer() 245 * \see intel_unmap_renderbuffer_s8() 246 */ 247static void 248intel_map_renderbuffer_s8(struct gl_context *ctx, 249 struct gl_renderbuffer *rb, 250 GLuint x, GLuint y, GLuint w, GLuint h, 251 GLbitfield mode, 252 GLubyte **out_map, 253 GLint *out_stride) 254{ 255 struct intel_context *intel = intel_context(ctx); 256 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 257 uint8_t *tiled_s8_map; 258 uint8_t *untiled_s8_map; 259 260 assert(rb->Format == MESA_FORMAT_S8); 261 assert(irb->region); 262 263 irb->map_mode = mode; 264 irb->map_x = x; 265 irb->map_y = y; 266 irb->map_w = w; 267 irb->map_h = h; 268 269 /* Flip the Y axis for the default framebuffer. */ 270 int y_flip = (rb->Name == 0) ? -1 : 1; 271 int y_bias = (rb->Name == 0) ? 
(rb->Height - 1) : 0; 272 273 irb->map_buffer = malloc(w * h); 274 untiled_s8_map = irb->map_buffer; 275 tiled_s8_map = intel_region_map(intel, irb->region, mode); 276 277 for (uint32_t pix_y = 0; pix_y < h; pix_y++) { 278 for (uint32_t pix_x = 0; pix_x < w; pix_x++) { 279 uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias; 280 ptrdiff_t offset = intel_offset_S8(irb->region->pitch, 281 x + pix_x, 282 flipped_y); 283 untiled_s8_map[pix_y * w + pix_x] = tiled_s8_map[offset]; 284 } 285 } 286 287 *out_map = untiled_s8_map; 288 *out_stride = w; 289 290 DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n", 291 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 292 x, y, w, h, *out_map, *out_stride); 293} 294 295/** 296 * \brief Map a depthstencil buffer with separate stencil. 297 * 298 * A depthstencil renderbuffer, if using separate stencil, consists of a depth 299 * renderbuffer and a hidden stencil renderbuffer. This function maps the 300 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and 301 * returns that as the mapped pointer. The caller need not be aware of the 302 * hidden stencil buffer and may safely assume that the mapped pointer points 303 * to a MESA_FORMAT_S8_Z24 buffer 304 * 305 * The consistency between the depth buffer's S8 bits and the hidden stencil 306 * buffer is managed within intel_map_renderbuffer() and 307 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits 308 * according to the map mode. 
309 * 310 * \see intel_map_renderbuffer() 311 * \see intel_unmap_renderbuffer_separate_s8z24() 312 */ 313static void 314intel_map_renderbuffer_separate_s8z24(struct gl_context *ctx, 315 struct gl_renderbuffer *rb, 316 GLuint x, GLuint y, GLuint w, GLuint h, 317 GLbitfield mode, 318 GLubyte **out_map, 319 GLint *out_stride) 320{ 321 struct intel_context *intel = intel_context(ctx); 322 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 323 324 GLbitfield adjusted_mode; 325 326 uint8_t *s8z24_map; 327 int32_t s8z24_stride; 328 329 assert(rb->Name != 0); 330 assert(rb->Format == MESA_FORMAT_S8_Z24); 331 assert(irb->wrapped_depth != NULL); 332 assert(irb->wrapped_stencil != NULL); 333 334 irb->map_mode = mode; 335 irb->map_x = x; 336 irb->map_y = y; 337 irb->map_w = w; 338 irb->map_h = h; 339 340 if (mode & GL_MAP_READ_BIT) { 341 /* Since the caller may read the stencil bits, we must copy the stencil 342 * buffer's contents into the depth buffer. This necessitates that the 343 * depth buffer be mapped in write mode. 
344 */ 345 adjusted_mode = mode | GL_MAP_WRITE_BIT; 346 } else { 347 adjusted_mode = mode; 348 } 349 350 intel_map_renderbuffer_gtt(ctx, irb->wrapped_depth, 351 x, y, w, h, adjusted_mode, 352 &s8z24_map, &s8z24_stride); 353 354 if (mode & GL_MAP_READ_BIT) { 355 struct intel_renderbuffer *s8_irb; 356 uint8_t *s8_map; 357 358 s8_irb = intel_renderbuffer(irb->wrapped_stencil); 359 s8_map = intel_region_map(intel, s8_irb->region, GL_MAP_READ_BIT); 360 361 for (uint32_t pix_y = 0; pix_y < h; ++pix_y) { 362 for (uint32_t pix_x = 0; pix_x < w; ++pix_x) { 363 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->region->pitch, 364 x + pix_x, 365 y + pix_y); 366 ptrdiff_t s8z24_offset = pix_y * s8z24_stride 367 + pix_x * 4 368 + 3; 369 s8z24_map[s8z24_offset] = s8_map[s8_offset]; 370 } 371 } 372 373 intel_region_unmap(intel, s8_irb->region); 374 } 375 376 *out_map = s8z24_map; 377 *out_stride = s8z24_stride; 378} 379 380/** 381 * \see dd_function_table::MapRenderbuffer 382 */ 383static void 384intel_map_renderbuffer(struct gl_context *ctx, 385 struct gl_renderbuffer *rb, 386 GLuint x, GLuint y, GLuint w, GLuint h, 387 GLbitfield mode, 388 GLubyte **out_map, 389 GLint *out_stride) 390{ 391 struct intel_context *intel = intel_context(ctx); 392 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 393 394 /* We sometimes get called with this by our intel_span.c usage. 
*/ 395 if (!irb->region && !irb->wrapped_depth) { 396 *out_map = NULL; 397 *out_stride = 0; 398 return; 399 } 400 401 if (rb->Format == MESA_FORMAT_S8) { 402 intel_map_renderbuffer_s8(ctx, rb, x, y, w, h, mode, 403 out_map, out_stride); 404 } else if (irb->wrapped_depth) { 405 intel_map_renderbuffer_separate_s8z24(ctx, rb, x, y, w, h, mode, 406 out_map, out_stride); 407 } else if (intel->gen >= 6 && 408 !(mode & GL_MAP_WRITE_BIT) && 409 irb->region->tiling == I915_TILING_X) { 410 intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode, 411 out_map, out_stride); 412 } else { 413 intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode, 414 out_map, out_stride); 415 } 416} 417 418/** 419 * \see intel_map_renderbuffer_s8() 420 */ 421static void 422intel_unmap_renderbuffer_s8(struct gl_context *ctx, 423 struct gl_renderbuffer *rb) 424{ 425 struct intel_context *intel = intel_context(ctx); 426 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 427 428 DBG("%s: rb %d (%s)\n", __FUNCTION__, 429 rb->Name, _mesa_get_format_name(rb->Format)); 430 431 assert(rb->Format == MESA_FORMAT_S8); 432 433 if (!irb->map_buffer) 434 return; 435 436 if (irb->map_mode & GL_MAP_WRITE_BIT) { 437 /* The temporary buffer was written to, so we must copy its pixels into 438 * the real buffer. 439 */ 440 uint8_t *untiled_s8_map = irb->map_buffer; 441 uint8_t *tiled_s8_map = irb->region->bo->virtual; 442 443 /* Flip the Y axis for the default framebuffer. */ 444 int y_flip = (rb->Name == 0) ? -1 : 1; 445 int y_bias = (rb->Name == 0) ? 
(rb->Height - 1) : 0; 446 447 for (uint32_t pix_y = 0; pix_y < irb->map_h; pix_y++) { 448 for (uint32_t pix_x = 0; pix_x < irb->map_w; pix_x++) { 449 uint32_t flipped_y = y_flip * (int32_t)(pix_y + irb->map_y) + y_bias; 450 ptrdiff_t offset = intel_offset_S8(irb->region->pitch, 451 pix_x + irb->map_x, 452 flipped_y); 453 tiled_s8_map[offset] = 454 untiled_s8_map[pix_y * irb->map_w + pix_x]; 455 } 456 } 457 } 458 459 intel_region_unmap(intel, irb->region); 460 free(irb->map_buffer); 461 irb->map_buffer = NULL; 462} 463 464/** 465 * \brief Unmap a depthstencil renderbuffer with separate stencil. 466 * 467 * \see intel_map_renderbuffer_separate_s8z24() 468 * \see intel_unmap_renderbuffer() 469 */ 470static void 471intel_unmap_renderbuffer_separate_s8z24(struct gl_context *ctx, 472 struct gl_renderbuffer *rb) 473{ 474 struct intel_context *intel = intel_context(ctx); 475 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 476 struct intel_renderbuffer *s8z24_irb; 477 478 assert(rb->Name != 0); 479 assert(rb->Format == MESA_FORMAT_S8_Z24); 480 assert(irb->wrapped_depth != NULL); 481 assert(irb->wrapped_stencil != NULL); 482 483 s8z24_irb = intel_renderbuffer(irb->wrapped_depth); 484 485 if (irb->map_mode & GL_MAP_WRITE_BIT) { 486 /* Copy the stencil bits from the depth buffer into the stencil buffer. 
487 */ 488 uint32_t map_x = irb->map_x; 489 uint32_t map_y = irb->map_y; 490 uint32_t map_w = irb->map_w; 491 uint32_t map_h = irb->map_h; 492 493 struct intel_renderbuffer *s8_irb; 494 uint8_t *s8_map; 495 496 s8_irb = intel_renderbuffer(irb->wrapped_stencil); 497 s8_map = intel_region_map(intel, s8_irb->region, GL_MAP_WRITE_BIT); 498 499 int32_t s8z24_stride = 4 * s8z24_irb->region->pitch; 500 uint8_t *s8z24_map = s8z24_irb->region->bo->virtual 501 + map_y * s8z24_stride 502 + map_x * 4; 503 504 for (uint32_t pix_y = 0; pix_y < map_h; ++pix_y) { 505 for (uint32_t pix_x = 0; pix_x < map_w; ++pix_x) { 506 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->region->pitch, 507 map_x + pix_x, 508 map_y + pix_y); 509 ptrdiff_t s8z24_offset = pix_y * s8z24_stride 510 + pix_x * 4 511 + 3; 512 s8_map[s8_offset] = s8z24_map[s8z24_offset]; 513 } 514 } 515 516 intel_region_unmap(intel, s8_irb->region); 517 } 518 519 drm_intel_gem_bo_unmap_gtt(s8z24_irb->region->bo); 520} 521 522/** 523 * \see dd_function_table::UnmapRenderbuffer 524 */ 525static void 526intel_unmap_renderbuffer(struct gl_context *ctx, 527 struct gl_renderbuffer *rb) 528{ 529 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 530 531 DBG("%s: rb %d (%s)\n", __FUNCTION__, 532 rb->Name, _mesa_get_format_name(rb->Format)); 533 534 if (rb->Format == MESA_FORMAT_S8) { 535 intel_unmap_renderbuffer_s8(ctx, rb); 536 } else if (irb->wrapped_depth) { 537 intel_unmap_renderbuffer_separate_s8z24(ctx, rb); 538 } else if (irb->map_bo) { 539 /* Paired with intel_map_renderbuffer_blit(). */ 540 drm_intel_bo_unmap(irb->map_bo); 541 drm_intel_bo_unreference(irb->map_bo); 542 irb->map_bo = 0; 543 } else { 544 /* Paired with intel_map_renderbuffer_gtt(). */ 545 if (irb->region) { 546 /* The region may be null when intel_map_renderbuffer() is 547 * called from intel_span.c. 548 */ 549 drm_intel_gem_bo_unmap_gtt(irb->region->bo); 550 } 551 } 552} 553 554/** 555 * Return a pointer to a specific pixel in a renderbuffer. 
556 */ 557static void * 558intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb, 559 GLint x, GLint y) 560{ 561 /* By returning NULL we force all software rendering to go through 562 * the span routines. 563 */ 564 return NULL; 565} 566 567 568/** 569 * Called via glRenderbufferStorageEXT() to set the format and allocate 570 * storage for a user-created renderbuffer. 571 */ 572GLboolean 573intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 574 GLenum internalFormat, 575 GLuint width, GLuint height) 576{ 577 struct intel_context *intel = intel_context(ctx); 578 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 579 int cpp, tiling; 580 581 ASSERT(rb->Name != 0); 582 583 switch (internalFormat) { 584 default: 585 /* Use the same format-choice logic as for textures. 586 * Renderbuffers aren't any different from textures for us, 587 * except they're less useful because you can't texture with 588 * them. 589 */ 590 rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat, 591 GL_NONE, GL_NONE); 592 break; 593 case GL_STENCIL_INDEX: 594 case GL_STENCIL_INDEX1_EXT: 595 case GL_STENCIL_INDEX4_EXT: 596 case GL_STENCIL_INDEX8_EXT: 597 case GL_STENCIL_INDEX16_EXT: 598 /* These aren't actual texture formats, so force them here. 
*/ 599 if (intel->has_separate_stencil) { 600 rb->Format = MESA_FORMAT_S8; 601 } else { 602 assert(!intel->must_use_separate_stencil); 603 rb->Format = MESA_FORMAT_S8_Z24; 604 } 605 break; 606 } 607 608 rb->Width = width; 609 rb->Height = height; 610 rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat); 611 rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format); 612 cpp = _mesa_get_format_bytes(rb->Format); 613 614 intel_flush(ctx); 615 616 /* free old region */ 617 if (irb->region) { 618 intel_region_release(&irb->region); 619 } 620 if (irb->hiz_region) { 621 intel_region_release(&irb->hiz_region); 622 } 623 624 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__, 625 _mesa_lookup_enum_by_nr(internalFormat), 626 _mesa_get_format_name(rb->Format), width, height); 627 628 tiling = I915_TILING_NONE; 629 if (intel->use_texture_tiling) { 630 GLenum base_format = _mesa_get_format_base_format(rb->Format); 631 632 if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT || 633 base_format == GL_STENCIL_INDEX || 634 base_format == GL_DEPTH_STENCIL)) 635 tiling = I915_TILING_Y; 636 else 637 tiling = I915_TILING_X; 638 } 639 640 if (irb->Base.Format == MESA_FORMAT_S8) { 641 /* 642 * The stencil buffer is W tiled. However, we request from the kernel a 643 * non-tiled buffer because the GTT is incapable of W fencing. 644 * 645 * The stencil buffer has quirky pitch requirements. From Vol 2a, 646 * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch": 647 * The pitch must be set to 2x the value computed based on width, as 648 * the stencil buffer is stored with two rows interleaved. 649 * To accomplish this, we resort to the nasty hack of doubling the drm 650 * region's cpp and halving its height. 651 * 652 * If we neglect to double the pitch, then render corruption occurs. 
653 */ 654 irb->region = intel_region_alloc(intel->intelScreen, 655 I915_TILING_NONE, 656 cpp * 2, 657 ALIGN(width, 64), 658 ALIGN((height + 1) / 2, 64), 659 true); 660 if (!irb->region) 661 return false; 662 663 } else if (irb->Base.Format == MESA_FORMAT_S8_Z24 664 && intel->must_use_separate_stencil) { 665 666 bool ok = true; 667 struct gl_renderbuffer *depth_rb; 668 struct gl_renderbuffer *stencil_rb; 669 670 depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height, 671 MESA_FORMAT_X8_Z24); 672 stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height, 673 MESA_FORMAT_S8); 674 ok = depth_rb && stencil_rb; 675 ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb, 676 depth_rb->InternalFormat, 677 width, height); 678 ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb, 679 stencil_rb->InternalFormat, 680 width, height); 681 682 if (!ok) { 683 if (depth_rb) { 684 intel_delete_renderbuffer(depth_rb); 685 } 686 if (stencil_rb) { 687 intel_delete_renderbuffer(stencil_rb); 688 } 689 return false; 690 } 691 692 depth_rb->Wrapped = rb; 693 stencil_rb->Wrapped = rb; 694 _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb); 695 _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb); 696 697 } else { 698 irb->region = intel_region_alloc(intel->intelScreen, tiling, cpp, 699 width, height, true); 700 if (!irb->region) 701 return false; 702 703 if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 704 irb->hiz_region = intel_region_alloc(intel->intelScreen, 705 I915_TILING_Y, 706 irb->region->cpp, 707 irb->region->width, 708 irb->region->height, 709 true); 710 if (!irb->hiz_region) { 711 intel_region_release(&irb->region); 712 return false; 713 } 714 } 715 } 716 717 return true; 718} 719 720 721#if FEATURE_OES_EGL_image 722static void 723intel_image_target_renderbuffer_storage(struct gl_context *ctx, 724 struct gl_renderbuffer *rb, 725 void *image_handle) 726{ 727 struct intel_context *intel = intel_context(ctx); 728 struct 
intel_renderbuffer *irb; 729 __DRIscreen *screen; 730 __DRIimage *image; 731 732 screen = intel->intelScreen->driScrnPriv; 733 image = screen->dri2.image->lookupEGLImage(screen, image_handle, 734 screen->loaderPrivate); 735 if (image == NULL) 736 return; 737 738 /* __DRIimage is opaque to the core so it has to be checked here */ 739 switch (image->format) { 740 case MESA_FORMAT_RGBA8888_REV: 741 _mesa_error(&intel->ctx, GL_INVALID_OPERATION, 742 "glEGLImageTargetRenderbufferStorage(unsupported image format"); 743 return; 744 break; 745 default: 746 break; 747 } 748 749 irb = intel_renderbuffer(rb); 750 intel_region_reference(&irb->region, image->region); 751 752 rb->InternalFormat = image->internal_format; 753 rb->Width = image->region->width; 754 rb->Height = image->region->height; 755 rb->Format = image->format; 756 rb->DataType = image->data_type; 757 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx, 758 image->internal_format); 759} 760#endif 761 762/** 763 * Called for each hardware renderbuffer when a _window_ is resized. 764 * Just update fields. 765 * Not used for user-created renderbuffers! 
766 */ 767static GLboolean 768intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 769 GLenum internalFormat, GLuint width, GLuint height) 770{ 771 ASSERT(rb->Name == 0); 772 rb->Width = width; 773 rb->Height = height; 774 rb->InternalFormat = internalFormat; 775 776 return true; 777} 778 779 780static void 781intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb, 782 GLuint width, GLuint height) 783{ 784 int i; 785 786 _mesa_resize_framebuffer(ctx, fb, width, height); 787 788 fb->Initialized = true; /* XXX remove someday */ 789 790 if (fb->Name != 0) { 791 return; 792 } 793 794 795 /* Make sure all window system renderbuffers are up to date */ 796 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) { 797 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer; 798 799 /* only resize if size is changing */ 800 if (rb && (rb->Width != width || rb->Height != height)) { 801 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height); 802 } 803 } 804} 805 806 807/** Dummy function for gl_renderbuffer::AllocStorage() */ 808static GLboolean 809intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 810 GLenum internalFormat, GLuint width, GLuint height) 811{ 812 _mesa_problem(ctx, "intel_op_alloc_storage should never be called."); 813 return false; 814} 815 816/** 817 * Create a new intel_renderbuffer which corresponds to an on-screen window, 818 * not a user-created renderbuffer. 
819 */ 820struct intel_renderbuffer * 821intel_create_renderbuffer(gl_format format) 822{ 823 GET_CURRENT_CONTEXT(ctx); 824 825 struct intel_renderbuffer *irb; 826 827 irb = CALLOC_STRUCT(intel_renderbuffer); 828 if (!irb) { 829 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 830 return NULL; 831 } 832 833 _mesa_init_renderbuffer(&irb->Base, 0); 834 irb->Base.ClassID = INTEL_RB_CLASS; 835 irb->Base._BaseFormat = _mesa_get_format_base_format(format); 836 irb->Base.Format = format; 837 irb->Base.InternalFormat = irb->Base._BaseFormat; 838 irb->Base.DataType = intel_mesa_format_to_rb_datatype(format); 839 840 /* intel-specific methods */ 841 irb->Base.Delete = intel_delete_renderbuffer; 842 irb->Base.AllocStorage = intel_alloc_window_storage; 843 irb->Base.GetPointer = intel_get_pointer; 844 845 return irb; 846} 847 848 849struct gl_renderbuffer* 850intel_create_wrapped_renderbuffer(struct gl_context * ctx, 851 int width, int height, 852 gl_format format) 853{ 854 /* 855 * The name here is irrelevant, as long as its nonzero, because the 856 * renderbuffer never gets entered into Mesa's renderbuffer hash table. 857 */ 858 GLuint name = ~0; 859 860 struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer); 861 if (!irb) { 862 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 863 return NULL; 864 } 865 866 struct gl_renderbuffer *rb = &irb->Base; 867 _mesa_init_renderbuffer(rb, name); 868 rb->ClassID = INTEL_RB_CLASS; 869 rb->_BaseFormat = _mesa_get_format_base_format(format); 870 rb->Format = format; 871 rb->InternalFormat = rb->_BaseFormat; 872 rb->DataType = intel_mesa_format_to_rb_datatype(format); 873 rb->Width = width; 874 rb->Height = height; 875 876 return rb; 877} 878 879 880/** 881 * Create a new renderbuffer object. 882 * Typically called via glBindRenderbufferEXT(). 
883 */ 884static struct gl_renderbuffer * 885intel_new_renderbuffer(struct gl_context * ctx, GLuint name) 886{ 887 /*struct intel_context *intel = intel_context(ctx); */ 888 struct intel_renderbuffer *irb; 889 890 irb = CALLOC_STRUCT(intel_renderbuffer); 891 if (!irb) { 892 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 893 return NULL; 894 } 895 896 _mesa_init_renderbuffer(&irb->Base, name); 897 irb->Base.ClassID = INTEL_RB_CLASS; 898 899 /* intel-specific methods */ 900 irb->Base.Delete = intel_delete_renderbuffer; 901 irb->Base.AllocStorage = intel_alloc_renderbuffer_storage; 902 irb->Base.GetPointer = intel_get_pointer; 903 /* span routines set in alloc_storage function */ 904 905 return &irb->Base; 906} 907 908 909/** 910 * Called via glBindFramebufferEXT(). 911 */ 912static void 913intel_bind_framebuffer(struct gl_context * ctx, GLenum target, 914 struct gl_framebuffer *fb, struct gl_framebuffer *fbread) 915{ 916 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) { 917 intel_draw_buffer(ctx); 918 } 919 else { 920 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */ 921 } 922} 923 924 925/** 926 * Called via glFramebufferRenderbufferEXT(). 927 */ 928static void 929intel_framebuffer_renderbuffer(struct gl_context * ctx, 930 struct gl_framebuffer *fb, 931 GLenum attachment, struct gl_renderbuffer *rb) 932{ 933 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? 
rb->Name : 0); 934 935 intel_flush(ctx); 936 937 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb); 938 intel_draw_buffer(ctx); 939} 940 941static bool 942intel_update_tex_wrapper_regions(struct intel_context *intel, 943 struct intel_renderbuffer *irb, 944 struct intel_texture_image *intel_image); 945 946static bool 947intel_update_wrapper(struct gl_context *ctx, struct intel_renderbuffer *irb, 948 struct gl_texture_image *texImage) 949{ 950 struct intel_context *intel = intel_context(ctx); 951 struct intel_texture_image *intel_image = intel_texture_image(texImage); 952 int width, height, depth; 953 954 if (!intel_span_supports_format(texImage->TexFormat)) { 955 DBG("Render to texture BAD FORMAT %s\n", 956 _mesa_get_format_name(texImage->TexFormat)); 957 return false; 958 } else { 959 DBG("Render to texture %s\n", _mesa_get_format_name(texImage->TexFormat)); 960 } 961 962 intel_miptree_get_dimensions_for_image(texImage, &width, &height, &depth); 963 964 irb->Base.Format = texImage->TexFormat; 965 irb->Base.DataType = intel_mesa_format_to_rb_datatype(texImage->TexFormat); 966 irb->Base.InternalFormat = texImage->InternalFormat; 967 irb->Base._BaseFormat = _mesa_base_tex_format(ctx, irb->Base.InternalFormat); 968 irb->Base.Width = width; 969 irb->Base.Height = height; 970 971 irb->Base.Delete = intel_delete_renderbuffer; 972 irb->Base.AllocStorage = intel_nop_alloc_storage; 973 974 if (intel_image->stencil_rb) { 975 /* The tex image has packed depth/stencil format, but is using separate 976 * stencil. */ 977 978 bool ok; 979 struct intel_renderbuffer *depth_irb = 980 intel_renderbuffer(intel_image->depth_rb); 981 982 /* Update the hiz region if necessary. */ 983 ok = intel_update_tex_wrapper_regions(intel, depth_irb, intel_image); 984 if (!ok) { 985 return false; 986 } 987 988 /* The tex image shares its embedded depth and stencil renderbuffers with 989 * the renderbuffer wrapper. 
*/ 990 _mesa_reference_renderbuffer(&irb->wrapped_depth, 991 intel_image->depth_rb); 992 _mesa_reference_renderbuffer(&irb->wrapped_stencil, 993 intel_image->stencil_rb); 994 995 return true; 996 } else { 997 return intel_update_tex_wrapper_regions(intel, irb, intel_image); 998 } 999} 1000 1001/** 1002 * FIXME: The handling of the hiz region is broken for mipmapped depth textures 1003 * FIXME: because intel_finalize_mipmap_tree is unaware of it. 1004 */ 1005static bool 1006intel_update_tex_wrapper_regions(struct intel_context *intel, 1007 struct intel_renderbuffer *irb, 1008 struct intel_texture_image *intel_image) 1009{ 1010 struct gl_renderbuffer *rb = &irb->Base; 1011 1012 /* Point the renderbuffer's region to the texture's region. */ 1013 if (irb->region != intel_image->mt->region) { 1014 intel_region_reference(&irb->region, intel_image->mt->region); 1015 } 1016 1017 /* Allocate the texture's hiz region if necessary. */ 1018 if (intel->vtbl.is_hiz_depth_format(intel, rb->Format) 1019 && !intel_image->mt->hiz_region) { 1020 intel_image->mt->hiz_region = 1021 intel_region_alloc(intel->intelScreen, 1022 I915_TILING_Y, 1023 _mesa_get_format_bytes(rb->Format), 1024 rb->Width, 1025 rb->Height, 1026 true); 1027 if (!intel_image->mt->hiz_region) 1028 return false; 1029 } 1030 1031 /* Point the renderbuffer's hiz region to the texture's hiz region. */ 1032 if (irb->hiz_region != intel_image->mt->hiz_region) { 1033 intel_region_reference(&irb->hiz_region, intel_image->mt->hiz_region); 1034 } 1035 1036 return true; 1037} 1038 1039 1040/** 1041 * When glFramebufferTexture[123]D is called this function sets up the 1042 * gl_renderbuffer wrapper around the texture image. 1043 * This will have the region info needed for hardware rendering. 
1044 */ 1045static struct intel_renderbuffer * 1046intel_wrap_texture(struct gl_context * ctx, struct gl_texture_image *texImage) 1047{ 1048 const GLuint name = ~0; /* not significant, but distinct for debugging */ 1049 struct intel_renderbuffer *irb; 1050 1051 /* make an intel_renderbuffer to wrap the texture image */ 1052 irb = CALLOC_STRUCT(intel_renderbuffer); 1053 if (!irb) { 1054 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture"); 1055 return NULL; 1056 } 1057 1058 _mesa_init_renderbuffer(&irb->Base, name); 1059 irb->Base.ClassID = INTEL_RB_CLASS; 1060 1061 if (!intel_update_wrapper(ctx, irb, texImage)) { 1062 free(irb); 1063 return NULL; 1064 } 1065 1066 return irb; 1067} 1068 1069void 1070intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb, 1071 struct intel_texture_image *intel_image, 1072 int zoffset) 1073{ 1074 unsigned int dst_x, dst_y; 1075 1076 /* compute offset of the particular 2D image within the texture region */ 1077 intel_miptree_get_image_offset(intel_image->mt, 1078 intel_image->base.Base.Level, 1079 intel_image->base.Base.Face, 1080 zoffset, 1081 &dst_x, &dst_y); 1082 1083 irb->draw_x = dst_x; 1084 irb->draw_y = dst_y; 1085} 1086 1087/** 1088 * Rendering to tiled buffers requires that the base address of the 1089 * buffer be aligned to a page boundary. We generally render to 1090 * textures by pointing the surface at the mipmap image level, which 1091 * may not be aligned to a tile boundary. 1092 * 1093 * This function returns an appropriately-aligned base offset 1094 * according to the tiling restrictions, plus any required x/y offset 1095 * from there. 
1096 */ 1097uint32_t 1098intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb, 1099 uint32_t *tile_x, 1100 uint32_t *tile_y) 1101{ 1102 int cpp = irb->region->cpp; 1103 uint32_t pitch = irb->region->pitch * cpp; 1104 1105 if (irb->region->tiling == I915_TILING_NONE) { 1106 *tile_x = 0; 1107 *tile_y = 0; 1108 return irb->draw_x * cpp + irb->draw_y * pitch; 1109 } else if (irb->region->tiling == I915_TILING_X) { 1110 *tile_x = irb->draw_x % (512 / cpp); 1111 *tile_y = irb->draw_y % 8; 1112 return ((irb->draw_y / 8) * (8 * pitch) + 1113 (irb->draw_x - *tile_x) / (512 / cpp) * 4096); 1114 } else { 1115 assert(irb->region->tiling == I915_TILING_Y); 1116 *tile_x = irb->draw_x % (128 / cpp); 1117 *tile_y = irb->draw_y % 32; 1118 return ((irb->draw_y / 32) * (32 * pitch) + 1119 (irb->draw_x - *tile_x) / (128 / cpp) * 4096); 1120 } 1121} 1122 1123#ifndef I915 1124static bool 1125need_tile_offset_workaround(struct brw_context *brw, 1126 struct intel_renderbuffer *irb) 1127{ 1128 uint32_t tile_x, tile_y; 1129 1130 if (brw->has_surface_tile_offset) 1131 return false; 1132 1133 intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y); 1134 1135 return tile_x != 0 || tile_y != 0; 1136} 1137#endif 1138 1139/** 1140 * Called by glFramebufferTexture[123]DEXT() (and other places) to 1141 * prepare for rendering into texture memory. This might be called 1142 * many times to choose different texture levels, cube faces, etc 1143 * before intel_finish_render_texture() is ever called. 
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);

   (void) fb;

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      /* First time rendering to this texture image: create the wrapper. */
      irb = intel_wrap_texture(ctx, image);
      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   /* Refresh the wrapper's region info; on failure drop the wrapper and
    * fall back to software rendering.
    */
   if (!intel_update_wrapper(ctx, irb, image)) {
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   /* Point the renderbuffer at the selected image within the miptree and
    * flag the image so later texture validation knows it was drawn to.
    */
   intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);
   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      /* Single-level tree: first and last level are both this image's
       * level, so the image starts tile-aligned at the origin.
       */
      new_mt = intel_miptree_create(intel, image->TexObject->Target,
                                    intel_image->base.Base.TexFormat,
                                    intel_image->base.Base.Level,
                                    intel_image->base.Base.Level,
                                    width, height, depth,
                                    true);

      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      /* Recompute the draw offset against the new single-level tree. */
      intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);

      /* NOTE(review): the line below assumes intel_miptree_copy_teximage()
       * re-points intel_image->mt at new_mt — confirm in
       * intel_mipmap_tree.c; otherwise this would reference the old
       * (unaligned) region.
       */
      intel_region_reference(&irb->region, intel_image->mt->region);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render texture tid %lx tex=%u\n",
       _glthread_GetID(), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
1255 */ 1256static void 1257intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb) 1258{ 1259 struct intel_context *intel = intel_context(ctx); 1260 const struct intel_renderbuffer *depthRb = 1261 intel_get_renderbuffer(fb, BUFFER_DEPTH); 1262 const struct intel_renderbuffer *stencilRb = 1263 intel_get_renderbuffer(fb, BUFFER_STENCIL); 1264 int i; 1265 1266 /* 1267 * The depth and stencil renderbuffers are the same renderbuffer or wrap 1268 * the same texture. 1269 */ 1270 if (depthRb && stencilRb) { 1271 bool depth_stencil_are_same; 1272 if (depthRb == stencilRb) 1273 depth_stencil_are_same = true; 1274 else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) && 1275 (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) && 1276 (fb->Attachment[BUFFER_DEPTH].Texture->Name == 1277 fb->Attachment[BUFFER_STENCIL].Texture->Name)) 1278 depth_stencil_are_same = true; 1279 else 1280 depth_stencil_are_same = false; 1281 1282 if (!intel->has_separate_stencil && !depth_stencil_are_same) { 1283 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 1284 } 1285 } 1286 1287 for (i = 0; i < Elements(fb->Attachment); i++) { 1288 struct gl_renderbuffer *rb; 1289 struct intel_renderbuffer *irb; 1290 1291 if (fb->Attachment[i].Type == GL_NONE) 1292 continue; 1293 1294 /* A supported attachment will have a Renderbuffer set either 1295 * from being a Renderbuffer or being a texture that got the 1296 * intel_wrap_texture() treatment. 
1297 */ 1298 rb = fb->Attachment[i].Renderbuffer; 1299 if (rb == NULL) { 1300 DBG("attachment without renderbuffer\n"); 1301 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 1302 continue; 1303 } 1304 1305 irb = intel_renderbuffer(rb); 1306 if (irb == NULL) { 1307 DBG("software rendering renderbuffer\n"); 1308 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 1309 continue; 1310 } 1311 1312 if (!intel_span_supports_format(irb->Base.Format) || 1313 !intel->vtbl.render_target_supported(irb->Base.Format)) { 1314 DBG("Unsupported texture/renderbuffer format attached: %s\n", 1315 _mesa_get_format_name(irb->Base.Format)); 1316 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 1317 } 1318 } 1319} 1320 1321/** 1322 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D 1323 * We can do this when the dst renderbuffer is actually a texture and 1324 * there is no scaling, mirroring or scissoring. 1325 * 1326 * \return new buffer mask indicating the buffers left to blit using the 1327 * normal path. 1328 */ 1329static GLbitfield 1330intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx, 1331 GLint srcX0, GLint srcY0, 1332 GLint srcX1, GLint srcY1, 1333 GLint dstX0, GLint dstY0, 1334 GLint dstX1, GLint dstY1, 1335 GLbitfield mask, GLenum filter) 1336{ 1337 if (mask & GL_COLOR_BUFFER_BIT) { 1338 const struct gl_framebuffer *drawFb = ctx->DrawBuffer; 1339 const struct gl_framebuffer *readFb = ctx->ReadBuffer; 1340 const struct gl_renderbuffer_attachment *drawAtt = 1341 &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]]; 1342 1343 /* If the source and destination are the same size with no 1344 mirroring, the rectangles are within the size of the 1345 texture and there is no scissor then we can use 1346 glCopyTexSubimage2D to implement the blit. 
This will end 1347 up as a fast hardware blit on some drivers */ 1348 if (drawAtt && drawAtt->Texture && 1349 srcX0 - srcX1 == dstX0 - dstX1 && 1350 srcY0 - srcY1 == dstY0 - dstY1 && 1351 srcX1 >= srcX0 && 1352 srcY1 >= srcY0 && 1353 srcX0 >= 0 && srcX1 <= readFb->Width && 1354 srcY0 >= 0 && srcY1 <= readFb->Height && 1355 dstX0 >= 0 && dstX1 <= drawFb->Width && 1356 dstY0 >= 0 && dstY1 <= drawFb->Height && 1357 !ctx->Scissor.Enabled) { 1358 const struct gl_texture_object *texObj = drawAtt->Texture; 1359 const GLuint dstLevel = drawAtt->TextureLevel; 1360 const GLenum target = texObj->Target; 1361 1362 struct gl_texture_image *texImage = 1363 _mesa_select_tex_image(ctx, texObj, target, dstLevel); 1364 1365 if (intel_copy_texsubimage(intel_context(ctx), 1366 intel_texture_image(texImage), 1367 dstX0, dstY0, 1368 srcX0, srcY0, 1369 srcX1 - srcX0, /* width */ 1370 srcY1 - srcY0)) 1371 mask &= ~GL_COLOR_BUFFER_BIT; 1372 } 1373 } 1374 1375 return mask; 1376} 1377 1378static void 1379intel_blit_framebuffer(struct gl_context *ctx, 1380 GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, 1381 GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, 1382 GLbitfield mask, GLenum filter) 1383{ 1384 /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */ 1385 mask = intel_blit_framebuffer_copy_tex_sub_image(ctx, 1386 srcX0, srcY0, srcX1, srcY1, 1387 dstX0, dstY0, dstX1, dstY1, 1388 mask, filter); 1389 if (mask == 0x0) 1390 return; 1391 1392 _mesa_meta_BlitFramebuffer(ctx, 1393 srcX0, srcY0, srcX1, srcY1, 1394 dstX0, dstY0, dstX1, dstY1, 1395 mask, filter); 1396} 1397 1398/** 1399 * Do one-time context initializations related to GL_EXT_framebuffer_object. 1400 * Hook in device driver functions. 
1401 */ 1402void 1403intel_fbo_init(struct intel_context *intel) 1404{ 1405 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer; 1406 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer; 1407 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer; 1408 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer; 1409 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer; 1410 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer; 1411 intel->ctx.Driver.RenderTexture = intel_render_texture; 1412 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture; 1413 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers; 1414 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer; 1415 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer; 1416 1417#if FEATURE_OES_EGL_image 1418 intel->ctx.Driver.EGLImageTargetRenderbufferStorage = 1419 intel_image_target_renderbuffer_storage; 1420#endif 1421} 1422