/* intel_fbo.c — revision 433ff3e16e8e090fd3a1bf427e61f3e5971a5740 */
1/************************************************************************** 2 * 3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 * 26 **************************************************************************/ 27 28 29#include "main/enums.h" 30#include "main/imports.h" 31#include "main/macros.h" 32#include "main/mfeatures.h" 33#include "main/mtypes.h" 34#include "main/fbobject.h" 35#include "main/framebuffer.h" 36#include "main/renderbuffer.h" 37#include "main/context.h" 38#include "main/teximage.h" 39#include "main/image.h" 40 41#include "swrast/swrast.h" 42#include "drivers/common/meta.h" 43 44#include "intel_context.h" 45#include "intel_batchbuffer.h" 46#include "intel_buffers.h" 47#include "intel_blit.h" 48#include "intel_fbo.h" 49#include "intel_mipmap_tree.h" 50#include "intel_regions.h" 51#include "intel_tex.h" 52#include "intel_span.h" 53#ifndef I915 54#include "brw_context.h" 55#endif 56 57#define FILE_DEBUG_FLAG DEBUG_FBO 58 59static struct gl_renderbuffer * 60intel_new_renderbuffer(struct gl_context * ctx, GLuint name); 61 62bool 63intel_framebuffer_has_hiz(struct gl_framebuffer *fb) 64{ 65 struct intel_renderbuffer *rb = NULL; 66 if (fb) 67 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH); 68 return rb && rb->mt && rb->mt->hiz_mt; 69} 70 71struct intel_region* 72intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex) 73{ 74 struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex); 75 if (irb && irb->mt) { 76 if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt) 77 return irb->mt->stencil_mt->region; 78 else 79 return irb->mt->region; 80 } else 81 return NULL; 82} 83 84/** 85 * Create a new framebuffer object. 
86 */ 87static struct gl_framebuffer * 88intel_new_framebuffer(struct gl_context * ctx, GLuint name) 89{ 90 /* Only drawable state in intel_framebuffer at this time, just use Mesa's 91 * class 92 */ 93 return _mesa_new_framebuffer(ctx, name); 94} 95 96 97/** Called by gl_renderbuffer::Delete() */ 98static void 99intel_delete_renderbuffer(struct gl_renderbuffer *rb) 100{ 101 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 102 103 ASSERT(irb); 104 105 intel_miptree_release(&irb->mt); 106 107 free(irb); 108} 109 110/** 111 * \see dd_function_table::MapRenderbuffer 112 */ 113static void 114intel_map_renderbuffer(struct gl_context *ctx, 115 struct gl_renderbuffer *rb, 116 GLuint x, GLuint y, GLuint w, GLuint h, 117 GLbitfield mode, 118 GLubyte **out_map, 119 GLint *out_stride) 120{ 121 struct intel_context *intel = intel_context(ctx); 122 struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb; 123 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 124 void *map; 125 int stride; 126 127 if (srb->Buffer) { 128 /* this is a malloc'd renderbuffer (accum buffer), not an irb */ 129 GLint bpp = _mesa_get_format_bytes(rb->Format); 130 GLint rowStride = srb->RowStride; 131 *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp; 132 *out_stride = rowStride; 133 return; 134 } 135 136 /* We sometimes get called with this by our intel_span.c usage. */ 137 if (!irb->mt) { 138 *out_map = NULL; 139 *out_stride = 0; 140 return; 141 } 142 143 /* For a window-system renderbuffer, we need to flip the mapping we receive 144 * upside-down. So we need to ask for a rectangle on flipped vertically, and 145 * we then return a pointer to the bottom of it with a negative stride. 
146 */ 147 if (rb->Name == 0) { 148 y = rb->Height - y - h; 149 } 150 151 intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer, 152 x, y, w, h, mode, &map, &stride); 153 154 if (rb->Name == 0) { 155 map += (h - 1) * stride; 156 stride = -stride; 157 } 158 159 DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n", 160 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 161 x, y, w, h, map, stride); 162 163 *out_map = map; 164 *out_stride = stride; 165} 166 167/** 168 * \see dd_function_table::UnmapRenderbuffer 169 */ 170static void 171intel_unmap_renderbuffer(struct gl_context *ctx, 172 struct gl_renderbuffer *rb) 173{ 174 struct intel_context *intel = intel_context(ctx); 175 struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb; 176 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 177 178 DBG("%s: rb %d (%s)\n", __FUNCTION__, 179 rb->Name, _mesa_get_format_name(rb->Format)); 180 181 if (srb->Buffer) { 182 /* this is a malloc'd renderbuffer (accum buffer) */ 183 /* nothing to do */ 184 return; 185 } 186 187 intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer); 188} 189 190 191/** 192 * Round up the requested multisample count to the next supported sample size. 193 */ 194static unsigned 195quantize_num_samples(struct intel_context *intel, unsigned num_samples) 196{ 197 switch (intel->gen) { 198 case 6: 199 /* Gen6 supports only 4x multisampling. */ 200 if (num_samples > 0) 201 return 4; 202 else 203 return 0; 204 case 7: 205 /* TODO: Gen7 supports only 4x multisampling at the moment. */ 206 if (num_samples > 0) 207 return 4; 208 else 209 return 0; 210 return 0; 211 default: 212 /* MSAA unsupported */ 213 return 0; 214 } 215} 216 217 218/** 219 * Called via glRenderbufferStorageEXT() to set the format and allocate 220 * storage for a user-created renderbuffer. 
221 */ 222GLboolean 223intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 224 GLenum internalFormat, 225 GLuint width, GLuint height) 226{ 227 struct intel_context *intel = intel_context(ctx); 228 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 229 rb->NumSamples = quantize_num_samples(intel, rb->NumSamples); 230 231 switch (internalFormat) { 232 default: 233 /* Use the same format-choice logic as for textures. 234 * Renderbuffers aren't any different from textures for us, 235 * except they're less useful because you can't texture with 236 * them. 237 */ 238 rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat, 239 GL_NONE, GL_NONE); 240 break; 241 case GL_STENCIL_INDEX: 242 case GL_STENCIL_INDEX1_EXT: 243 case GL_STENCIL_INDEX4_EXT: 244 case GL_STENCIL_INDEX8_EXT: 245 case GL_STENCIL_INDEX16_EXT: 246 /* These aren't actual texture formats, so force them here. */ 247 if (intel->has_separate_stencil) { 248 rb->Format = MESA_FORMAT_S8; 249 } else { 250 assert(!intel->must_use_separate_stencil); 251 rb->Format = MESA_FORMAT_S8_Z24; 252 } 253 break; 254 } 255 256 rb->Width = width; 257 rb->Height = height; 258 rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat); 259 260 intel_miptree_release(&irb->mt); 261 262 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__, 263 _mesa_lookup_enum_by_nr(internalFormat), 264 _mesa_get_format_name(rb->Format), width, height); 265 266 if (width == 0 || height == 0) 267 return true; 268 269 irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format, 270 width, height, 271 rb->NumSamples); 272 if (!irb->mt) 273 return false; 274 275 if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 276 bool ok = intel_miptree_alloc_hiz(intel, irb->mt, rb->NumSamples); 277 if (!ok) { 278 intel_miptree_release(&irb->mt); 279 return false; 280 } 281 } 282 283 if (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) { 284 bool ok = intel_miptree_alloc_mcs(intel, irb->mt, rb->NumSamples); 
285 if (!ok) { 286 intel_miptree_release(&irb->mt); 287 return false; 288 } 289 } 290 291 return true; 292} 293 294 295#if FEATURE_OES_EGL_image 296static void 297intel_image_target_renderbuffer_storage(struct gl_context *ctx, 298 struct gl_renderbuffer *rb, 299 void *image_handle) 300{ 301 struct intel_context *intel = intel_context(ctx); 302 struct intel_renderbuffer *irb; 303 __DRIscreen *screen; 304 __DRIimage *image; 305 306 screen = intel->intelScreen->driScrnPriv; 307 image = screen->dri2.image->lookupEGLImage(screen, image_handle, 308 screen->loaderPrivate); 309 if (image == NULL) 310 return; 311 312 /* __DRIimage is opaque to the core so it has to be checked here */ 313 switch (image->format) { 314 case MESA_FORMAT_RGBA8888_REV: 315 _mesa_error(&intel->ctx, GL_INVALID_OPERATION, 316 "glEGLImageTargetRenderbufferStorage(unsupported image format"); 317 return; 318 break; 319 default: 320 break; 321 } 322 323 irb = intel_renderbuffer(rb); 324 intel_miptree_release(&irb->mt); 325 irb->mt = intel_miptree_create_for_region(intel, 326 GL_TEXTURE_2D, 327 image->format, 328 image->region); 329 if (!irb->mt) 330 return; 331 332 rb->InternalFormat = image->internal_format; 333 rb->Width = image->region->width; 334 rb->Height = image->region->height; 335 rb->Format = image->format; 336 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx, 337 image->internal_format); 338} 339#endif 340 341/** 342 * Called for each hardware renderbuffer when a _window_ is resized. 343 * Just update fields. 344 * Not used for user-created renderbuffers! 
345 */ 346static GLboolean 347intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 348 GLenum internalFormat, GLuint width, GLuint height) 349{ 350 ASSERT(rb->Name == 0); 351 rb->Width = width; 352 rb->Height = height; 353 rb->InternalFormat = internalFormat; 354 355 return true; 356} 357 358 359static void 360intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb, 361 GLuint width, GLuint height) 362{ 363 int i; 364 365 _mesa_resize_framebuffer(ctx, fb, width, height); 366 367 fb->Initialized = true; /* XXX remove someday */ 368 369 if (fb->Name != 0) { 370 return; 371 } 372 373 374 /* Make sure all window system renderbuffers are up to date */ 375 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) { 376 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer; 377 378 /* only resize if size is changing */ 379 if (rb && (rb->Width != width || rb->Height != height)) { 380 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height); 381 } 382 } 383} 384 385 386/** Dummy function for gl_renderbuffer::AllocStorage() */ 387static GLboolean 388intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 389 GLenum internalFormat, GLuint width, GLuint height) 390{ 391 _mesa_problem(ctx, "intel_op_alloc_storage should never be called."); 392 return false; 393} 394 395/** 396 * Create a new intel_renderbuffer which corresponds to an on-screen window, 397 * not a user-created renderbuffer. 
398 */ 399struct intel_renderbuffer * 400intel_create_renderbuffer(gl_format format) 401{ 402 struct intel_renderbuffer *irb; 403 struct gl_renderbuffer *rb; 404 405 GET_CURRENT_CONTEXT(ctx); 406 407 irb = CALLOC_STRUCT(intel_renderbuffer); 408 if (!irb) { 409 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 410 return NULL; 411 } 412 413 rb = &irb->Base.Base; 414 415 _mesa_init_renderbuffer(rb, 0); 416 rb->ClassID = INTEL_RB_CLASS; 417 rb->_BaseFormat = _mesa_get_format_base_format(format); 418 rb->Format = format; 419 rb->InternalFormat = rb->_BaseFormat; 420 421 /* intel-specific methods */ 422 rb->Delete = intel_delete_renderbuffer; 423 rb->AllocStorage = intel_alloc_window_storage; 424 425 return irb; 426} 427 428/** 429 * Private window-system buffers (as opposed to ones shared with the display 430 * server created with intel_create_renderbuffer()) are most similar in their 431 * handling to user-created renderbuffers, but they have a resize handler that 432 * may be called at intel_update_renderbuffers() time. 433 */ 434struct intel_renderbuffer * 435intel_create_private_renderbuffer(gl_format format) 436{ 437 struct intel_renderbuffer *irb; 438 439 irb = intel_create_renderbuffer(format); 440 irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage; 441 442 return irb; 443} 444 445/** 446 * Create a new renderbuffer object. 447 * Typically called via glBindRenderbufferEXT(). 
448 */ 449static struct gl_renderbuffer * 450intel_new_renderbuffer(struct gl_context * ctx, GLuint name) 451{ 452 /*struct intel_context *intel = intel_context(ctx); */ 453 struct intel_renderbuffer *irb; 454 struct gl_renderbuffer *rb; 455 456 irb = CALLOC_STRUCT(intel_renderbuffer); 457 if (!irb) { 458 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 459 return NULL; 460 } 461 462 rb = &irb->Base.Base; 463 464 _mesa_init_renderbuffer(rb, name); 465 rb->ClassID = INTEL_RB_CLASS; 466 467 /* intel-specific methods */ 468 rb->Delete = intel_delete_renderbuffer; 469 rb->AllocStorage = intel_alloc_renderbuffer_storage; 470 /* span routines set in alloc_storage function */ 471 472 return rb; 473} 474 475 476/** 477 * Called via glBindFramebufferEXT(). 478 */ 479static void 480intel_bind_framebuffer(struct gl_context * ctx, GLenum target, 481 struct gl_framebuffer *fb, struct gl_framebuffer *fbread) 482{ 483 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) { 484 intel_draw_buffer(ctx); 485 } 486 else { 487 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */ 488 } 489} 490 491 492/** 493 * Called via glFramebufferRenderbufferEXT(). 494 */ 495static void 496intel_framebuffer_renderbuffer(struct gl_context * ctx, 497 struct gl_framebuffer *fb, 498 GLenum attachment, struct gl_renderbuffer *rb) 499{ 500 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0); 501 502 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb); 503 intel_draw_buffer(ctx); 504} 505 506/** 507 * \par Special case for separate stencil 508 * 509 * When wrapping a depthstencil texture that uses separate stencil, this 510 * function is recursively called twice: once to create \c 511 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the 512 * call to create \c irb->wrapped_depth, the \c format and \c 513 * internal_format parameters do not match \c mt->format. 
In that case, \c 514 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c 515 * MESA_FORMAT_X8_Z24. 516 * 517 * @return true on success 518 */ 519 520static bool 521intel_renderbuffer_update_wrapper(struct intel_context *intel, 522 struct intel_renderbuffer *irb, 523 struct gl_texture_image *image, 524 uint32_t layer) 525{ 526 struct gl_renderbuffer *rb = &irb->Base.Base; 527 struct intel_texture_image *intel_image = intel_texture_image(image); 528 struct intel_mipmap_tree *mt = intel_image->mt; 529 int level = image->Level; 530 531 rb->Format = image->TexFormat; 532 rb->InternalFormat = image->InternalFormat; 533 rb->_BaseFormat = image->_BaseFormat; 534 rb->Width = mt->level[level].width; 535 rb->Height = mt->level[level].height; 536 537 rb->Delete = intel_delete_renderbuffer; 538 rb->AllocStorage = intel_nop_alloc_storage; 539 540 intel_miptree_check_level_layer(mt, level, layer); 541 irb->mt_level = level; 542 irb->mt_layer = layer; 543 544 intel_miptree_reference(&irb->mt, mt); 545 546 intel_renderbuffer_set_draw_offset(irb); 547 548 if (mt->hiz_mt == NULL && 549 intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 550 intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */); 551 if (!mt->hiz_mt) 552 return false; 553 } 554 555 return true; 556} 557 558void 559intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb) 560{ 561 unsigned int dst_x, dst_y; 562 563 /* compute offset of the particular 2D image within the texture region */ 564 intel_miptree_get_image_offset(irb->mt, 565 irb->mt_level, 566 0, /* face, which we ignore */ 567 irb->mt_layer, 568 &dst_x, &dst_y); 569 570 irb->draw_x = dst_x; 571 irb->draw_y = dst_y; 572} 573 574/** 575 * Rendering to tiled buffers requires that the base address of the 576 * buffer be aligned to a page boundary. We generally render to 577 * textures by pointing the surface at the mipmap image level, which 578 * may not be aligned to a tile boundary. 
579 * 580 * This function returns an appropriately-aligned base offset 581 * according to the tiling restrictions, plus any required x/y offset 582 * from there. 583 */ 584uint32_t 585intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb, 586 uint32_t *tile_x, 587 uint32_t *tile_y) 588{ 589 struct intel_region *region = irb->mt->region; 590 uint32_t mask_x, mask_y; 591 592 intel_region_get_tile_masks(region, &mask_x, &mask_y); 593 594 *tile_x = irb->draw_x & mask_x; 595 *tile_y = irb->draw_y & mask_y; 596 return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x, 597 irb->draw_y & ~mask_y); 598} 599 600/** 601 * Called by glFramebufferTexture[123]DEXT() (and other places) to 602 * prepare for rendering into texture memory. This might be called 603 * many times to choose different texture levels, cube faces, etc 604 * before intel_finish_render_texture() is ever called. 605 */ 606static void 607intel_render_texture(struct gl_context * ctx, 608 struct gl_framebuffer *fb, 609 struct gl_renderbuffer_attachment *att) 610{ 611 struct intel_context *intel = intel_context(ctx); 612 struct gl_texture_image *image = _mesa_get_attachment_teximage(att); 613 struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer); 614 struct intel_texture_image *intel_image = intel_texture_image(image); 615 struct intel_mipmap_tree *mt = intel_image->mt; 616 int layer; 617 618 (void) fb; 619 620 if (att->CubeMapFace > 0) { 621 assert(att->Zoffset == 0); 622 layer = att->CubeMapFace; 623 } else { 624 layer = att->Zoffset; 625 } 626 627 if (!intel_image->mt) { 628 /* Fallback on drawing to a texture that doesn't have a miptree 629 * (has a border, width/height 0, etc.) 
630 */ 631 _mesa_reference_renderbuffer(&att->Renderbuffer, NULL); 632 _swrast_render_texture(ctx, fb, att); 633 return; 634 } 635 else if (!irb) { 636 intel_miptree_check_level_layer(mt, att->TextureLevel, layer); 637 638 irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0); 639 640 if (irb) { 641 /* bind the wrapper to the attachment point */ 642 _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base); 643 } 644 else { 645 /* fallback to software rendering */ 646 _swrast_render_texture(ctx, fb, att); 647 return; 648 } 649 } 650 651 if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) { 652 _mesa_reference_renderbuffer(&att->Renderbuffer, NULL); 653 _swrast_render_texture(ctx, fb, att); 654 return; 655 } 656 657 irb->tex_image = image; 658 659 DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n", 660 _mesa_get_format_name(image->TexFormat), 661 att->Texture->Name, image->Width, image->Height, 662 irb->Base.Base.RefCount); 663 664 /* update drawing region, etc */ 665 intel_draw_buffer(ctx); 666} 667 668 669/** 670 * Called by Mesa when rendering to a texture is done. 671 */ 672static void 673intel_finish_render_texture(struct gl_context * ctx, 674 struct gl_renderbuffer_attachment *att) 675{ 676 struct intel_context *intel = intel_context(ctx); 677 struct gl_texture_object *tex_obj = att->Texture; 678 struct gl_texture_image *image = 679 tex_obj->Image[att->CubeMapFace][att->TextureLevel]; 680 struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer); 681 682 DBG("Finish render %s texture tex=%u\n", 683 _mesa_get_format_name(image->TexFormat), att->Texture->Name); 684 685 if (irb) 686 irb->tex_image = NULL; 687 688 /* Since we've (probably) rendered to the texture and will (likely) use 689 * it in the texture domain later on in this batchbuffer, flush the 690 * batch. Once again, we wish for a domain tracker in libdrm to cover 691 * usage inside of a batchbuffer like GEM does in the kernel. 
692 */ 693 intel_batchbuffer_emit_mi_flush(intel); 694} 695 696/** 697 * Do additional "completeness" testing of a framebuffer object. 698 */ 699static void 700intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb) 701{ 702 struct intel_context *intel = intel_context(ctx); 703 const struct intel_renderbuffer *depthRb = 704 intel_get_renderbuffer(fb, BUFFER_DEPTH); 705 const struct intel_renderbuffer *stencilRb = 706 intel_get_renderbuffer(fb, BUFFER_STENCIL); 707 struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL; 708 int i; 709 710 DBG("%s() on fb %p (%s)\n", __FUNCTION__, 711 fb, (fb == ctx->DrawBuffer ? "drawbuffer" : 712 (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer"))); 713 714 if (depthRb) 715 depth_mt = depthRb->mt; 716 if (stencilRb) { 717 stencil_mt = stencilRb->mt; 718 if (stencil_mt->stencil_mt) 719 stencil_mt = stencil_mt->stencil_mt; 720 } 721 722 if (depth_mt && stencil_mt) { 723 if (depth_mt == stencil_mt) { 724 /* For true packed depth/stencil (not faked on prefers-separate-stencil 725 * hardware) we need to be sure they're the same level/layer, since 726 * we'll be emitting a single packet describing the packed setup. 
727 */ 728 if (depthRb->mt_level != stencilRb->mt_level || 729 depthRb->mt_layer != stencilRb->mt_layer) { 730 DBG("depth image level/layer %d/%d != stencil image %d/%d\n", 731 depthRb->mt_level, 732 depthRb->mt_layer, 733 stencilRb->mt_level, 734 stencilRb->mt_layer); 735 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 736 } 737 } else { 738 if (!intel->has_separate_stencil) { 739 DBG("separate stencil unsupported\n"); 740 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 741 } 742 if (stencil_mt->format != MESA_FORMAT_S8) { 743 DBG("separate stencil is %s instead of S8\n", 744 _mesa_get_format_name(stencil_mt->format)); 745 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 746 } 747 if (intel->gen < 7 && depth_mt->hiz_mt == NULL) { 748 /* Before Gen7, separate depth and stencil buffers can be used 749 * only if HiZ is enabled. From the Sandybridge PRM, Volume 2, 750 * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable: 751 * [DevSNB]: This field must be set to the same value (enabled 752 * or disabled) as Hierarchical Depth Buffer Enable. 753 */ 754 DBG("separate stencil without HiZ\n"); 755 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED; 756 } 757 } 758 } 759 760 for (i = 0; i < Elements(fb->Attachment); i++) { 761 struct gl_renderbuffer *rb; 762 struct intel_renderbuffer *irb; 763 764 if (fb->Attachment[i].Type == GL_NONE) 765 continue; 766 767 /* A supported attachment will have a Renderbuffer set either 768 * from being a Renderbuffer or being a texture that got the 769 * intel_wrap_texture() treatment. 
770 */ 771 rb = fb->Attachment[i].Renderbuffer; 772 if (rb == NULL) { 773 DBG("attachment without renderbuffer\n"); 774 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 775 continue; 776 } 777 778 if (fb->Attachment[i].Type == GL_TEXTURE) { 779 const struct gl_texture_image *img = 780 _mesa_get_attachment_teximage_const(&fb->Attachment[i]); 781 782 if (img->Border) { 783 DBG("texture with border\n"); 784 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 785 continue; 786 } 787 } 788 789 irb = intel_renderbuffer(rb); 790 if (irb == NULL) { 791 DBG("software rendering renderbuffer\n"); 792 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 793 continue; 794 } 795 796 if (!intel->vtbl.render_target_supported(intel, rb)) { 797 DBG("Unsupported HW texture/renderbuffer format attached: %s\n", 798 _mesa_get_format_name(intel_rb_format(irb))); 799 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 800 } 801 } 802} 803 804/** 805 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D 806 * We can do this when the dst renderbuffer is actually a texture and 807 * there is no scaling, mirroring or scissoring. 808 * 809 * \return new buffer mask indicating the buffers left to blit using the 810 * normal path. 
811 */ 812static GLbitfield 813intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx, 814 GLint srcX0, GLint srcY0, 815 GLint srcX1, GLint srcY1, 816 GLint dstX0, GLint dstY0, 817 GLint dstX1, GLint dstY1, 818 GLbitfield mask, GLenum filter) 819{ 820 if (mask & GL_COLOR_BUFFER_BIT) { 821 const struct gl_framebuffer *drawFb = ctx->DrawBuffer; 822 const struct gl_framebuffer *readFb = ctx->ReadBuffer; 823 const struct gl_renderbuffer_attachment *drawAtt = 824 &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]]; 825 struct intel_renderbuffer *srcRb = 826 intel_renderbuffer(readFb->_ColorReadBuffer); 827 828 /* If the source and destination are the same size with no 829 mirroring, the rectangles are within the size of the 830 texture and there is no scissor then we can use 831 glCopyTexSubimage2D to implement the blit. This will end 832 up as a fast hardware blit on some drivers */ 833 if (srcRb && drawAtt && drawAtt->Texture && 834 srcX0 - srcX1 == dstX0 - dstX1 && 835 srcY0 - srcY1 == dstY0 - dstY1 && 836 srcX1 >= srcX0 && 837 srcY1 >= srcY0 && 838 srcX0 >= 0 && srcX1 <= readFb->Width && 839 srcY0 >= 0 && srcY1 <= readFb->Height && 840 dstX0 >= 0 && dstX1 <= drawFb->Width && 841 dstY0 >= 0 && dstY1 <= drawFb->Height && 842 !ctx->Scissor.Enabled) { 843 const struct gl_texture_object *texObj = drawAtt->Texture; 844 const GLuint dstLevel = drawAtt->TextureLevel; 845 const GLenum target = texObj->Target; 846 847 struct gl_texture_image *texImage = 848 _mesa_select_tex_image(ctx, texObj, target, dstLevel); 849 850 if (intel_copy_texsubimage(intel_context(ctx), 851 intel_texture_image(texImage), 852 dstX0, dstY0, 853 srcRb, 854 srcX0, srcY0, 855 srcX1 - srcX0, /* width */ 856 srcY1 - srcY0)) 857 mask &= ~GL_COLOR_BUFFER_BIT; 858 } 859 } 860 861 return mask; 862} 863 864static void 865intel_blit_framebuffer(struct gl_context *ctx, 866 GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, 867 GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, 868 GLbitfield 
mask, GLenum filter) 869{ 870 /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */ 871 mask = intel_blit_framebuffer_copy_tex_sub_image(ctx, 872 srcX0, srcY0, srcX1, srcY1, 873 dstX0, dstY0, dstX1, dstY1, 874 mask, filter); 875 if (mask == 0x0) 876 return; 877 878#ifndef I915 879 mask = brw_blorp_framebuffer(intel_context(ctx), 880 srcX0, srcY0, srcX1, srcY1, 881 dstX0, dstY0, dstX1, dstY1, 882 mask, filter); 883 if (mask == 0x0) 884 return; 885#endif 886 887 _mesa_meta_BlitFramebuffer(ctx, 888 srcX0, srcY0, srcX1, srcY1, 889 dstX0, dstY0, dstX1, dstY1, 890 mask, filter); 891} 892 893void 894intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb) 895{ 896 if (irb->mt) { 897 intel_miptree_slice_set_needs_hiz_resolve(irb->mt, 898 irb->mt_level, 899 irb->mt_layer); 900 } 901} 902 903void 904intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb) 905{ 906 if (irb->mt) { 907 intel_miptree_slice_set_needs_depth_resolve(irb->mt, 908 irb->mt_level, 909 irb->mt_layer); 910 } 911} 912 913bool 914intel_renderbuffer_resolve_hiz(struct intel_context *intel, 915 struct intel_renderbuffer *irb) 916{ 917 if (irb->mt) 918 return intel_miptree_slice_resolve_hiz(intel, 919 irb->mt, 920 irb->mt_level, 921 irb->mt_layer); 922 923 return false; 924} 925 926bool 927intel_renderbuffer_resolve_depth(struct intel_context *intel, 928 struct intel_renderbuffer *irb) 929{ 930 if (irb->mt) 931 return intel_miptree_slice_resolve_depth(intel, 932 irb->mt, 933 irb->mt_level, 934 irb->mt_layer); 935 936 return false; 937} 938 939/** 940 * Do one-time context initializations related to GL_EXT_framebuffer_object. 941 * Hook in device driver functions. 
942 */ 943void 944intel_fbo_init(struct intel_context *intel) 945{ 946 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer; 947 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer; 948 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer; 949 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer; 950 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer; 951 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer; 952 intel->ctx.Driver.RenderTexture = intel_render_texture; 953 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture; 954 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers; 955 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer; 956 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer; 957 958#if FEATURE_OES_EGL_image 959 intel->ctx.Driver.EGLImageTargetRenderbufferStorage = 960 intel_image_target_renderbuffer_storage; 961#endif 962} 963