intel_fbo.c revision 1839a7fc9faae81d32ffc0cdc908b933f4524e28
1/************************************************************************** 2 * 3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 * 26 **************************************************************************/ 27 28 29#include "main/enums.h" 30#include "main/imports.h" 31#include "main/macros.h" 32#include "main/mfeatures.h" 33#include "main/mtypes.h" 34#include "main/fbobject.h" 35#include "main/framebuffer.h" 36#include "main/renderbuffer.h" 37#include "main/context.h" 38#include "main/teximage.h" 39#include "main/image.h" 40 41#include "swrast/swrast.h" 42#include "drivers/common/meta.h" 43 44#include "intel_context.h" 45#include "intel_batchbuffer.h" 46#include "intel_buffers.h" 47#include "intel_blit.h" 48#include "intel_fbo.h" 49#include "intel_mipmap_tree.h" 50#include "intel_regions.h" 51#include "intel_tex.h" 52#include "intel_span.h" 53#ifndef I915 54#include "brw_context.h" 55#endif 56 57#define FILE_DEBUG_FLAG DEBUG_FBO 58 59static struct gl_renderbuffer * 60intel_new_renderbuffer(struct gl_context * ctx, GLuint name); 61 62static bool 63intel_renderbuffer_update_wrapper(struct intel_context *intel, 64 struct intel_renderbuffer *irb, 65 struct intel_mipmap_tree *mt, 66 uint32_t level, 67 uint32_t layer, 68 gl_format format, 69 GLenum internal_format); 70 71bool 72intel_framebuffer_has_hiz(struct gl_framebuffer *fb) 73{ 74 struct intel_renderbuffer *rb = NULL; 75 if (fb) 76 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH); 77 return rb && rb->mt && rb->mt->hiz_mt; 78} 79 80struct intel_region* 81intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex) 82{ 83 struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex); 84 if (irb && irb->mt) { 85 if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt) 86 return irb->mt->stencil_mt->region; 87 else 88 return irb->mt->region; 89 } else 90 return NULL; 91} 92 93/** 94 * Create a new framebuffer object. 
95 */ 96static struct gl_framebuffer * 97intel_new_framebuffer(struct gl_context * ctx, GLuint name) 98{ 99 /* Only drawable state in intel_framebuffer at this time, just use Mesa's 100 * class 101 */ 102 return _mesa_new_framebuffer(ctx, name); 103} 104 105 106/** Called by gl_renderbuffer::Delete() */ 107static void 108intel_delete_renderbuffer(struct gl_renderbuffer *rb) 109{ 110 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 111 112 ASSERT(irb); 113 114 intel_miptree_release(&irb->mt); 115 116 free(irb); 117} 118 119/** 120 * \see dd_function_table::MapRenderbuffer 121 */ 122static void 123intel_map_renderbuffer(struct gl_context *ctx, 124 struct gl_renderbuffer *rb, 125 GLuint x, GLuint y, GLuint w, GLuint h, 126 GLbitfield mode, 127 GLubyte **out_map, 128 GLint *out_stride) 129{ 130 struct intel_context *intel = intel_context(ctx); 131 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 132 void *map; 133 int stride; 134 135 if (!irb && irb->Base.Buffer) { 136 /* this is a malloc'd renderbuffer (accum buffer) */ 137 GLint bpp = _mesa_get_format_bytes(rb->Format); 138 GLint rowStride = irb->Base.RowStride; 139 *out_map = (GLubyte *) irb->Base.Buffer + y * rowStride + x * bpp; 140 *out_stride = rowStride; 141 return; 142 } 143 144 /* We sometimes get called with this by our intel_span.c usage. */ 145 if (!irb->mt) { 146 *out_map = NULL; 147 *out_stride = 0; 148 return; 149 } 150 151 /* For a window-system renderbuffer, we need to flip the mapping we receive 152 * upside-down. So we need to ask for a rectangle on flipped vertically, and 153 * we then return a pointer to the bottom of it with a negative stride. 
154 */ 155 if (rb->Name == 0) { 156 y = rb->Height - y - h; 157 } 158 159 intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer, 160 x, y, w, h, mode, &map, &stride); 161 162 if (rb->Name == 0) { 163 map += (h - 1) * stride; 164 stride = -stride; 165 } 166 167 DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n", 168 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 169 x, y, w, h, map, stride); 170 171 *out_map = map; 172 *out_stride = stride; 173} 174 175/** 176 * \see dd_function_table::UnmapRenderbuffer 177 */ 178static void 179intel_unmap_renderbuffer(struct gl_context *ctx, 180 struct gl_renderbuffer *rb) 181{ 182 struct intel_context *intel = intel_context(ctx); 183 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 184 185 DBG("%s: rb %d (%s)\n", __FUNCTION__, 186 rb->Name, _mesa_get_format_name(rb->Format)); 187 188 if (!irb && irb->Base.Buffer) { 189 /* this is a malloc'd renderbuffer (accum buffer) */ 190 /* nothing to do */ 191 return; 192 } 193 194 intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer); 195} 196 197 198/** 199 * Called via glRenderbufferStorageEXT() to set the format and allocate 200 * storage for a user-created renderbuffer. 201 */ 202GLboolean 203intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 204 GLenum internalFormat, 205 GLuint width, GLuint height) 206{ 207 struct intel_context *intel = intel_context(ctx); 208 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 209 210 ASSERT(rb->Name != 0); 211 212 switch (internalFormat) { 213 default: 214 /* Use the same format-choice logic as for textures. 215 * Renderbuffers aren't any different from textures for us, 216 * except they're less useful because you can't texture with 217 * them. 
218 */ 219 rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat, 220 GL_NONE, GL_NONE); 221 break; 222 case GL_STENCIL_INDEX: 223 case GL_STENCIL_INDEX1_EXT: 224 case GL_STENCIL_INDEX4_EXT: 225 case GL_STENCIL_INDEX8_EXT: 226 case GL_STENCIL_INDEX16_EXT: 227 /* These aren't actual texture formats, so force them here. */ 228 if (intel->has_separate_stencil) { 229 rb->Format = MESA_FORMAT_S8; 230 } else { 231 assert(!intel->must_use_separate_stencil); 232 rb->Format = MESA_FORMAT_S8_Z24; 233 } 234 break; 235 } 236 237 rb->Width = width; 238 rb->Height = height; 239 rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat); 240 241 intel_miptree_release(&irb->mt); 242 243 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__, 244 _mesa_lookup_enum_by_nr(internalFormat), 245 _mesa_get_format_name(rb->Format), width, height); 246 247 irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format, 248 width, height); 249 if (!irb->mt) 250 return false; 251 252 if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 253 bool ok = intel_miptree_alloc_hiz(intel, irb->mt); 254 if (!ok) { 255 intel_miptree_release(&irb->mt); 256 return false; 257 } 258 } 259 260 return true; 261} 262 263 264#if FEATURE_OES_EGL_image 265static void 266intel_image_target_renderbuffer_storage(struct gl_context *ctx, 267 struct gl_renderbuffer *rb, 268 void *image_handle) 269{ 270 struct intel_context *intel = intel_context(ctx); 271 struct intel_renderbuffer *irb; 272 __DRIscreen *screen; 273 __DRIimage *image; 274 275 screen = intel->intelScreen->driScrnPriv; 276 image = screen->dri2.image->lookupEGLImage(screen, image_handle, 277 screen->loaderPrivate); 278 if (image == NULL) 279 return; 280 281 /* __DRIimage is opaque to the core so it has to be checked here */ 282 switch (image->format) { 283 case MESA_FORMAT_RGBA8888_REV: 284 _mesa_error(&intel->ctx, GL_INVALID_OPERATION, 285 "glEGLImageTargetRenderbufferStorage(unsupported image format"); 286 return; 287 break; 288 default: 
289 break; 290 } 291 292 irb = intel_renderbuffer(rb); 293 intel_miptree_release(&irb->mt); 294 irb->mt = intel_miptree_create_for_region(intel, 295 GL_TEXTURE_2D, 296 image->format, 297 image->region); 298 if (!irb->mt) 299 return; 300 301 rb->InternalFormat = image->internal_format; 302 rb->Width = image->region->width; 303 rb->Height = image->region->height; 304 rb->Format = image->format; 305 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx, 306 image->internal_format); 307} 308#endif 309 310/** 311 * Called for each hardware renderbuffer when a _window_ is resized. 312 * Just update fields. 313 * Not used for user-created renderbuffers! 314 */ 315static GLboolean 316intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 317 GLenum internalFormat, GLuint width, GLuint height) 318{ 319 ASSERT(rb->Name == 0); 320 rb->Width = width; 321 rb->Height = height; 322 rb->InternalFormat = internalFormat; 323 324 return true; 325} 326 327 328static void 329intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb, 330 GLuint width, GLuint height) 331{ 332 int i; 333 334 _mesa_resize_framebuffer(ctx, fb, width, height); 335 336 fb->Initialized = true; /* XXX remove someday */ 337 338 if (fb->Name != 0) { 339 return; 340 } 341 342 343 /* Make sure all window system renderbuffers are up to date */ 344 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) { 345 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer; 346 347 /* only resize if size is changing */ 348 if (rb && (rb->Width != width || rb->Height != height)) { 349 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height); 350 } 351 } 352} 353 354 355/** Dummy function for gl_renderbuffer::AllocStorage() */ 356static GLboolean 357intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 358 GLenum internalFormat, GLuint width, GLuint height) 359{ 360 _mesa_problem(ctx, "intel_op_alloc_storage should never be called."); 361 return false; 362} 363 
364/** 365 * Create a new intel_renderbuffer which corresponds to an on-screen window, 366 * not a user-created renderbuffer. 367 */ 368struct intel_renderbuffer * 369intel_create_renderbuffer(gl_format format) 370{ 371 struct intel_renderbuffer *irb; 372 struct gl_renderbuffer *rb; 373 374 GET_CURRENT_CONTEXT(ctx); 375 376 irb = CALLOC_STRUCT(intel_renderbuffer); 377 if (!irb) { 378 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 379 return NULL; 380 } 381 382 rb = &irb->Base.Base; 383 384 _mesa_init_renderbuffer(rb, 0); 385 rb->ClassID = INTEL_RB_CLASS; 386 rb->_BaseFormat = _mesa_get_format_base_format(format); 387 rb->Format = format; 388 rb->InternalFormat = rb->_BaseFormat; 389 390 /* intel-specific methods */ 391 rb->Delete = intel_delete_renderbuffer; 392 rb->AllocStorage = intel_alloc_window_storage; 393 394 return irb; 395} 396 397/** 398 * Create a new renderbuffer object. 399 * Typically called via glBindRenderbufferEXT(). 400 */ 401static struct gl_renderbuffer * 402intel_new_renderbuffer(struct gl_context * ctx, GLuint name) 403{ 404 /*struct intel_context *intel = intel_context(ctx); */ 405 struct intel_renderbuffer *irb; 406 struct gl_renderbuffer *rb; 407 408 irb = CALLOC_STRUCT(intel_renderbuffer); 409 if (!irb) { 410 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 411 return NULL; 412 } 413 414 rb = &irb->Base.Base; 415 416 _mesa_init_renderbuffer(rb, name); 417 rb->ClassID = INTEL_RB_CLASS; 418 419 /* intel-specific methods */ 420 rb->Delete = intel_delete_renderbuffer; 421 rb->AllocStorage = intel_alloc_renderbuffer_storage; 422 /* span routines set in alloc_storage function */ 423 424 return rb; 425} 426 427 428/** 429 * Called via glBindFramebufferEXT(). 
430 */ 431static void 432intel_bind_framebuffer(struct gl_context * ctx, GLenum target, 433 struct gl_framebuffer *fb, struct gl_framebuffer *fbread) 434{ 435 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) { 436 intel_draw_buffer(ctx); 437 } 438 else { 439 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */ 440 } 441} 442 443 444/** 445 * Called via glFramebufferRenderbufferEXT(). 446 */ 447static void 448intel_framebuffer_renderbuffer(struct gl_context * ctx, 449 struct gl_framebuffer *fb, 450 GLenum attachment, struct gl_renderbuffer *rb) 451{ 452 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0); 453 454 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb); 455 intel_draw_buffer(ctx); 456} 457 458static struct intel_renderbuffer* 459intel_renderbuffer_wrap_miptree(struct intel_context *intel, 460 struct intel_mipmap_tree *mt, 461 uint32_t level, 462 uint32_t layer, 463 gl_format format, 464 GLenum internal_format); 465 466/** 467 * \par Special case for separate stencil 468 * 469 * When wrapping a depthstencil texture that uses separate stencil, this 470 * function is recursively called twice: once to create \c 471 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the 472 * call to create \c irb->wrapped_depth, the \c format and \c 473 * internal_format parameters do not match \c mt->format. In that case, \c 474 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c 475 * MESA_FORMAT_X8_Z24. 
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;

   /* Adopt the format and the dimensions of the selected miptree level. */
   rb->Format = format;
   rb->InternalFormat = internal_format;
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   /* Wrapped renderbuffers get their storage from the miptree, so the
    * AllocStorage hook must never allocate anything.
    */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   /* Take a reference on the miptree; released in intel_delete_renderbuffer. */
   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   /* Lazily allocate the HiZ buffer the first time a HiZ-capable depth
    * format is wrapped.  Success is detected via mt->hiz_mt rather than
    * the return value of intel_miptree_alloc_hiz().
    */
   if (mt->hiz_mt == NULL &&
       intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      intel_miptree_alloc_hiz(intel, mt);
      if (!mt->hiz_mt)
         return false;
   }

   return true;
}

/**
 * \brief Wrap a renderbuffer around a single slice of a miptree.
 *
 * Called by glFramebufferTexture*(). This just allocates a
 * ``struct intel_renderbuffer`` then calls
 * intel_renderbuffer_update_wrapper() to do the real work.
 *
 * \see intel_renderbuffer_update_wrapper()
 */
static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format)

{
   struct gl_context *ctx = &intel->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   intel_miptree_check_level_layer(mt, level, layer);

   /* ~0 is used as the name since this renderbuffer is not in the hash
    * table of user-created renderbuffer objects.
    */
   rb = intel_new_renderbuffer(ctx, ~0);
   irb = intel_renderbuffer(rb);
   if (!irb)
      return NULL;

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, level, layer,
                                          format, internal_format)) {
      /* update_wrapper took no reference we need to drop here; free the
       * wrapper directly.
       */
      free(irb);
      return NULL;
   }

   return irb;
}

/**
 * Record in the renderbuffer the x/y offset of its 2D image within the
 * backing texture region, for use by the hardware surface setup.
 */
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  0, /* face, which we ignore */
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
581 */ 582uint32_t 583intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb, 584 uint32_t *tile_x, 585 uint32_t *tile_y) 586{ 587 struct intel_region *region = irb->mt->region; 588 int cpp = region->cpp; 589 uint32_t pitch = region->pitch * cpp; 590 591 if (region->tiling == I915_TILING_NONE) { 592 *tile_x = 0; 593 *tile_y = 0; 594 return irb->draw_x * cpp + irb->draw_y * pitch; 595 } else if (region->tiling == I915_TILING_X) { 596 *tile_x = irb->draw_x % (512 / cpp); 597 *tile_y = irb->draw_y % 8; 598 return ((irb->draw_y / 8) * (8 * pitch) + 599 (irb->draw_x - *tile_x) / (512 / cpp) * 4096); 600 } else { 601 assert(region->tiling == I915_TILING_Y); 602 *tile_x = irb->draw_x % (128 / cpp); 603 *tile_y = irb->draw_y % 32; 604 return ((irb->draw_y / 32) * (32 * pitch) + 605 (irb->draw_x - *tile_x) / (128 / cpp) * 4096); 606 } 607} 608 609#ifndef I915 610static bool 611need_tile_offset_workaround(struct brw_context *brw, 612 struct intel_renderbuffer *irb) 613{ 614 uint32_t tile_x, tile_y; 615 616 if (brw->has_surface_tile_offset) 617 return false; 618 619 intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y); 620 621 return tile_x != 0 || tile_y != 0; 622} 623#endif 624 625/** 626 * Called by glFramebufferTexture[123]DEXT() (and other places) to 627 * prepare for rendering into texture memory. This might be called 628 * many times to choose different texture levels, cube faces, etc 629 * before intel_finish_render_texture() is ever called. 
630 */ 631static void 632intel_render_texture(struct gl_context * ctx, 633 struct gl_framebuffer *fb, 634 struct gl_renderbuffer_attachment *att) 635{ 636 struct intel_context *intel = intel_context(ctx); 637 struct gl_texture_image *image = _mesa_get_attachment_teximage(att); 638 struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer); 639 struct intel_texture_image *intel_image = intel_texture_image(image); 640 struct intel_mipmap_tree *mt = intel_image->mt; 641 int layer; 642 643 (void) fb; 644 645 if (att->CubeMapFace > 0) { 646 assert(att->Zoffset == 0); 647 layer = att->CubeMapFace; 648 } else { 649 layer = att->Zoffset; 650 } 651 652 if (!intel_image->mt) { 653 /* Fallback on drawing to a texture that doesn't have a miptree 654 * (has a border, width/height 0, etc.) 655 */ 656 _mesa_reference_renderbuffer(&att->Renderbuffer, NULL); 657 _swrast_render_texture(ctx, fb, att); 658 return; 659 } 660 else if (!irb) { 661 irb = intel_renderbuffer_wrap_miptree(intel, 662 mt, 663 att->TextureLevel, 664 layer, 665 image->TexFormat, 666 image->InternalFormat); 667 668 if (irb) { 669 /* bind the wrapper to the attachment point */ 670 _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base); 671 } 672 else { 673 /* fallback to software rendering */ 674 _swrast_render_texture(ctx, fb, att); 675 return; 676 } 677 } 678 679 if (!intel_renderbuffer_update_wrapper(intel, irb, 680 mt, att->TextureLevel, layer, 681 image->TexFormat, 682 image->InternalFormat)) { 683 _mesa_reference_renderbuffer(&att->Renderbuffer, NULL); 684 _swrast_render_texture(ctx, fb, att); 685 return; 686 } 687 688 DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n", 689 _mesa_get_format_name(image->TexFormat), 690 att->Texture->Name, image->Width, image->Height, 691 irb->Base.Base.RefCount); 692 693 intel_image->used_as_render_target = true; 694 695#ifndef I915 696 if (need_tile_offset_workaround(brw_context(ctx), irb)) { 697 /* Original gen4 hardware couldn't draw to a 
non-tile-aligned 698 * destination in a miptree unless you actually setup your 699 * renderbuffer as a miptree and used the fragile 700 * lod/array_index/etc. controls to select the image. So, 701 * instead, we just make a new single-level miptree and render 702 * into that. 703 */ 704 struct intel_context *intel = intel_context(ctx); 705 struct intel_mipmap_tree *new_mt; 706 int width, height, depth; 707 708 intel_miptree_get_dimensions_for_image(image, &width, &height, &depth); 709 710 new_mt = intel_miptree_create(intel, image->TexObject->Target, 711 intel_image->base.Base.TexFormat, 712 intel_image->base.Base.Level, 713 intel_image->base.Base.Level, 714 width, height, depth, 715 true); 716 717 intel_miptree_copy_teximage(intel, intel_image, new_mt); 718 intel_renderbuffer_set_draw_offset(irb); 719 720 intel_miptree_reference(&irb->mt, intel_image->mt); 721 intel_miptree_release(&new_mt); 722 } 723#endif 724 /* update drawing region, etc */ 725 intel_draw_buffer(ctx); 726} 727 728 729/** 730 * Called by Mesa when rendering to a texture is done. 731 */ 732static void 733intel_finish_render_texture(struct gl_context * ctx, 734 struct gl_renderbuffer_attachment *att) 735{ 736 struct intel_context *intel = intel_context(ctx); 737 struct gl_texture_object *tex_obj = att->Texture; 738 struct gl_texture_image *image = 739 tex_obj->Image[att->CubeMapFace][att->TextureLevel]; 740 struct intel_texture_image *intel_image = intel_texture_image(image); 741 742 DBG("Finish render %s texture tex=%u\n", 743 _mesa_get_format_name(image->TexFormat), att->Texture->Name); 744 745 /* Flag that this image may now be validated into the object's miptree. */ 746 if (intel_image) 747 intel_image->used_as_render_target = false; 748 749 /* Since we've (probably) rendered to the texture and will (likely) use 750 * it in the texture domain later on in this batchbuffer, flush the 751 * batch. 
Once again, we wish for a domain tracker in libdrm to cover 752 * usage inside of a batchbuffer like GEM does in the kernel. 753 */ 754 intel_batchbuffer_emit_mi_flush(intel); 755} 756 757/** 758 * Do additional "completeness" testing of a framebuffer object. 759 */ 760static void 761intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb) 762{ 763 struct intel_context *intel = intel_context(ctx); 764 const struct intel_renderbuffer *depthRb = 765 intel_get_renderbuffer(fb, BUFFER_DEPTH); 766 const struct intel_renderbuffer *stencilRb = 767 intel_get_renderbuffer(fb, BUFFER_STENCIL); 768 struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL; 769 int i; 770 771 if (depthRb) 772 depth_mt = depthRb->mt; 773 if (stencilRb) { 774 stencil_mt = stencilRb->mt; 775 if (stencil_mt->stencil_mt) 776 stencil_mt = stencil_mt->stencil_mt; 777 } 778 779 if (depth_mt && stencil_mt) { 780 if (depth_mt == stencil_mt) { 781 /* For true packed depth/stencil (not faked on prefers-separate-stencil 782 * hardware) we need to be sure they're the same level/layer, since 783 * we'll be emitting a single packet describing the packed setup. 784 */ 785 if (depthRb->mt_level != stencilRb->mt_level || 786 depthRb->mt_layer != stencilRb->mt_layer) { 787 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 788 } 789 } else { 790 if (!intel->has_separate_stencil) 791 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 792 if (stencil_mt->format != MESA_FORMAT_S8) 793 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 794 if (intel->gen < 7 && depth_mt->hiz_mt == NULL) { 795 /* Before Gen7, separate depth and stencil buffers can be used 796 * only if HiZ is enabled. From the Sandybridge PRM, Volume 2, 797 * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable: 798 * [DevSNB]: This field must be set to the same value (enabled 799 * or disabled) as Hierarchical Depth Buffer Enable. 
800 */ 801 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED; 802 } 803 } 804 } 805 806 for (i = 0; i < Elements(fb->Attachment); i++) { 807 struct gl_renderbuffer *rb; 808 struct intel_renderbuffer *irb; 809 810 if (fb->Attachment[i].Type == GL_NONE) 811 continue; 812 813 /* A supported attachment will have a Renderbuffer set either 814 * from being a Renderbuffer or being a texture that got the 815 * intel_wrap_texture() treatment. 816 */ 817 rb = fb->Attachment[i].Renderbuffer; 818 if (rb == NULL) { 819 DBG("attachment without renderbuffer\n"); 820 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 821 continue; 822 } 823 824 irb = intel_renderbuffer(rb); 825 if (irb == NULL) { 826 DBG("software rendering renderbuffer\n"); 827 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 828 continue; 829 } 830 831 if (!intel->vtbl.render_target_supported(intel, intel_rb_format(irb))) { 832 DBG("Unsupported HW texture/renderbuffer format attached: %s\n", 833 _mesa_get_format_name(intel_rb_format(irb))); 834 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 835 } 836 } 837} 838 839/** 840 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D 841 * We can do this when the dst renderbuffer is actually a texture and 842 * there is no scaling, mirroring or scissoring. 843 * 844 * \return new buffer mask indicating the buffers left to blit using the 845 * normal path. 
846 */ 847static GLbitfield 848intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx, 849 GLint srcX0, GLint srcY0, 850 GLint srcX1, GLint srcY1, 851 GLint dstX0, GLint dstY0, 852 GLint dstX1, GLint dstY1, 853 GLbitfield mask, GLenum filter) 854{ 855 if (mask & GL_COLOR_BUFFER_BIT) { 856 const struct gl_framebuffer *drawFb = ctx->DrawBuffer; 857 const struct gl_framebuffer *readFb = ctx->ReadBuffer; 858 const struct gl_renderbuffer_attachment *drawAtt = 859 &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]]; 860 struct intel_renderbuffer *srcRb = 861 intel_renderbuffer(readFb->_ColorReadBuffer); 862 863 /* If the source and destination are the same size with no 864 mirroring, the rectangles are within the size of the 865 texture and there is no scissor then we can use 866 glCopyTexSubimage2D to implement the blit. This will end 867 up as a fast hardware blit on some drivers */ 868 if (srcRb && drawAtt && drawAtt->Texture && 869 srcX0 - srcX1 == dstX0 - dstX1 && 870 srcY0 - srcY1 == dstY0 - dstY1 && 871 srcX1 >= srcX0 && 872 srcY1 >= srcY0 && 873 srcX0 >= 0 && srcX1 <= readFb->Width && 874 srcY0 >= 0 && srcY1 <= readFb->Height && 875 dstX0 >= 0 && dstX1 <= drawFb->Width && 876 dstY0 >= 0 && dstY1 <= drawFb->Height && 877 !ctx->Scissor.Enabled) { 878 const struct gl_texture_object *texObj = drawAtt->Texture; 879 const GLuint dstLevel = drawAtt->TextureLevel; 880 const GLenum target = texObj->Target; 881 882 struct gl_texture_image *texImage = 883 _mesa_select_tex_image(ctx, texObj, target, dstLevel); 884 885 if (intel_copy_texsubimage(intel_context(ctx), 886 intel_texture_image(texImage), 887 dstX0, dstY0, 888 srcRb, 889 srcX0, srcY0, 890 srcX1 - srcX0, /* width */ 891 srcY1 - srcY0)) 892 mask &= ~GL_COLOR_BUFFER_BIT; 893 } 894 } 895 896 return mask; 897} 898 899static void 900intel_blit_framebuffer(struct gl_context *ctx, 901 GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, 902 GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, 903 GLbitfield 
mask, GLenum filter) 904{ 905 /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */ 906 mask = intel_blit_framebuffer_copy_tex_sub_image(ctx, 907 srcX0, srcY0, srcX1, srcY1, 908 dstX0, dstY0, dstX1, dstY1, 909 mask, filter); 910 if (mask == 0x0) 911 return; 912 913 _mesa_meta_BlitFramebuffer(ctx, 914 srcX0, srcY0, srcX1, srcY1, 915 dstX0, dstY0, dstX1, dstY1, 916 mask, filter); 917} 918 919void 920intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb) 921{ 922 if (irb->mt) { 923 intel_miptree_slice_set_needs_hiz_resolve(irb->mt, 924 irb->mt_level, 925 irb->mt_layer); 926 } 927} 928 929void 930intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb) 931{ 932 if (irb->mt) { 933 intel_miptree_slice_set_needs_depth_resolve(irb->mt, 934 irb->mt_level, 935 irb->mt_layer); 936 } 937} 938 939bool 940intel_renderbuffer_resolve_hiz(struct intel_context *intel, 941 struct intel_renderbuffer *irb) 942{ 943 if (irb->mt) 944 return intel_miptree_slice_resolve_hiz(intel, 945 irb->mt, 946 irb->mt_level, 947 irb->mt_layer); 948 949 return false; 950} 951 952bool 953intel_renderbuffer_resolve_depth(struct intel_context *intel, 954 struct intel_renderbuffer *irb) 955{ 956 if (irb->mt) 957 return intel_miptree_slice_resolve_depth(intel, 958 irb->mt, 959 irb->mt_level, 960 irb->mt_layer); 961 962 return false; 963} 964 965/** 966 * Do one-time context initializations related to GL_EXT_framebuffer_object. 967 * Hook in device driver functions. 
968 */ 969void 970intel_fbo_init(struct intel_context *intel) 971{ 972 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer; 973 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer; 974 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer; 975 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer; 976 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer; 977 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer; 978 intel->ctx.Driver.RenderTexture = intel_render_texture; 979 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture; 980 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers; 981 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer; 982 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer; 983 984#if FEATURE_OES_EGL_image 985 intel->ctx.Driver.EGLImageTargetRenderbufferStorage = 986 intel_image_target_renderbuffer_storage; 987#endif 988} 989