/* intel_fbo.c  —  revision 6dbdc0395698de929e23b4ec1ab399e64ecfd264 */
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * 26 **************************************************************************/ 27 28 29#include "main/enums.h" 30#include "main/imports.h" 31#include "main/macros.h" 32#include "main/mfeatures.h" 33#include "main/mtypes.h" 34#include "main/fbobject.h" 35#include "main/framebuffer.h" 36#include "main/renderbuffer.h" 37#include "main/context.h" 38#include "main/teximage.h" 39#include "main/image.h" 40 41#include "swrast/swrast.h" 42#include "drivers/common/meta.h" 43 44#include "intel_context.h" 45#include "intel_batchbuffer.h" 46#include "intel_buffers.h" 47#include "intel_blit.h" 48#include "intel_fbo.h" 49#include "intel_mipmap_tree.h" 50#include "intel_regions.h" 51#include "intel_tex.h" 52#include "intel_span.h" 53#ifndef I915 54#include "brw_context.h" 55#endif 56 57#define FILE_DEBUG_FLAG DEBUG_FBO 58 59static struct gl_renderbuffer * 60intel_new_renderbuffer(struct gl_context * ctx, GLuint name); 61 62static bool 63intel_renderbuffer_update_wrapper(struct intel_context *intel, 64 struct intel_renderbuffer *irb, 65 struct intel_mipmap_tree *mt, 66 uint32_t level, 67 uint32_t layer, 68 gl_format format, 69 GLenum internal_format); 70 71bool 72intel_framebuffer_has_hiz(struct gl_framebuffer *fb) 73{ 74 struct intel_renderbuffer *rb = NULL; 75 if (fb) 76 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH); 77 return rb && rb->mt && rb->mt->hiz_mt; 78} 79 80struct intel_region* 81intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex) 82{ 83 struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex); 84 if (irb && irb->mt) { 85 if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt) 86 return irb->mt->stencil_mt->region; 87 else 88 return irb->mt->region; 89 } else 90 return NULL; 91} 92 93/** 94 * Create a new framebuffer object. 
95 */ 96static struct gl_framebuffer * 97intel_new_framebuffer(struct gl_context * ctx, GLuint name) 98{ 99 /* Only drawable state in intel_framebuffer at this time, just use Mesa's 100 * class 101 */ 102 return _mesa_new_framebuffer(ctx, name); 103} 104 105 106/** Called by gl_renderbuffer::Delete() */ 107static void 108intel_delete_renderbuffer(struct gl_renderbuffer *rb) 109{ 110 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 111 112 ASSERT(irb); 113 114 intel_miptree_release(&irb->mt); 115 116 free(irb); 117} 118 119/** 120 * \see dd_function_table::MapRenderbuffer 121 */ 122static void 123intel_map_renderbuffer(struct gl_context *ctx, 124 struct gl_renderbuffer *rb, 125 GLuint x, GLuint y, GLuint w, GLuint h, 126 GLbitfield mode, 127 GLubyte **out_map, 128 GLint *out_stride) 129{ 130 struct intel_context *intel = intel_context(ctx); 131 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 132 void *map; 133 int stride; 134 135 if (!irb && rb->Data) { 136 /* this is a malloc'd renderbuffer (accum buffer) */ 137 GLint bpp = _mesa_get_format_bytes(rb->Format); 138 GLint rowStride = rb->RowStride * bpp; 139 *out_map = (GLubyte *) rb->Data + y * rowStride + x * bpp; 140 *out_stride = rowStride; 141 return; 142 } 143 144 /* We sometimes get called with this by our intel_span.c usage. */ 145 if (!irb->mt) { 146 *out_map = NULL; 147 *out_stride = 0; 148 return; 149 } 150 151 /* For a window-system renderbuffer, we need to flip the mapping we receive 152 * upside-down. So we need to ask for a rectangle on flipped vertically, and 153 * we then return a pointer to the bottom of it with a negative stride. 
154 */ 155 if (rb->Name == 0) { 156 y = rb->Height - y - h; 157 } 158 159 intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer, 160 x, y, w, h, mode, &map, &stride); 161 162 if (rb->Name == 0) { 163 map += (h - 1) * stride; 164 stride = -stride; 165 } 166 167 DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n", 168 __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format), 169 x, y, w, h, map, stride); 170 171 *out_map = map; 172 *out_stride = stride; 173} 174 175/** 176 * \see dd_function_table::UnmapRenderbuffer 177 */ 178static void 179intel_unmap_renderbuffer(struct gl_context *ctx, 180 struct gl_renderbuffer *rb) 181{ 182 struct intel_context *intel = intel_context(ctx); 183 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 184 185 DBG("%s: rb %d (%s)\n", __FUNCTION__, 186 rb->Name, _mesa_get_format_name(rb->Format)); 187 188 if (!irb && rb->Data) { 189 /* this is a malloc'd renderbuffer (accum buffer) */ 190 /* nothing to do */ 191 return; 192 } 193 194 intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer); 195} 196 197/** 198 * Return a pointer to a specific pixel in a renderbuffer. 199 */ 200static void * 201intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb, 202 GLint x, GLint y) 203{ 204 /* By returning NULL we force all software rendering to go through 205 * the span routines. 206 */ 207 return NULL; 208} 209 210 211/** 212 * Called via glRenderbufferStorageEXT() to set the format and allocate 213 * storage for a user-created renderbuffer. 214 */ 215GLboolean 216intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 217 GLenum internalFormat, 218 GLuint width, GLuint height) 219{ 220 struct intel_context *intel = intel_context(ctx); 221 struct intel_renderbuffer *irb = intel_renderbuffer(rb); 222 223 ASSERT(rb->Name != 0); 224 225 switch (internalFormat) { 226 default: 227 /* Use the same format-choice logic as for textures. 
228 * Renderbuffers aren't any different from textures for us, 229 * except they're less useful because you can't texture with 230 * them. 231 */ 232 rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat, 233 GL_NONE, GL_NONE); 234 break; 235 case GL_STENCIL_INDEX: 236 case GL_STENCIL_INDEX1_EXT: 237 case GL_STENCIL_INDEX4_EXT: 238 case GL_STENCIL_INDEX8_EXT: 239 case GL_STENCIL_INDEX16_EXT: 240 /* These aren't actual texture formats, so force them here. */ 241 if (intel->has_separate_stencil) { 242 rb->Format = MESA_FORMAT_S8; 243 } else { 244 assert(!intel->must_use_separate_stencil); 245 rb->Format = MESA_FORMAT_S8_Z24; 246 } 247 break; 248 } 249 250 rb->Width = width; 251 rb->Height = height; 252 rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat); 253 rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format); 254 255 intel_miptree_release(&irb->mt); 256 257 DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__, 258 _mesa_lookup_enum_by_nr(internalFormat), 259 _mesa_get_format_name(rb->Format), width, height); 260 261 irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format, 262 width, height); 263 if (!irb->mt) 264 return false; 265 266 if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 267 bool ok = intel_miptree_alloc_hiz(intel, irb->mt); 268 if (!ok) { 269 intel_miptree_release(&irb->mt); 270 return false; 271 } 272 } 273 274 return true; 275} 276 277 278#if FEATURE_OES_EGL_image 279static void 280intel_image_target_renderbuffer_storage(struct gl_context *ctx, 281 struct gl_renderbuffer *rb, 282 void *image_handle) 283{ 284 struct intel_context *intel = intel_context(ctx); 285 struct intel_renderbuffer *irb; 286 __DRIscreen *screen; 287 __DRIimage *image; 288 289 screen = intel->intelScreen->driScrnPriv; 290 image = screen->dri2.image->lookupEGLImage(screen, image_handle, 291 screen->loaderPrivate); 292 if (image == NULL) 293 return; 294 295 /* __DRIimage is opaque to the core so it has to be checked here */ 296 switch 
(image->format) { 297 case MESA_FORMAT_RGBA8888_REV: 298 _mesa_error(&intel->ctx, GL_INVALID_OPERATION, 299 "glEGLImageTargetRenderbufferStorage(unsupported image format"); 300 return; 301 break; 302 default: 303 break; 304 } 305 306 irb = intel_renderbuffer(rb); 307 intel_miptree_release(&irb->mt); 308 irb->mt = intel_miptree_create_for_region(intel, 309 GL_TEXTURE_2D, 310 image->format, 311 image->region); 312 if (!irb->mt) 313 return; 314 315 rb->InternalFormat = image->internal_format; 316 rb->Width = image->region->width; 317 rb->Height = image->region->height; 318 rb->Format = image->format; 319 rb->DataType = image->data_type; 320 rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx, 321 image->internal_format); 322} 323#endif 324 325/** 326 * Called for each hardware renderbuffer when a _window_ is resized. 327 * Just update fields. 328 * Not used for user-created renderbuffers! 329 */ 330static GLboolean 331intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 332 GLenum internalFormat, GLuint width, GLuint height) 333{ 334 ASSERT(rb->Name == 0); 335 rb->Width = width; 336 rb->Height = height; 337 rb->InternalFormat = internalFormat; 338 339 return true; 340} 341 342 343static void 344intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb, 345 GLuint width, GLuint height) 346{ 347 int i; 348 349 _mesa_resize_framebuffer(ctx, fb, width, height); 350 351 fb->Initialized = true; /* XXX remove someday */ 352 353 if (fb->Name != 0) { 354 return; 355 } 356 357 358 /* Make sure all window system renderbuffers are up to date */ 359 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) { 360 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer; 361 362 /* only resize if size is changing */ 363 if (rb && (rb->Width != width || rb->Height != height)) { 364 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height); 365 } 366 } 367} 368 369 370/** Dummy function for gl_renderbuffer::AllocStorage() */ 371static 
GLboolean 372intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 373 GLenum internalFormat, GLuint width, GLuint height) 374{ 375 _mesa_problem(ctx, "intel_op_alloc_storage should never be called."); 376 return false; 377} 378 379/** 380 * Create a new intel_renderbuffer which corresponds to an on-screen window, 381 * not a user-created renderbuffer. 382 */ 383struct intel_renderbuffer * 384intel_create_renderbuffer(gl_format format) 385{ 386 GET_CURRENT_CONTEXT(ctx); 387 388 struct intel_renderbuffer *irb; 389 390 irb = CALLOC_STRUCT(intel_renderbuffer); 391 if (!irb) { 392 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 393 return NULL; 394 } 395 396 _mesa_init_renderbuffer(&irb->Base, 0); 397 irb->Base.ClassID = INTEL_RB_CLASS; 398 irb->Base._BaseFormat = _mesa_get_format_base_format(format); 399 irb->Base.Format = format; 400 irb->Base.InternalFormat = irb->Base._BaseFormat; 401 irb->Base.DataType = intel_mesa_format_to_rb_datatype(format); 402 403 /* intel-specific methods */ 404 irb->Base.Delete = intel_delete_renderbuffer; 405 irb->Base.AllocStorage = intel_alloc_window_storage; 406 irb->Base.GetPointer = intel_get_pointer; 407 408 return irb; 409} 410 411/** 412 * Create a new renderbuffer object. 413 * Typically called via glBindRenderbufferEXT(). 
414 */ 415static struct gl_renderbuffer * 416intel_new_renderbuffer(struct gl_context * ctx, GLuint name) 417{ 418 /*struct intel_context *intel = intel_context(ctx); */ 419 struct intel_renderbuffer *irb; 420 421 irb = CALLOC_STRUCT(intel_renderbuffer); 422 if (!irb) { 423 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 424 return NULL; 425 } 426 427 _mesa_init_renderbuffer(&irb->Base, name); 428 irb->Base.ClassID = INTEL_RB_CLASS; 429 430 /* intel-specific methods */ 431 irb->Base.Delete = intel_delete_renderbuffer; 432 irb->Base.AllocStorage = intel_alloc_renderbuffer_storage; 433 irb->Base.GetPointer = intel_get_pointer; 434 /* span routines set in alloc_storage function */ 435 436 return &irb->Base; 437} 438 439 440/** 441 * Called via glBindFramebufferEXT(). 442 */ 443static void 444intel_bind_framebuffer(struct gl_context * ctx, GLenum target, 445 struct gl_framebuffer *fb, struct gl_framebuffer *fbread) 446{ 447 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) { 448 intel_draw_buffer(ctx); 449 } 450 else { 451 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */ 452 } 453} 454 455 456/** 457 * Called via glFramebufferRenderbufferEXT(). 458 */ 459static void 460intel_framebuffer_renderbuffer(struct gl_context * ctx, 461 struct gl_framebuffer *fb, 462 GLenum attachment, struct gl_renderbuffer *rb) 463{ 464 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? 
rb->Name : 0); 465 466 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb); 467 intel_draw_buffer(ctx); 468} 469 470static struct intel_renderbuffer* 471intel_renderbuffer_wrap_miptree(struct intel_context *intel, 472 struct intel_mipmap_tree *mt, 473 uint32_t level, 474 uint32_t layer, 475 gl_format format, 476 GLenum internal_format); 477 478/** 479 * \par Special case for separate stencil 480 * 481 * When wrapping a depthstencil texture that uses separate stencil, this 482 * function is recursively called twice: once to create \c 483 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the 484 * call to create \c irb->wrapped_depth, the \c format and \c 485 * internal_format parameters do not match \c mt->format. In that case, \c 486 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c 487 * MESA_FORMAT_X8_Z24. 488 * 489 * @return true on success 490 */ 491static bool 492intel_renderbuffer_update_wrapper(struct intel_context *intel, 493 struct intel_renderbuffer *irb, 494 struct intel_mipmap_tree *mt, 495 uint32_t level, 496 uint32_t layer, 497 gl_format format, 498 GLenum internal_format) 499{ 500 struct gl_renderbuffer *rb = &irb->Base; 501 502 rb->Format = format; 503 rb->InternalFormat = internal_format; 504 rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format); 505 rb->_BaseFormat = _mesa_get_format_base_format(rb->Format); 506 rb->Width = mt->level[level].width; 507 rb->Height = mt->level[level].height; 508 509 irb->Base.Delete = intel_delete_renderbuffer; 510 irb->Base.AllocStorage = intel_nop_alloc_storage; 511 512 intel_miptree_check_level_layer(mt, level, layer); 513 irb->mt_level = level; 514 irb->mt_layer = layer; 515 516 intel_miptree_reference(&irb->mt, mt); 517 518 intel_renderbuffer_set_draw_offset(irb); 519 520 if (mt->hiz_mt == NULL && 521 intel->vtbl.is_hiz_depth_format(intel, rb->Format)) { 522 intel_miptree_alloc_hiz(intel, mt); 523 if (!mt->hiz_mt) 524 return false; 525 } 526 527 return true; 528} 529 530/** 531 
* \brief Wrap a renderbuffer around a single slice of a miptree. 532 * 533 * Called by glFramebufferTexture*(). This just allocates a 534 * ``struct intel_renderbuffer`` then calls 535 * intel_renderbuffer_update_wrapper() to do the real work. 536 * 537 * \see intel_renderbuffer_update_wrapper() 538 */ 539static struct intel_renderbuffer* 540intel_renderbuffer_wrap_miptree(struct intel_context *intel, 541 struct intel_mipmap_tree *mt, 542 uint32_t level, 543 uint32_t layer, 544 gl_format format, 545 GLenum internal_format) 546 547{ 548 struct gl_context *ctx = &intel->ctx; 549 struct gl_renderbuffer *rb; 550 struct intel_renderbuffer *irb; 551 552 intel_miptree_check_level_layer(mt, level, layer); 553 554 rb = intel_new_renderbuffer(ctx, ~0); 555 irb = intel_renderbuffer(rb); 556 if (!irb) 557 return NULL; 558 559 if (!intel_renderbuffer_update_wrapper(intel, irb, 560 mt, level, layer, 561 format, internal_format)) { 562 free(irb); 563 return NULL; 564 } 565 566 return irb; 567} 568 569void 570intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb) 571{ 572 unsigned int dst_x, dst_y; 573 574 /* compute offset of the particular 2D image within the texture region */ 575 intel_miptree_get_image_offset(irb->mt, 576 irb->mt_level, 577 0, /* face, which we ignore */ 578 irb->mt_layer, 579 &dst_x, &dst_y); 580 581 irb->draw_x = dst_x; 582 irb->draw_y = dst_y; 583} 584 585/** 586 * Rendering to tiled buffers requires that the base address of the 587 * buffer be aligned to a page boundary. We generally render to 588 * textures by pointing the surface at the mipmap image level, which 589 * may not be aligned to a tile boundary. 590 * 591 * This function returns an appropriately-aligned base offset 592 * according to the tiling restrictions, plus any required x/y offset 593 * from there. 
594 */ 595uint32_t 596intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb, 597 uint32_t *tile_x, 598 uint32_t *tile_y) 599{ 600 struct intel_region *region = irb->mt->region; 601 int cpp = region->cpp; 602 uint32_t pitch = region->pitch * cpp; 603 604 if (region->tiling == I915_TILING_NONE) { 605 *tile_x = 0; 606 *tile_y = 0; 607 return irb->draw_x * cpp + irb->draw_y * pitch; 608 } else if (region->tiling == I915_TILING_X) { 609 *tile_x = irb->draw_x % (512 / cpp); 610 *tile_y = irb->draw_y % 8; 611 return ((irb->draw_y / 8) * (8 * pitch) + 612 (irb->draw_x - *tile_x) / (512 / cpp) * 4096); 613 } else { 614 assert(region->tiling == I915_TILING_Y); 615 *tile_x = irb->draw_x % (128 / cpp); 616 *tile_y = irb->draw_y % 32; 617 return ((irb->draw_y / 32) * (32 * pitch) + 618 (irb->draw_x - *tile_x) / (128 / cpp) * 4096); 619 } 620} 621 622#ifndef I915 623static bool 624need_tile_offset_workaround(struct brw_context *brw, 625 struct intel_renderbuffer *irb) 626{ 627 uint32_t tile_x, tile_y; 628 629 if (brw->has_surface_tile_offset) 630 return false; 631 632 intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y); 633 634 return tile_x != 0 || tile_y != 0; 635} 636#endif 637 638/** 639 * Called by glFramebufferTexture[123]DEXT() (and other places) to 640 * prepare for rendering into texture memory. This might be called 641 * many times to choose different texture levels, cube faces, etc 642 * before intel_finish_render_texture() is ever called. 
643 */ 644static void 645intel_render_texture(struct gl_context * ctx, 646 struct gl_framebuffer *fb, 647 struct gl_renderbuffer_attachment *att) 648{ 649 struct intel_context *intel = intel_context(ctx); 650 struct gl_texture_image *image = _mesa_get_attachment_teximage(att); 651 struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer); 652 struct intel_texture_image *intel_image = intel_texture_image(image); 653 struct intel_mipmap_tree *mt = intel_image->mt; 654 655 (void) fb; 656 657 int layer; 658 if (att->CubeMapFace > 0) { 659 assert(att->Zoffset == 0); 660 layer = att->CubeMapFace; 661 } else { 662 layer = att->Zoffset; 663 } 664 665 if (!intel_image->mt) { 666 /* Fallback on drawing to a texture that doesn't have a miptree 667 * (has a border, width/height 0, etc.) 668 */ 669 _mesa_reference_renderbuffer(&att->Renderbuffer, NULL); 670 _swrast_render_texture(ctx, fb, att); 671 return; 672 } 673 else if (!irb) { 674 irb = intel_renderbuffer_wrap_miptree(intel, 675 mt, 676 att->TextureLevel, 677 layer, 678 image->TexFormat, 679 image->InternalFormat); 680 681 if (irb) { 682 /* bind the wrapper to the attachment point */ 683 _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base); 684 } 685 else { 686 /* fallback to software rendering */ 687 _swrast_render_texture(ctx, fb, att); 688 return; 689 } 690 } 691 692 if (!intel_renderbuffer_update_wrapper(intel, irb, 693 mt, att->TextureLevel, layer, 694 image->TexFormat, 695 image->InternalFormat)) { 696 _mesa_reference_renderbuffer(&att->Renderbuffer, NULL); 697 _swrast_render_texture(ctx, fb, att); 698 return; 699 } 700 701 DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n", 702 _mesa_get_format_name(image->TexFormat), 703 att->Texture->Name, image->Width, image->Height, 704 irb->Base.RefCount); 705 706 intel_image->used_as_render_target = true; 707 708#ifndef I915 709 if (need_tile_offset_workaround(brw_context(ctx), irb)) { 710 /* Original gen4 hardware couldn't draw to a 
non-tile-aligned 711 * destination in a miptree unless you actually setup your 712 * renderbuffer as a miptree and used the fragile 713 * lod/array_index/etc. controls to select the image. So, 714 * instead, we just make a new single-level miptree and render 715 * into that. 716 */ 717 struct intel_context *intel = intel_context(ctx); 718 struct intel_mipmap_tree *new_mt; 719 int width, height, depth; 720 721 intel_miptree_get_dimensions_for_image(image, &width, &height, &depth); 722 723 new_mt = intel_miptree_create(intel, image->TexObject->Target, 724 intel_image->base.Base.TexFormat, 725 intel_image->base.Base.Level, 726 intel_image->base.Base.Level, 727 width, height, depth, 728 true); 729 730 intel_miptree_copy_teximage(intel, intel_image, new_mt); 731 intel_renderbuffer_set_draw_offset(irb); 732 733 intel_miptree_reference(&irb->mt, intel_image->mt); 734 intel_miptree_release(&new_mt); 735 } 736#endif 737 /* update drawing region, etc */ 738 intel_draw_buffer(ctx); 739} 740 741 742/** 743 * Called by Mesa when rendering to a texture is done. 744 */ 745static void 746intel_finish_render_texture(struct gl_context * ctx, 747 struct gl_renderbuffer_attachment *att) 748{ 749 struct intel_context *intel = intel_context(ctx); 750 struct gl_texture_object *tex_obj = att->Texture; 751 struct gl_texture_image *image = 752 tex_obj->Image[att->CubeMapFace][att->TextureLevel]; 753 struct intel_texture_image *intel_image = intel_texture_image(image); 754 755 DBG("Finish render %s texture tex=%u\n", 756 _mesa_get_format_name(image->TexFormat), att->Texture->Name); 757 758 /* Flag that this image may now be validated into the object's miptree. */ 759 if (intel_image) 760 intel_image->used_as_render_target = false; 761 762 /* Since we've (probably) rendered to the texture and will (likely) use 763 * it in the texture domain later on in this batchbuffer, flush the 764 * batch. 
Once again, we wish for a domain tracker in libdrm to cover 765 * usage inside of a batchbuffer like GEM does in the kernel. 766 */ 767 intel_batchbuffer_emit_mi_flush(intel); 768} 769 770/** 771 * Do additional "completeness" testing of a framebuffer object. 772 */ 773static void 774intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb) 775{ 776 struct intel_context *intel = intel_context(ctx); 777 const struct intel_renderbuffer *depthRb = 778 intel_get_renderbuffer(fb, BUFFER_DEPTH); 779 const struct intel_renderbuffer *stencilRb = 780 intel_get_renderbuffer(fb, BUFFER_STENCIL); 781 struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL; 782 int i; 783 784 if (depthRb) 785 depth_mt = depthRb->mt; 786 if (stencilRb) { 787 stencil_mt = stencilRb->mt; 788 if (stencil_mt->stencil_mt) 789 stencil_mt = stencil_mt->stencil_mt; 790 } 791 792 if (depth_mt && stencil_mt) { 793 if (depth_mt == stencil_mt) { 794 /* For true packed depth/stencil (not faked on prefers-separate-stencil 795 * hardware) we need to be sure they're the same level/layer, since 796 * we'll be emitting a single packet describing the packed setup. 797 */ 798 if (depthRb->mt_level != stencilRb->mt_level || 799 depthRb->mt_layer != stencilRb->mt_layer) { 800 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 801 } 802 } else { 803 if (!intel->has_separate_stencil) 804 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 805 if (stencil_mt->format != MESA_FORMAT_S8) 806 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 807 } 808 } 809 810 for (i = 0; i < Elements(fb->Attachment); i++) { 811 struct gl_renderbuffer *rb; 812 struct intel_renderbuffer *irb; 813 814 if (fb->Attachment[i].Type == GL_NONE) 815 continue; 816 817 /* A supported attachment will have a Renderbuffer set either 818 * from being a Renderbuffer or being a texture that got the 819 * intel_wrap_texture() treatment. 
820 */ 821 rb = fb->Attachment[i].Renderbuffer; 822 if (rb == NULL) { 823 DBG("attachment without renderbuffer\n"); 824 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 825 continue; 826 } 827 828 irb = intel_renderbuffer(rb); 829 if (irb == NULL) { 830 DBG("software rendering renderbuffer\n"); 831 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 832 continue; 833 } 834 835 if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) { 836 DBG("Unsupported HW texture/renderbuffer format attached: %s\n", 837 _mesa_get_format_name(irb->Base.Format)); 838 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 839 } 840 841#ifdef I915 842 if (!intel_span_supports_format(irb->Base.Format)) { 843 DBG("Unsupported swrast texture/renderbuffer format attached: %s\n", 844 _mesa_get_format_name(irb->Base.Format)); 845 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT; 846 } 847#endif 848 } 849} 850 851/** 852 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D 853 * We can do this when the dst renderbuffer is actually a texture and 854 * there is no scaling, mirroring or scissoring. 855 * 856 * \return new buffer mask indicating the buffers left to blit using the 857 * normal path. 
858 */ 859static GLbitfield 860intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx, 861 GLint srcX0, GLint srcY0, 862 GLint srcX1, GLint srcY1, 863 GLint dstX0, GLint dstY0, 864 GLint dstX1, GLint dstY1, 865 GLbitfield mask, GLenum filter) 866{ 867 if (mask & GL_COLOR_BUFFER_BIT) { 868 const struct gl_framebuffer *drawFb = ctx->DrawBuffer; 869 const struct gl_framebuffer *readFb = ctx->ReadBuffer; 870 const struct gl_renderbuffer_attachment *drawAtt = 871 &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]]; 872 struct intel_renderbuffer *srcRb = 873 intel_renderbuffer(readFb->_ColorReadBuffer); 874 875 /* If the source and destination are the same size with no 876 mirroring, the rectangles are within the size of the 877 texture and there is no scissor then we can use 878 glCopyTexSubimage2D to implement the blit. This will end 879 up as a fast hardware blit on some drivers */ 880 if (srcRb && drawAtt && drawAtt->Texture && 881 srcX0 - srcX1 == dstX0 - dstX1 && 882 srcY0 - srcY1 == dstY0 - dstY1 && 883 srcX1 >= srcX0 && 884 srcY1 >= srcY0 && 885 srcX0 >= 0 && srcX1 <= readFb->Width && 886 srcY0 >= 0 && srcY1 <= readFb->Height && 887 dstX0 >= 0 && dstX1 <= drawFb->Width && 888 dstY0 >= 0 && dstY1 <= drawFb->Height && 889 !ctx->Scissor.Enabled) { 890 const struct gl_texture_object *texObj = drawAtt->Texture; 891 const GLuint dstLevel = drawAtt->TextureLevel; 892 const GLenum target = texObj->Target; 893 894 struct gl_texture_image *texImage = 895 _mesa_select_tex_image(ctx, texObj, target, dstLevel); 896 897 if (intel_copy_texsubimage(intel_context(ctx), 898 intel_texture_image(texImage), 899 dstX0, dstY0, 900 srcRb, 901 srcX0, srcY0, 902 srcX1 - srcX0, /* width */ 903 srcY1 - srcY0)) 904 mask &= ~GL_COLOR_BUFFER_BIT; 905 } 906 } 907 908 return mask; 909} 910 911static void 912intel_blit_framebuffer(struct gl_context *ctx, 913 GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, 914 GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, 915 GLbitfield 
mask, GLenum filter) 916{ 917 /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */ 918 mask = intel_blit_framebuffer_copy_tex_sub_image(ctx, 919 srcX0, srcY0, srcX1, srcY1, 920 dstX0, dstY0, dstX1, dstY1, 921 mask, filter); 922 if (mask == 0x0) 923 return; 924 925 _mesa_meta_BlitFramebuffer(ctx, 926 srcX0, srcY0, srcX1, srcY1, 927 dstX0, dstY0, dstX1, dstY1, 928 mask, filter); 929} 930 931void 932intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb) 933{ 934 if (irb->mt) { 935 intel_miptree_slice_set_needs_hiz_resolve(irb->mt, 936 irb->mt_level, 937 irb->mt_layer); 938 } 939} 940 941void 942intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb) 943{ 944 if (irb->mt) { 945 intel_miptree_slice_set_needs_depth_resolve(irb->mt, 946 irb->mt_level, 947 irb->mt_layer); 948 } 949} 950 951bool 952intel_renderbuffer_resolve_hiz(struct intel_context *intel, 953 struct intel_renderbuffer *irb) 954{ 955 if (irb->mt) 956 return intel_miptree_slice_resolve_hiz(intel, 957 irb->mt, 958 irb->mt_level, 959 irb->mt_layer); 960 961 return false; 962} 963 964bool 965intel_renderbuffer_resolve_depth(struct intel_context *intel, 966 struct intel_renderbuffer *irb) 967{ 968 if (irb->mt) 969 return intel_miptree_slice_resolve_depth(intel, 970 irb->mt, 971 irb->mt_level, 972 irb->mt_layer); 973 974 return false; 975} 976 977/** 978 * Do one-time context initializations related to GL_EXT_framebuffer_object. 979 * Hook in device driver functions. 
980 */ 981void 982intel_fbo_init(struct intel_context *intel) 983{ 984 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer; 985 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer; 986 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer; 987 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer; 988 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer; 989 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer; 990 intel->ctx.Driver.RenderTexture = intel_render_texture; 991 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture; 992 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers; 993 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer; 994 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer; 995 996#if FEATURE_OES_EGL_image 997 intel->ctx.Driver.EGLImageTargetRenderbufferStorage = 998 intel_image_target_renderbuffer_storage; 999#endif 1000} 1001