intel_fbo.c revision fdf18b323156098ba5fb2881aa1a7888d2e0667f
1/************************************************************************** 2 * 3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 * 26 **************************************************************************/ 27 28 29#include "main/enums.h" 30#include "main/imports.h" 31#include "main/macros.h" 32#include "main/mfeatures.h" 33#include "main/mtypes.h" 34#include "main/fbobject.h" 35#include "main/framebuffer.h" 36#include "main/renderbuffer.h" 37#include "main/context.h" 38#include "main/teximage.h" 39#include "main/image.h" 40 41#include "swrast/swrast.h" 42#include "drivers/common/meta.h" 43 44#include "intel_context.h" 45#include "intel_batchbuffer.h" 46#include "intel_buffers.h" 47#include "intel_blit.h" 48#include "intel_fbo.h" 49#include "intel_mipmap_tree.h" 50#include "intel_regions.h" 51#include "intel_tex.h" 52#include "intel_span.h" 53#ifndef I915 54#include "brw_context.h" 55#endif 56 57#define FILE_DEBUG_FLAG DEBUG_FBO 58 59static struct gl_renderbuffer * 60intel_new_renderbuffer(struct gl_context * ctx, GLuint name); 61 62static bool 63intel_renderbuffer_update_wrapper(struct intel_context *intel, 64 struct intel_renderbuffer *irb, 65 struct intel_mipmap_tree *mt, 66 uint32_t level, 67 uint32_t layer, 68 gl_format format, 69 GLenum internal_format); 70 71bool 72intel_framebuffer_has_hiz(struct gl_framebuffer *fb) 73{ 74 struct intel_renderbuffer *rb = NULL; 75 if (fb) 76 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH); 77 return rb && rb->mt && rb->mt->hiz_mt; 78} 79 80struct intel_region* 81intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex) 82{ 83 struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex); 84 if (irb && irb->mt) { 85 if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt) 86 return irb->mt->stencil_mt->region; 87 else 88 return irb->mt->region; 89 } else 90 return NULL; 91} 92 93/** 94 * Create a new framebuffer object. 
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   /* Drop our reference on the backing miptree and any wrapped depth
    * renderbuffer (used for separate-stencil setups) before freeing.
    */
   intel_miptree_release(&irb->mt);

   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);

   free(irb);
}

/**
 * Map a rectangle of the renderbuffer for CPU access.
 *
 * On success *out_map points at the first requested pixel and *out_stride
 * is the byte stride between rows (negative for window-system buffers,
 * which are stored upside-down relative to GL's coordinate system).
 *
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we need to ask for a rectangle flipped vertically, and
    * we then return a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      /* Point at the last row of the mapped rectangle and walk backwards. */
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}

/**
 * Unmap a renderbuffer previously mapped by intel_map_renderbuffer().
 *
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}

/**
 * Return a pointer to a specific pixel in a renderbuffer.
 */
static void *
intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
                  GLint x, GLint y)
{
   /* By returning NULL we force all software rendering to go through
    * the span routines.
    */
   return NULL;
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 *
 * Chooses a hardware format, allocates a fresh miptree (releasing any
 * previous one), and — for HiZ-capable depth formats and separate-stencil
 * setups — allocates the auxiliary miptrees and wrapped depth renderbuffer.
 *
 * \return true on success, false on allocation failure.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(rb->Name != 0);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
                                                         GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S8;
      } else {
         assert(!intel->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);

   /* Flush any pending rendering that might reference the old storage
    * before we release it.
    */
   intel_flush(ctx);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                   width, height);
   if (!irb->mt)
      return false;

   if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
      if (!ok) {
         intel_miptree_release(&irb->mt);
         return false;
      }
   }

   if (irb->mt->stencil_mt) {
      bool ok;
      struct intel_renderbuffer *depth_irb;

      /* The RB got allocated as separate stencil.  Hook up our wrapped
       * renderbuffer so that consumers of intel_get_renderbuffer(BUFFER_DEPTH)
       * end up with pointers to the separate depth.
       */
      if (!irb->wrapped_depth) {
         _mesa_reference_renderbuffer(&irb->wrapped_depth,
                                      intel_new_renderbuffer(ctx, ~0));
      }

      depth_irb = intel_renderbuffer(irb->wrapped_depth);
      if (!depth_irb) {
         intel_miptree_release(&irb->mt);
         return false;
      }

      assert(irb->mt->format == MESA_FORMAT_S8_Z24 ||
             irb->mt->format == MESA_FORMAT_X8_Z24);
      /* The wrapper views the packed Z24 miptree as depth-only X8_Z24. */
      ok = intel_renderbuffer_update_wrapper(intel, depth_irb, irb->mt,
                                             0, 0, /* level, layer */
                                             MESA_FORMAT_X8_Z24,
                                             GL_DEPTH_COMPONENT24);
      assert(ok);
   }

   return true;
}


#if FEATURE_OES_EGL_image
/**
 * Set up renderbuffer storage from an EGLImage handle.
 *
 * Silently returns (no GL error) if the image lookup fails; rejects
 * formats the hardware cannot render to.
 */
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      /* NOTE(review): the message below is missing its closing ')' —
       * intentional-looking copy of other Mesa messages, but verify.
       */
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format");
      return;
      break;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
344 * Just update fields. 345 * Not used for user-created renderbuffers! 346 */ 347static GLboolean 348intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 349 GLenum internalFormat, GLuint width, GLuint height) 350{ 351 ASSERT(rb->Name == 0); 352 rb->Width = width; 353 rb->Height = height; 354 rb->InternalFormat = internalFormat; 355 356 return true; 357} 358 359 360static void 361intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb, 362 GLuint width, GLuint height) 363{ 364 int i; 365 366 _mesa_resize_framebuffer(ctx, fb, width, height); 367 368 fb->Initialized = true; /* XXX remove someday */ 369 370 if (fb->Name != 0) { 371 return; 372 } 373 374 375 /* Make sure all window system renderbuffers are up to date */ 376 for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) { 377 struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer; 378 379 /* only resize if size is changing */ 380 if (rb && (rb->Width != width || rb->Height != height)) { 381 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height); 382 } 383 } 384} 385 386 387/** Dummy function for gl_renderbuffer::AllocStorage() */ 388static GLboolean 389intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb, 390 GLenum internalFormat, GLuint width, GLuint height) 391{ 392 _mesa_problem(ctx, "intel_op_alloc_storage should never be called."); 393 return false; 394} 395 396/** 397 * Create a new intel_renderbuffer which corresponds to an on-screen window, 398 * not a user-created renderbuffer. 
399 */ 400struct intel_renderbuffer * 401intel_create_renderbuffer(gl_format format) 402{ 403 GET_CURRENT_CONTEXT(ctx); 404 405 struct intel_renderbuffer *irb; 406 407 irb = CALLOC_STRUCT(intel_renderbuffer); 408 if (!irb) { 409 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 410 return NULL; 411 } 412 413 _mesa_init_renderbuffer(&irb->Base, 0); 414 irb->Base.ClassID = INTEL_RB_CLASS; 415 irb->Base._BaseFormat = _mesa_get_format_base_format(format); 416 irb->Base.Format = format; 417 irb->Base.InternalFormat = irb->Base._BaseFormat; 418 irb->Base.DataType = intel_mesa_format_to_rb_datatype(format); 419 420 /* intel-specific methods */ 421 irb->Base.Delete = intel_delete_renderbuffer; 422 irb->Base.AllocStorage = intel_alloc_window_storage; 423 irb->Base.GetPointer = intel_get_pointer; 424 425 return irb; 426} 427 428/** 429 * Create a new renderbuffer object. 430 * Typically called via glBindRenderbufferEXT(). 431 */ 432static struct gl_renderbuffer * 433intel_new_renderbuffer(struct gl_context * ctx, GLuint name) 434{ 435 /*struct intel_context *intel = intel_context(ctx); */ 436 struct intel_renderbuffer *irb; 437 438 irb = CALLOC_STRUCT(intel_renderbuffer); 439 if (!irb) { 440 _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer"); 441 return NULL; 442 } 443 444 _mesa_init_renderbuffer(&irb->Base, name); 445 irb->Base.ClassID = INTEL_RB_CLASS; 446 447 /* intel-specific methods */ 448 irb->Base.Delete = intel_delete_renderbuffer; 449 irb->Base.AllocStorage = intel_alloc_renderbuffer_storage; 450 irb->Base.GetPointer = intel_get_pointer; 451 /* span routines set in alloc_storage function */ 452 453 return &irb->Base; 454} 455 456 457/** 458 * Called via glBindFramebufferEXT(). 
459 */ 460static void 461intel_bind_framebuffer(struct gl_context * ctx, GLenum target, 462 struct gl_framebuffer *fb, struct gl_framebuffer *fbread) 463{ 464 if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) { 465 intel_draw_buffer(ctx); 466 } 467 else { 468 /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */ 469 } 470} 471 472 473/** 474 * Called via glFramebufferRenderbufferEXT(). 475 */ 476static void 477intel_framebuffer_renderbuffer(struct gl_context * ctx, 478 struct gl_framebuffer *fb, 479 GLenum attachment, struct gl_renderbuffer *rb) 480{ 481 DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0); 482 483 intel_flush(ctx); 484 485 _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb); 486 intel_draw_buffer(ctx); 487} 488 489static struct intel_renderbuffer* 490intel_renderbuffer_wrap_miptree(struct intel_context *intel, 491 struct intel_mipmap_tree *mt, 492 uint32_t level, 493 uint32_t layer, 494 gl_format format, 495 GLenum internal_format); 496 497/** 498 * \par Special case for separate stencil 499 * 500 * When wrapping a depthstencil texture that uses separate stencil, this 501 * function is recursively called twice: once to create \c 502 * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the 503 * call to create \c irb->wrapped_depth, the \c format and \c 504 * internal_format parameters do not match \c mt->format. In that case, \c 505 * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c 506 * MESA_FORMAT_X8_Z24. 
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format)
{
   struct gl_renderbuffer *rb = &irb->Base;

   /* Mirror the miptree slice's properties into the gl_renderbuffer. */
   rb->Format = format;
   rb->InternalFormat = internal_format;
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   /* Storage is owned by the miptree, so AllocStorage must never run. */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   intel_miptree_reference(&irb->mt, mt);

   if (mt->stencil_mt && _mesa_is_depthstencil_format(rb->InternalFormat)) {
      struct intel_renderbuffer *depth_irb;

      /* Separate-stencil depthstencil: recurse once to (re)build the
       * depth-only X8_Z24 view in irb->wrapped_depth (see the comment
       * block above this function).
       */
      if (!irb->wrapped_depth) {
         depth_irb = intel_renderbuffer_wrap_miptree(intel,
                                                     mt, level, layer,
                                                     MESA_FORMAT_X8_Z24,
                                                     GL_DEPTH_COMPONENT24);
         _mesa_reference_renderbuffer(&irb->wrapped_depth, &depth_irb->Base);

         if (!irb->wrapped_depth) {
            intel_miptree_release(&irb->mt);
            return false;
         }
      } else {
         bool ok = true;

         depth_irb = intel_renderbuffer(irb->wrapped_depth);

         ok &= intel_renderbuffer_update_wrapper(intel,
                                                 depth_irb,
                                                 mt,
                                                 level, layer,
                                                 MESA_FORMAT_X8_Z24,
                                                 GL_DEPTH_COMPONENT24);
         if (!ok) {
            intel_miptree_release(&irb->mt);
            return false;
         }
      }
   } else {
      intel_renderbuffer_set_draw_offset(irb);

      /* Lazily allocate HiZ for depth formats that support it. */
      if (mt->hiz_mt == NULL &&
          intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
         intel_miptree_alloc_hiz(intel, mt);
         if (!mt->hiz_mt)
            return false;
      }
   }

   return true;
}

/**
 * \brief Wrap a renderbuffer around a single slice of a
 * miptree.
 *
 * Called by glFramebufferTexture*().  This just allocates a
 * ``struct intel_renderbuffer`` then calls
 * intel_renderbuffer_update_wrapper() to do the real work.
 *
 * \see intel_renderbuffer_update_wrapper()
 */
static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format)

{
   struct gl_context *ctx = &intel->ctx;
   struct gl_renderbuffer *rb;
   struct intel_renderbuffer *irb;

   intel_miptree_check_level_layer(mt, level, layer);

   /* ~0 is a placeholder name; this renderbuffer is never in the hash. */
   rb = intel_new_renderbuffer(ctx, ~0);
   irb = intel_renderbuffer(rb);
   if (!irb)
      return NULL;

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, level, layer,
                                          format, internal_format)) {
      /* NOTE(review): bare free() skips _mesa_init_renderbuffer teardown;
       * matches intel_delete_renderbuffer's free, but verify no refcount
       * or mutex state is leaked on this error path.
       */
      free(irb);
      return NULL;
   }

   return irb;
}

/**
 * Record the X/Y offset of this renderbuffer's 2D image within its
 * miptree, for use when programming surface state.
 */
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  0, /* face, which we ignore */
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
644 */ 645uint32_t 646intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb, 647 uint32_t *tile_x, 648 uint32_t *tile_y) 649{ 650 struct intel_region *region = irb->mt->region; 651 int cpp = region->cpp; 652 uint32_t pitch = region->pitch * cpp; 653 654 if (region->tiling == I915_TILING_NONE) { 655 *tile_x = 0; 656 *tile_y = 0; 657 return irb->draw_x * cpp + irb->draw_y * pitch; 658 } else if (region->tiling == I915_TILING_X) { 659 *tile_x = irb->draw_x % (512 / cpp); 660 *tile_y = irb->draw_y % 8; 661 return ((irb->draw_y / 8) * (8 * pitch) + 662 (irb->draw_x - *tile_x) / (512 / cpp) * 4096); 663 } else { 664 assert(region->tiling == I915_TILING_Y); 665 *tile_x = irb->draw_x % (128 / cpp); 666 *tile_y = irb->draw_y % 32; 667 return ((irb->draw_y / 32) * (32 * pitch) + 668 (irb->draw_x - *tile_x) / (128 / cpp) * 4096); 669 } 670} 671 672#ifndef I915 673static bool 674need_tile_offset_workaround(struct brw_context *brw, 675 struct intel_renderbuffer *irb) 676{ 677 uint32_t tile_x, tile_y; 678 679 if (brw->has_surface_tile_offset) 680 return false; 681 682 intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y); 683 684 return tile_x != 0 || tile_y != 0; 685} 686#endif 687 688/** 689 * Called by glFramebufferTexture[123]DEXT() (and other places) to 690 * prepare for rendering into texture memory. This might be called 691 * many times to choose different texture levels, cube faces, etc 692 * before intel_finish_render_texture() is ever called. 
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;

   (void) fb;

   /* Cube faces and array/3D slices share the miptree "layer" axis. */
   int layer;
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      irb = intel_renderbuffer_wrap_miptree(intel,
                                            mt,
                                            att->TextureLevel,
                                            layer,
                                            image->TexFormat,
                                            image->InternalFormat);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   /* Re-point the (possibly pre-existing) wrapper at the currently
    * selected level/layer.
    */
   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, att->TextureLevel, layer,
                                          image->TexFormat,
                                          image->InternalFormat)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _swrast_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
                                    intel_image->base.Base.TexFormat,
                                    intel_image->base.Base.Level,
                                    intel_image->base.Base.Level,
                                    width, height, depth,
                                    true);

      /* NOTE(review): presumably copy_teximage re-points intel_image->mt
       * at new_mt, making the reference below pick up the new tree —
       * confirm against intel_miptree_copy_teximage.
       */
      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      /* NOTE(review): assumes stencilRb->mt is non-NULL here — verify no
       * caller can reach this with a miptree-less stencil attachment.
       */
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
      } else {
         if (!intel->has_separate_stencil)
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         if (stencil_mt->format != MESA_FORMAT_S8)
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         DBG("attachment without renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         DBG("software rendering renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
         DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }

#ifdef I915
      if (!intel_span_supports_format(irb->Base.Format)) {
         DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(irb->Base.Format));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
#endif
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      /* NOTE(review): _ColorDrawBufferIndexes[0] can be -1 when the draw
       * buffer is GL_NONE, which would index before the array — confirm
       * callers can't reach here in that state.
       */
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture and there is no scissor then we can use
         glCopyTexSubimage2D to implement the blit.  This will end
         up as a fast hardware blit on some drivers */
      if (drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   /* Whatever is left over gets handled by the generic meta path. */
   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

/**
 * Mark this renderbuffer's slice as needing a HiZ resolve, following the
 * wrapped-depth chain if the wrapper itself has no miptree.
 */
void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   } else if (irb->wrapped_depth) {
      intel_renderbuffer_set_needs_hiz_resolve(
            intel_renderbuffer(irb->wrapped_depth));
   } else {
      return;
   }
}

/**
 * Mark this renderbuffer's slice as needing a depth resolve, following the
 * wrapped-depth chain if the wrapper itself has no miptree.
 */
void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   } else if (irb->wrapped_depth) {
      intel_renderbuffer_set_needs_depth_resolve(
            intel_renderbuffer(irb->wrapped_depth));
   } else {
      return;
   }
}

/**
 * Perform any pending HiZ resolve on this renderbuffer's slice.
 *
 * \return true if a resolve was performed.
 */
bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);
   if (irb->wrapped_depth)
      return intel_renderbuffer_resolve_hiz(intel,
                                            intel_renderbuffer(irb->wrapped_depth));

   return false;
}

/**
 * Perform any pending depth resolve on this renderbuffer's slice.
 *
 * \return true if a resolve was performed.
 */
bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   if (irb->wrapped_depth)
      return intel_renderbuffer_resolve_depth(intel,
                                              intel_renderbuffer(irb->wrapped_depth));

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
1043 * Hook in device driver functions. 1044 */ 1045void 1046intel_fbo_init(struct intel_context *intel) 1047{ 1048 intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer; 1049 intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer; 1050 intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer; 1051 intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer; 1052 intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer; 1053 intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer; 1054 intel->ctx.Driver.RenderTexture = intel_render_texture; 1055 intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture; 1056 intel->ctx.Driver.ResizeBuffers = intel_resize_buffers; 1057 intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer; 1058 intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer; 1059 1060#if FEATURE_OES_EGL_image 1061 intel->ctx.Driver.EGLImageTargetRenderbufferStorage = 1062 intel_image_target_renderbuffer_storage; 1063#endif 1064} 1065