intel_fbo.c revision 53fa28f7b1f21251a3807abf1f234f52beff0256
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO

static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name);

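/**
 * Return the intel_region backing the given renderbuffer attachment, or NULL
 * if the attachment has no miptree.  For the stencil attachment, the separate
 * stencil miptree's region is returned when one exists.
 */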
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt) {
      if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
         return irb->mt->stencil_mt->region;
      else
         return irb->mt->region;
   } else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);

   free(irb);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we receive
    * upside-down.  So we ask for a rectangle that is flipped vertically, and
    * then return a pointer to the bottom of it with a negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}


/**
 * Round up the requested multisample count to the next supported sample size.
 */
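/* For example, a request for 3 samples is quantized to 4 on Gen6 and Gen7, a
 * request for 6 becomes 8 on Gen7, and any nonzero request on pre-Gen6
 * hardware yields 1 (single-sampled).
 */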
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   switch (intel->gen) {
   case 6:
      /* Gen6 supports only 4x multisampling. */
      if (num_samples > 0)
         return 4;
      else
         return 0;
   case 7:
      /* Gen7 supports 4x and 8x multisampling. */
      if (num_samples > 4)
         return 8;
      else if (num_samples > 0)
         return 4;
      else
         return 0;
   default:
      /* MSAA unsupported.  However, a careful reading of
       * EXT_framebuffer_multisample reveals that we need to permit
       * num_samples to be 1 (since num_samples is permitted to be as high as
       * GL_MAX_SAMPLES, and GL_MAX_SAMPLES must be at least 1).  Since
       * platforms before Gen6 don't support MSAA, this is safe, because
       * multisampling won't happen anyhow.
       */
      if (num_samples > 0)
         return 1;
      return 0;
   }
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_screen *screen = intel->intelScreen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
                                                         GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
         rb->Format = MESA_FORMAT_S8;
      } else {
         assert(!intel->must_use_separate_stencil);
         rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                   width, height,
                                                   rb->NumSamples);
   if (!irb->mt)
      return false;

   if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      bool ok = intel_miptree_alloc_hiz(intel, irb->mt, rb->NumSamples);
      if (!ok) {
         intel_miptree_release(&irb->mt);
         return false;
      }
   }

   if (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
      bool ok = intel_miptree_alloc_mcs(intel, irb->mt, rb->NumSamples);
      if (!ok) {
         intel_miptree_release(&irb->mt);
         return false;
      }
   }

   return true;
}


#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
                                              screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                           image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Just update fields.
 * Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}


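/**
 * Driver hook for ctx->Driver.ResizeBuffers.  Updates Mesa's notion of the
 * framebuffer size and reallocates storage for any window-system
 * renderbuffer whose size actually changed.
 */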
static void
intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
                     GLuint width, GLuint height)
{
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = true; /* XXX remove someday */

   if (_mesa_is_user_fbo(fb)) {
      return;
   }


   /* Make sure all window system renderbuffers are up to date */
   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
         rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}


/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(gl_format format)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   GET_CURRENT_CONTEXT(ctx);

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server created with intel_create_renderbuffer()) are most similar in their
 * handling to user-created renderbuffers, but they have a resize handler that
 * may be called at intel_update_renderbuffers() time.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(gl_format format)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_renderbuffer(format);
   irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

/**
 * \par Special case for separate stencil
 *
 *     When wrapping a depthstencil texture that uses separate stencil, this
 *     function is recursively called twice: once to create \c
 *     irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
 *     call to create \c irb->wrapped_depth, the \c format and \c
 *     internal_format parameters do not match \c mt->format. In that case, \c
 *     mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 *     MESA_FORMAT_X8_Z24.
 *
 * @return true on success
 */

static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->Format = image->TexFormat;
   rb->InternalFormat = image->InternalFormat;
   rb->_BaseFormat = image->_BaseFormat;
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   if (mt->hiz_mt == NULL &&
       intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
      intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
      if (!mt->hiz_mt)
         return false;
   }

   return true;
}

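/**
 * Record the 2D offset (draw_x/draw_y) of the renderbuffer's level/layer
 * image within its miptree region.  intel_renderbuffer_tile_offsets() uses
 * these values when setting the buffer up as a render target.
 */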
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  0, /* face, which we ignore */
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
                                uint32_t *tile_x,
                                uint32_t *tile_y)
{
   struct intel_region *region = irb->mt->region;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y);

   *tile_x = irb->draw_x & mask_x;
   *tile_y = irb->draw_y & mask_y;
   return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
                                          irb->draw_y & ~mask_y);
}

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

      irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   irb->tex_image = image;

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.Base.RefCount);

   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   if (irb)
      irb->tex_image = NULL;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   int i;

   DBG("%s() on fb %p (%s)\n", __FUNCTION__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on prefers-separate-stencil
          * hardware) we need to be sure they're the same level/layer, since
          * we'll be emitting a single packet describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
                depthRb->mt_level,
                depthRb->mt_layer,
                stencilRb->mt_level,
                stencilRb->mt_layer);
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
      } else {
         if (!intel->has_separate_stencil) {
            DBG("separate stencil unsupported\n");
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
         if (stencil_mt->format != MESA_FORMAT_S8) {
            DBG("separate stencil is %s instead of S8\n",
                _mesa_get_format_name(stencil_mt->format));
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         }
         if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value (enabled
             *     or disabled) as Hierarchical Depth Buffer Enable.
             */
            DBG("separate stencil without HiZ\n");
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
         }
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         DBG("attachment without renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         const struct gl_texture_image *img =
            _mesa_get_attachment_teximage_const(&fb->Attachment[i]);

         if (img->Border) {
            DBG("texture with border\n");
            fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         DBG("software rendering renderbuffer\n");
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
         continue;
      }

      if (!intel->vtbl.render_target_supported(intel, rb)) {
         DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
             _mesa_get_format_name(intel_rb_format(irb)));
         fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
      struct intel_renderbuffer *srcRb =
         intel_renderbuffer(readFb->_ColorReadBuffer);

      /* If the source and destination are the same size with no mirroring,
         the rectangles are within the size of the texture, and there is no
         scissor, then we can use glCopyTexSubImage2D to implement the blit.
         This will end up as a fast hardware blit on some drivers. */
      if (srcRb && drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcRb,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

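/**
 * Driver hook for glBlitFramebuffer().  Tries progressively more general
 * paths: first the glCopyTexSubImage2D shortcut above, then BLORP (i965
 * only), and finally the meta implementation for any buffers still left in
 * the mask.
 */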
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try the faster glCopyTexSubImage2D approach first, which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

#ifndef I915
   mask = brw_blorp_framebuffer(intel_context(ctx),
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;
#endif

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

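/**
 * Flag the slice (level/layer) of the renderbuffer's miptree as needing a
 * HiZ resolve.  No-op if the renderbuffer has no miptree.
 */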
void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   }
}

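/**
 * Flag the slice (level/layer) of the renderbuffer's miptree as needing a
 * depth resolve.  No-op if the renderbuffer has no miptree.
 */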
void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   }
}

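/**
 * \return the result of intel_miptree_slice_resolve_hiz() for the
 *         renderbuffer's slice, or false if it has no miptree.
 */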
bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);

   return false;
}

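/**
 * \return the result of intel_miptree_slice_resolve_depth() for the
 *         renderbuffer's slice, or false if it has no miptree.
 */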
bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}
965