intel_fbo.c revision c80b31fdee1fa96b8d45ad2537ecdb5b9151973e
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "main/enums.h"
30#include "main/imports.h"
31#include "main/macros.h"
32#include "main/mfeatures.h"
33#include "main/mtypes.h"
34#include "main/fbobject.h"
35#include "main/framebuffer.h"
36#include "main/renderbuffer.h"
37#include "main/context.h"
38#include "main/teximage.h"
39#include "swrast/swrast.h"
40#include "drivers/common/meta.h"
41
42#include "intel_context.h"
43#include "intel_batchbuffer.h"
44#include "intel_buffers.h"
45#include "intel_blit.h"
46#include "intel_fbo.h"
47#include "intel_mipmap_tree.h"
48#include "intel_regions.h"
49#include "intel_tex.h"
50#include "intel_span.h"
51#ifndef I915
52#include "brw_context.h"
53#endif
54
55#define FILE_DEBUG_FLAG DEBUG_FBO
56
57
58bool
59intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
60{
61   struct intel_renderbuffer *rb = NULL;
62   if (fb)
63      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
64   return rb && rb->mt && rb->mt->hiz_region;
65}
66
67struct intel_region*
68intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
69{
70   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
71   if (irb && irb->mt)
72      return irb->mt->region;
73   else
74      return NULL;
75}
76
77/**
78 * Create a new framebuffer object.
79 */
80static struct gl_framebuffer *
81intel_new_framebuffer(struct gl_context * ctx, GLuint name)
82{
83   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
84    * class
85    */
86   return _mesa_new_framebuffer(ctx, name);
87}
88
89
90/** Called by gl_renderbuffer::Delete() */
91static void
92intel_delete_renderbuffer(struct gl_renderbuffer *rb)
93{
94   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
95
96   ASSERT(irb);
97
98   intel_miptree_release(&irb->mt);
99
100   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
101   _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);
102
103   free(irb);
104}
105
106/**
107 * \brief Map a renderbuffer through the GTT.
108 *
109 * \see intel_map_renderbuffer()
110 */
111static void
112intel_map_renderbuffer_gtt(struct gl_context *ctx,
113                           struct gl_renderbuffer *rb,
114                           GLuint x, GLuint y, GLuint w, GLuint h,
115                           GLbitfield mode,
116                           GLubyte **out_map,
117                           GLint *out_stride)
118{
119   struct intel_context *intel = intel_context(ctx);
120   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
121   GLubyte *map;
122   int stride, flip_stride;
123
124   assert(irb->mt);
125
126   irb->map_mode = mode;
127   irb->map_x = x;
128   irb->map_y = y;
129   irb->map_w = w;
130   irb->map_h = h;
131
132   stride = irb->mt->region->pitch * irb->mt->region->cpp;
133
134   if (rb->Name == 0) {
135      y = irb->mt->region->height - 1 - y;
136      flip_stride = -stride;
137   } else {
138      x += irb->draw_x;
139      y += irb->draw_y;
140      flip_stride = stride;
141   }
142
143   if (drm_intel_bo_references(intel->batch.bo, irb->mt->region->bo)) {
144      intel_batchbuffer_flush(intel);
145   }
146
147   drm_intel_gem_bo_map_gtt(irb->mt->region->bo);
148
149   map = irb->mt->region->bo->virtual;
150   map += x * irb->mt->region->cpp;
151   map += (int)y * stride;
152
153   *out_map = map;
154   *out_stride = flip_stride;
155
156   DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
157       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
158       x, y, w, h, *out_map, *out_stride);
159}
160
161/**
162 * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
163 *
164 * On gen6+, we have LLC sharing, which means we can get high-performance
165 * access to linear-mapped buffers.
166 *
167 * This function allocates a temporary gem buffer at
168 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
169 * returns a map of that. (Note: Only X tiled buffers can be blitted).
170 *
171 * \see intel_renderbuffer::map_bo
172 * \see intel_map_renderbuffer()
173 */
174static void
175intel_map_renderbuffer_blit(struct gl_context *ctx,
176			    struct gl_renderbuffer *rb,
177			    GLuint x, GLuint y, GLuint w, GLuint h,
178			    GLbitfield mode,
179			    GLubyte **out_map,
180			    GLint *out_stride)
181{
182   struct intel_context *intel = intel_context(ctx);
183   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
184
185   int src_x, src_y;
186   int dst_stride;
187
188   assert(irb->mt->region);
189   assert(intel->gen >= 6);
190   assert(!(mode & GL_MAP_WRITE_BIT));
191   assert(irb->mt->region->tiling == I915_TILING_X);
192
193   irb->map_mode = mode;
194   irb->map_x = x;
195   irb->map_y = y;
196   irb->map_w = w;
197   irb->map_h = h;
198
199   dst_stride = ALIGN(w * irb->mt->region->cpp, 4);
200
201   if (rb->Name) {
202      src_x = x + irb->draw_x;
203      src_y = y + irb->draw_y;
204   } else {
205      src_x = x;
206      src_y = irb->mt->region->height - y - h;
207   }
208
209   irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
210				    dst_stride * h, 4096);
211
212   /* We don't do the flip in the blit, because it's always so tricky to get
213    * right.
214    */
215   if (irb->map_bo &&
216       intelEmitCopyBlit(intel,
217			 irb->mt->region->cpp,
218			 irb->mt->region->pitch, irb->mt->region->bo,
219			 0, irb->mt->region->tiling,
220			 dst_stride / irb->mt->region->cpp, irb->map_bo,
221			 0, I915_TILING_NONE,
222			 src_x, src_y,
223			 0, 0,
224			 w, h,
225			 GL_COPY)) {
226      intel_batchbuffer_flush(intel);
227      drm_intel_bo_map(irb->map_bo, false);
228
229      if (rb->Name) {
230	 *out_map = irb->map_bo->virtual;
231	 *out_stride = dst_stride;
232      } else {
233	 *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
234	 *out_stride = -dst_stride;
235      }
236
237      DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
238	  __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
239	  src_x, src_y, w, h, *out_map, *out_stride);
240   } else {
241      /* Fallback to GTT mapping. */
242      drm_intel_bo_unreference(irb->map_bo);
243      irb->map_bo = NULL;
244      intel_map_renderbuffer_gtt(ctx, rb,
245				 x, y, w, h,
246				 mode,
247				 out_map, out_stride);
248   }
249}
250
251/**
252 * \brief Map a stencil renderbuffer.
253 *
254 * Stencil buffers are W-tiled. Since the GTT has no W fence, we must detile
255 * the buffer in software.
256 *
257 * This function allocates a temporary malloc'd buffer at
258 * intel_renderbuffer::map_buffer, detiles the stencil buffer into it, then
259 * returns the temporary buffer as the map.
260 *
261 * \see intel_renderbuffer::map_buffer
262 * \see intel_map_renderbuffer()
263 * \see intel_unmap_renderbuffer_s8()
264 */
265static void
266intel_map_renderbuffer_s8(struct gl_context *ctx,
267			  struct gl_renderbuffer *rb,
268			  GLuint x, GLuint y, GLuint w, GLuint h,
269			  GLbitfield mode,
270			  GLubyte **out_map,
271			  GLint *out_stride)
272{
273   struct intel_context *intel = intel_context(ctx);
274   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
275   uint8_t *tiled_s8_map;
276   uint8_t *untiled_s8_map;
277
278   assert(rb->Format == MESA_FORMAT_S8);
279   assert(irb->mt);
280
281   irb->map_mode = mode;
282   irb->map_x = x;
283   irb->map_y = y;
284   irb->map_w = w;
285   irb->map_h = h;
286
287   /* Flip the Y axis for the default framebuffer. */
288   int y_flip = (rb->Name == 0) ? -1 : 1;
289   int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
290
291   irb->map_buffer = malloc(w * h);
292   untiled_s8_map = irb->map_buffer;
293   tiled_s8_map = intel_region_map(intel, irb->mt->region, mode);
294
295   for (uint32_t pix_y = 0; pix_y < h; pix_y++) {
296      for (uint32_t pix_x = 0; pix_x < w; pix_x++) {
297	 uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
298	 ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch,
299	                                    x + pix_x,
300	                                    flipped_y);
301	 untiled_s8_map[pix_y * w + pix_x] = tiled_s8_map[offset];
302      }
303   }
304
305   *out_map = untiled_s8_map;
306   *out_stride = w;
307
308   DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n",
309       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
310       x, y, w, h, *out_map, *out_stride);
311}
312
313/**
314 * \brief Map a depthstencil buffer with separate stencil.
315 *
316 * A depthstencil renderbuffer, if using separate stencil, consists of a depth
317 * renderbuffer and a hidden stencil renderbuffer.  This function maps the
318 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and
319 * returns that as the mapped pointer. The caller need not be aware of the
320 * hidden stencil buffer and may safely assume that the mapped pointer points
321 * to a MESA_FORMAT_S8_Z24 buffer
322 *
323 * The consistency between the depth buffer's S8 bits and the hidden stencil
324 * buffer is managed within intel_map_renderbuffer() and
325 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits
326 * according to the map mode.
327 *
328 * \see intel_map_renderbuffer()
329 * \see intel_unmap_renderbuffer_separate_s8z24()
330 */
331static void
332intel_map_renderbuffer_separate_s8z24(struct gl_context *ctx,
333				      struct gl_renderbuffer *rb,
334				      GLuint x, GLuint y, GLuint w, GLuint h,
335				      GLbitfield mode,
336				      GLubyte **out_map,
337				      GLint *out_stride)
338{
339   struct intel_context *intel = intel_context(ctx);
340   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
341
342   uint8_t *s8z24_map;
343   int32_t s8z24_stride;
344
345   struct intel_renderbuffer *s8_irb;
346   uint8_t *s8_map;
347
348   assert(rb->Name != 0);
349   assert(rb->Format == MESA_FORMAT_S8_Z24);
350   assert(irb->wrapped_depth != NULL);
351   assert(irb->wrapped_stencil != NULL);
352
353   irb->map_mode = mode;
354   irb->map_x = x;
355   irb->map_y = y;
356   irb->map_w = w;
357   irb->map_h = h;
358
359   /* Map with write mode for the gather below. */
360   intel_map_renderbuffer_gtt(ctx, irb->wrapped_depth,
361			       x, y, w, h, mode | GL_MAP_WRITE_BIT,
362			       &s8z24_map, &s8z24_stride);
363
364   s8_irb = intel_renderbuffer(irb->wrapped_stencil);
365   s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_READ_BIT);
366
367   /* Gather the stencil buffer into the depth buffer. */
368   for (uint32_t pix_y = 0; pix_y < h; ++pix_y) {
369      for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
370	 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch,
371					       x + pix_x,
372					       y + pix_y);
373	 ptrdiff_t s8z24_offset = pix_y * s8z24_stride
374				+ pix_x * 4
375				+ 3;
376	 s8z24_map[s8z24_offset] = s8_map[s8_offset];
377      }
378   }
379
380   intel_region_unmap(intel, s8_irb->mt->region);
381
382   *out_map = s8z24_map;
383   *out_stride = s8z24_stride;
384}
385
386/**
387 * \see dd_function_table::MapRenderbuffer
388 */
389static void
390intel_map_renderbuffer(struct gl_context *ctx,
391		       struct gl_renderbuffer *rb,
392		       GLuint x, GLuint y, GLuint w, GLuint h,
393		       GLbitfield mode,
394		       GLubyte **out_map,
395		       GLint *out_stride)
396{
397   struct intel_context *intel = intel_context(ctx);
398   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
399
400   /* We sometimes get called with this by our intel_span.c usage. */
401   if (!irb->mt && !irb->wrapped_depth) {
402      *out_map = NULL;
403      *out_stride = 0;
404      return;
405   }
406
407   if (rb->Format == MESA_FORMAT_S8) {
408      intel_map_renderbuffer_s8(ctx, rb, x, y, w, h, mode,
409			        out_map, out_stride);
410   } else if (irb->wrapped_depth) {
411      intel_map_renderbuffer_separate_s8z24(ctx, rb, x, y, w, h, mode,
412					    out_map, out_stride);
413   } else if (intel->gen >= 6 &&
414	      !(mode & GL_MAP_WRITE_BIT) &&
415	      irb->mt->region->tiling == I915_TILING_X) {
416      intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode,
417				  out_map, out_stride);
418   } else {
419      intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode,
420				 out_map, out_stride);
421   }
422}
423
424/**
425 * \see intel_map_renderbuffer_s8()
426 */
427static void
428intel_unmap_renderbuffer_s8(struct gl_context *ctx,
429			    struct gl_renderbuffer *rb)
430{
431   struct intel_context *intel = intel_context(ctx);
432   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
433
434   DBG("%s: rb %d (%s)\n", __FUNCTION__,
435       rb->Name, _mesa_get_format_name(rb->Format));
436
437   assert(rb->Format == MESA_FORMAT_S8);
438
439   if (!irb->map_buffer)
440      return;
441
442   if (irb->map_mode & GL_MAP_WRITE_BIT) {
443      /* The temporary buffer was written to, so we must copy its pixels into
444       * the real buffer.
445       */
446      uint8_t *untiled_s8_map = irb->map_buffer;
447      uint8_t *tiled_s8_map = irb->mt->region->bo->virtual;
448
449      /* Flip the Y axis for the default framebuffer. */
450      int y_flip = (rb->Name == 0) ? -1 : 1;
451      int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
452
453      for (uint32_t pix_y = 0; pix_y < irb->map_h; pix_y++) {
454	 for (uint32_t pix_x = 0; pix_x < irb->map_w; pix_x++) {
455	    uint32_t flipped_y = y_flip * (int32_t)(pix_y + irb->map_y) + y_bias;
456	    ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch,
457	                                       pix_x + irb->map_x,
458	                                       flipped_y);
459	    tiled_s8_map[offset] =
460	       untiled_s8_map[pix_y * irb->map_w + pix_x];
461	 }
462      }
463   }
464
465   intel_region_unmap(intel, irb->mt->region);
466   free(irb->map_buffer);
467   irb->map_buffer = NULL;
468}
469
470/**
471 * \brief Unmap a depthstencil renderbuffer with separate stencil.
472 *
473 * \see intel_map_renderbuffer_separate_s8z24()
474 * \see intel_unmap_renderbuffer()
475 */
476static void
477intel_unmap_renderbuffer_separate_s8z24(struct gl_context *ctx,
478				        struct gl_renderbuffer *rb)
479{
480   struct intel_context *intel = intel_context(ctx);
481   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
482   struct intel_renderbuffer *s8z24_irb;
483
484   assert(rb->Name != 0);
485   assert(rb->Format == MESA_FORMAT_S8_Z24);
486   assert(irb->wrapped_depth != NULL);
487   assert(irb->wrapped_stencil != NULL);
488
489   s8z24_irb = intel_renderbuffer(irb->wrapped_depth);
490
491   if (irb->map_mode & GL_MAP_WRITE_BIT) {
492      /* Copy the stencil bits from the depth buffer into the stencil buffer.
493       */
494      uint32_t map_x = irb->map_x;
495      uint32_t map_y = irb->map_y;
496      uint32_t map_w = irb->map_w;
497      uint32_t map_h = irb->map_h;
498
499      struct intel_renderbuffer *s8_irb;
500      uint8_t *s8_map;
501
502      s8_irb = intel_renderbuffer(irb->wrapped_stencil);
503      s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_WRITE_BIT);
504
505      int32_t s8z24_stride = 4 * s8z24_irb->mt->region->pitch;
506      uint8_t *s8z24_map = s8z24_irb->mt->region->bo->virtual
507			 + map_y * s8z24_stride
508			 + map_x * 4;
509
510      for (uint32_t pix_y = 0; pix_y < map_h; ++pix_y) {
511	 for (uint32_t pix_x = 0; pix_x < map_w; ++pix_x) {
512	    ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch,
513						  map_x + pix_x,
514						  map_y + pix_y);
515	    ptrdiff_t s8z24_offset = pix_y * s8z24_stride
516				   + pix_x * 4
517				   + 3;
518	    s8_map[s8_offset] = s8z24_map[s8z24_offset];
519	 }
520      }
521
522      intel_region_unmap(intel, s8_irb->mt->region);
523   }
524
525   drm_intel_gem_bo_unmap_gtt(s8z24_irb->mt->region->bo);
526}
527
528/**
529 * \see dd_function_table::UnmapRenderbuffer
530 */
531static void
532intel_unmap_renderbuffer(struct gl_context *ctx,
533			 struct gl_renderbuffer *rb)
534{
535   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
536
537   DBG("%s: rb %d (%s)\n", __FUNCTION__,
538       rb->Name, _mesa_get_format_name(rb->Format));
539
540   if (rb->Format == MESA_FORMAT_S8) {
541      intel_unmap_renderbuffer_s8(ctx, rb);
542   } else if (irb->wrapped_depth) {
543      intel_unmap_renderbuffer_separate_s8z24(ctx, rb);
544   } else if (irb->map_bo) {
545      /* Paired with intel_map_renderbuffer_blit(). */
546      drm_intel_bo_unmap(irb->map_bo);
547      drm_intel_bo_unreference(irb->map_bo);
548      irb->map_bo = 0;
549   } else {
550      /* Paired with intel_map_renderbuffer_gtt(). */
551      if (irb->mt) {
552	 /* The miptree may be null when intel_map_renderbuffer() is
553	  * called from intel_span.c.
554	  */
555	 drm_intel_gem_bo_unmap_gtt(irb->mt->region->bo);
556      }
557   }
558}
559
560/**
561 * Return a pointer to a specific pixel in a renderbuffer.
562 */
563static void *
564intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
565                  GLint x, GLint y)
566{
567   /* By returning NULL we force all software rendering to go through
568    * the span routines.
569    */
570   return NULL;
571}
572
573
574/**
575 * Called via glRenderbufferStorageEXT() to set the format and allocate
576 * storage for a user-created renderbuffer.
577 */
578GLboolean
579intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
580                                 GLenum internalFormat,
581                                 GLuint width, GLuint height)
582{
583   struct intel_context *intel = intel_context(ctx);
584   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
585   int cpp, tiling;
586
587   ASSERT(rb->Name != 0);
588
589   switch (internalFormat) {
590   default:
591      /* Use the same format-choice logic as for textures.
592       * Renderbuffers aren't any different from textures for us,
593       * except they're less useful because you can't texture with
594       * them.
595       */
596      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
597							 GL_NONE, GL_NONE);
598      break;
599   case GL_STENCIL_INDEX:
600   case GL_STENCIL_INDEX1_EXT:
601   case GL_STENCIL_INDEX4_EXT:
602   case GL_STENCIL_INDEX8_EXT:
603   case GL_STENCIL_INDEX16_EXT:
604      /* These aren't actual texture formats, so force them here. */
605      if (intel->has_separate_stencil) {
606	 rb->Format = MESA_FORMAT_S8;
607      } else {
608	 assert(!intel->must_use_separate_stencil);
609	 rb->Format = MESA_FORMAT_S8_Z24;
610      }
611      break;
612   }
613
614   rb->Width = width;
615   rb->Height = height;
616   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
617   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
618   cpp = _mesa_get_format_bytes(rb->Format);
619
620   intel_flush(ctx);
621
622   intel_miptree_release(&irb->mt);
623
624   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
625       _mesa_lookup_enum_by_nr(internalFormat),
626       _mesa_get_format_name(rb->Format), width, height);
627
628   tiling = I915_TILING_NONE;
629   if (intel->use_texture_tiling) {
630      GLenum base_format = _mesa_get_format_base_format(rb->Format);
631
632      if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT ||
633			      base_format == GL_STENCIL_INDEX ||
634			      base_format == GL_DEPTH_STENCIL))
635	 tiling = I915_TILING_Y;
636      else
637	 tiling = I915_TILING_X;
638   }
639
640   if (irb->Base.Format == MESA_FORMAT_S8) {
641      /*
642       * The stencil buffer is W tiled. However, we request from the kernel a
643       * non-tiled buffer because the GTT is incapable of W fencing.
644       *
645       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
646       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
647       *    The pitch must be set to 2x the value computed based on width, as
648       *    the stencil buffer is stored with two rows interleaved.
649       * To accomplish this, we resort to the nasty hack of doubling the drm
650       * region's cpp and halving its height.
651       *
652       * If we neglect to double the pitch, then render corruption occurs.
653       */
654      irb->mt = intel_miptree_create_for_renderbuffer(
655		  intel,
656		  rb->Format,
657		  I915_TILING_NONE,
658		  cpp * 2,
659		  ALIGN(width, 64),
660		  ALIGN((height + 1) / 2, 64));
661      if (!irb->mt)
662	 return false;
663
664   } else if (irb->Base.Format == MESA_FORMAT_S8_Z24
665	      && intel->must_use_separate_stencil) {
666
667      bool ok = true;
668      struct gl_renderbuffer *depth_rb;
669      struct gl_renderbuffer *stencil_rb;
670
671      depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
672						   MESA_FORMAT_X8_Z24);
673      stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
674						     MESA_FORMAT_S8);
675      ok = depth_rb && stencil_rb;
676      ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb,
677						  depth_rb->InternalFormat,
678						  width, height);
679      ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb,
680						  stencil_rb->InternalFormat,
681						  width, height);
682
683      if (!ok) {
684	 if (depth_rb) {
685	    intel_delete_renderbuffer(depth_rb);
686	 }
687	 if (stencil_rb) {
688	    intel_delete_renderbuffer(stencil_rb);
689	 }
690	 return false;
691      }
692
693      depth_rb->Wrapped = rb;
694      stencil_rb->Wrapped = rb;
695      _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb);
696      _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb);
697
698   } else {
699      irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
700                                                      tiling, cpp,
701                                                      width, height);
702      if (!irb->mt)
703	 return false;
704
705      if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
706	 irb->mt->hiz_region = intel_region_alloc(intel->intelScreen,
707	                                          I915_TILING_Y,
708	                                          cpp,
709	                                          rb->Width,
710	                                          rb->Height,
711	                                          true);
712	 if (!irb->mt->hiz_region) {
713	    intel_miptree_release(&irb->mt);
714	    return false;
715	 }
716      }
717   }
718
719   return true;
720}
721
722
723#if FEATURE_OES_EGL_image
724static void
725intel_image_target_renderbuffer_storage(struct gl_context *ctx,
726					struct gl_renderbuffer *rb,
727					void *image_handle)
728{
729   struct intel_context *intel = intel_context(ctx);
730   struct intel_renderbuffer *irb;
731   __DRIscreen *screen;
732   __DRIimage *image;
733
734   screen = intel->intelScreen->driScrnPriv;
735   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
736					      screen->loaderPrivate);
737   if (image == NULL)
738      return;
739
740   /* __DRIimage is opaque to the core so it has to be checked here */
741   switch (image->format) {
742   case MESA_FORMAT_RGBA8888_REV:
743      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
744            "glEGLImageTargetRenderbufferStorage(unsupported image format");
745      return;
746      break;
747   default:
748      break;
749   }
750
751   irb = intel_renderbuffer(rb);
752   intel_miptree_release(&irb->mt);
753   irb->mt = intel_miptree_create_for_region(intel,
754                                             GL_TEXTURE_2D,
755                                             image->format,
756                                             image->region);
757   if (!irb->mt)
758      return;
759
760   rb->InternalFormat = image->internal_format;
761   rb->Width = image->region->width;
762   rb->Height = image->region->height;
763   rb->Format = image->format;
764   rb->DataType = image->data_type;
765   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
766					   image->internal_format);
767}
768#endif
769
770/**
771 * Called for each hardware renderbuffer when a _window_ is resized.
772 * Just update fields.
773 * Not used for user-created renderbuffers!
774 */
775static GLboolean
776intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
777                           GLenum internalFormat, GLuint width, GLuint height)
778{
779   ASSERT(rb->Name == 0);
780   rb->Width = width;
781   rb->Height = height;
782   rb->InternalFormat = internalFormat;
783
784   return true;
785}
786
787
788static void
789intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
790		     GLuint width, GLuint height)
791{
792   int i;
793
794   _mesa_resize_framebuffer(ctx, fb, width, height);
795
796   fb->Initialized = true; /* XXX remove someday */
797
798   if (fb->Name != 0) {
799      return;
800   }
801
802
803   /* Make sure all window system renderbuffers are up to date */
804   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
805      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
806
807      /* only resize if size is changing */
808      if (rb && (rb->Width != width || rb->Height != height)) {
809	 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
810      }
811   }
812}
813
814
815/** Dummy function for gl_renderbuffer::AllocStorage() */
816static GLboolean
817intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
818                        GLenum internalFormat, GLuint width, GLuint height)
819{
820   _mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
821   return false;
822}
823
824/**
825 * Create a new intel_renderbuffer which corresponds to an on-screen window,
826 * not a user-created renderbuffer.
827 */
828struct intel_renderbuffer *
829intel_create_renderbuffer(gl_format format)
830{
831   GET_CURRENT_CONTEXT(ctx);
832
833   struct intel_renderbuffer *irb;
834
835   irb = CALLOC_STRUCT(intel_renderbuffer);
836   if (!irb) {
837      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
838      return NULL;
839   }
840
841   _mesa_init_renderbuffer(&irb->Base, 0);
842   irb->Base.ClassID = INTEL_RB_CLASS;
843   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
844   irb->Base.Format = format;
845   irb->Base.InternalFormat = irb->Base._BaseFormat;
846   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);
847
848   /* intel-specific methods */
849   irb->Base.Delete = intel_delete_renderbuffer;
850   irb->Base.AllocStorage = intel_alloc_window_storage;
851   irb->Base.GetPointer = intel_get_pointer;
852
853   return irb;
854}
855
856
857struct gl_renderbuffer*
858intel_create_wrapped_renderbuffer(struct gl_context * ctx,
859				  int width, int height,
860				  gl_format format)
861{
862   /*
863    * The name here is irrelevant, as long as its nonzero, because the
864    * renderbuffer never gets entered into Mesa's renderbuffer hash table.
865    */
866   GLuint name = ~0;
867
868   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
869   if (!irb) {
870      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
871      return NULL;
872   }
873
874   struct gl_renderbuffer *rb = &irb->Base;
875   _mesa_init_renderbuffer(rb, name);
876   rb->ClassID = INTEL_RB_CLASS;
877   rb->_BaseFormat = _mesa_get_format_base_format(format);
878   rb->Format = format;
879   rb->InternalFormat = rb->_BaseFormat;
880   rb->DataType = intel_mesa_format_to_rb_datatype(format);
881   rb->Width = width;
882   rb->Height = height;
883
884   return rb;
885}
886
887
888/**
889 * Create a new renderbuffer object.
890 * Typically called via glBindRenderbufferEXT().
891 */
892static struct gl_renderbuffer *
893intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
894{
895   /*struct intel_context *intel = intel_context(ctx); */
896   struct intel_renderbuffer *irb;
897
898   irb = CALLOC_STRUCT(intel_renderbuffer);
899   if (!irb) {
900      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
901      return NULL;
902   }
903
904   _mesa_init_renderbuffer(&irb->Base, name);
905   irb->Base.ClassID = INTEL_RB_CLASS;
906
907   /* intel-specific methods */
908   irb->Base.Delete = intel_delete_renderbuffer;
909   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
910   irb->Base.GetPointer = intel_get_pointer;
911   /* span routines set in alloc_storage function */
912
913   return &irb->Base;
914}
915
916
917/**
918 * Called via glBindFramebufferEXT().
919 */
920static void
921intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
922                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
923{
924   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
925      intel_draw_buffer(ctx);
926   }
927   else {
928      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
929   }
930}
931
932
933/**
934 * Called via glFramebufferRenderbufferEXT().
935 */
936static void
937intel_framebuffer_renderbuffer(struct gl_context * ctx,
938                               struct gl_framebuffer *fb,
939                               GLenum attachment, struct gl_renderbuffer *rb)
940{
941   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
942
943   intel_flush(ctx);
944
945   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
946   intel_draw_buffer(ctx);
947}
948
949/**
950 * NOTE: The 'att' parameter is a kludge that will soon be removed. Its
951 * presence allows us to refactor the wrapping of depthstencil textures that
952 * use separate stencil in two easily manageable steps, rather than in one
953 * large, hairy step. First, refactor the common wrapping code used by all
954 * texture formats. Second, refactor the separate stencil code paths.
955 */
956static bool
957intel_renderbuffer_update_wrapper(struct intel_context *intel,
958                                  struct intel_renderbuffer *irb,
959                                  struct intel_mipmap_tree *mt,
960                                  uint32_t level,
961                                  uint32_t layer,
962                                  gl_format format,
963                                  GLenum internal_format,
964                                  struct gl_renderbuffer_attachment *att)
965{
966   struct gl_renderbuffer *rb = &irb->Base;
967
968   /* The image variables are a kludge. See the note above for the att
969    * parameter.
970    */
971   struct gl_texture_image *texImage = _mesa_get_attachment_teximage(att);
972   struct intel_texture_image *intel_image = intel_texture_image(texImage);
973
974   rb->Format = format;
975   if (!intel_span_supports_format(rb->Format)) {
976      DBG("Render to texture BAD FORMAT %s\n",
977	  _mesa_get_format_name(rb->Format));
978      return false;
979   } else {
980      DBG("Render to texture %s\n", _mesa_get_format_name(rb->Format));
981   }
982
983   rb->InternalFormat = internal_format;
984   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
985   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
986   rb->Width = mt->level[level].width;
987   rb->Height = mt->level[level].height;
988
989   irb->Base.Delete = intel_delete_renderbuffer;
990   irb->Base.AllocStorage = intel_nop_alloc_storage;
991
992   intel_miptree_check_level_layer(mt, level, layer);
993   irb->mt_level = level;
994   irb->mt_layer = layer;
995
996   if (intel_image->stencil_rb) {
997      /*  The tex image has packed depth/stencil format, but is using separate
998       *  stencil. It shares its embedded depth and stencil renderbuffers with
999       *  the renderbuffer wrapper.
1000       *
1001       *  FIXME: glFramebufferTexture*() is broken for depthstencil textures
1002       *  FIXME: with separate stencil. To fix this, we must create a separate
1003       *  FIXME: pair of depth/stencil renderbuffers for each attached slice
1004       *  FIXME: of the miptree.
1005       */
1006      struct intel_renderbuffer *depth_irb;
1007      struct intel_renderbuffer *stencil_irb;
1008
1009      _mesa_reference_renderbuffer(&irb->wrapped_depth,
1010				   intel_image->depth_rb);
1011      _mesa_reference_renderbuffer(&irb->wrapped_stencil,
1012				   intel_image->stencil_rb);
1013
1014      depth_irb = intel_renderbuffer(intel_image->depth_rb);
1015      depth_irb->mt_level = irb->mt_level;
1016      depth_irb->mt_layer = irb->mt_layer;
1017      intel_renderbuffer_set_draw_offset(depth_irb);
1018
1019      stencil_irb = intel_renderbuffer(intel_image->stencil_rb);
1020      stencil_irb->mt_level = irb->mt_level;
1021      stencil_irb->mt_layer = irb->mt_layer;
1022      intel_renderbuffer_set_draw_offset(stencil_irb);
1023   } else {
1024      intel_miptree_reference(&irb->mt, intel_image->mt);
1025      intel_renderbuffer_set_draw_offset(irb);
1026   }
1027
1028   return true;
1029}
1030
1031/**
1032 * \brief Wrap a renderbuffer around a single slice of a miptree.
1033 *
1034 * Called by glFramebufferTexture*(). This just allocates a
1035 * ``struct intel_renderbuffer`` then calls
1036 * intel_renderbuffer_update_wrapper() to do the real work.
1037 *
1038 * NOTE: The 'att' parameter is a kludge that will soon be removed. Its
1039 * presence allows us to refactor the wrapping of depthstencil textures that
1040 * use separate stencil in two easily manageable steps, rather than in one
1041 * large, hairy step. First, refactor the common wrapping code used by all
1042 * texture formats. Second, refactor the separate stencil code paths.
1043 *
1044 * \see intel_renderbuffer_update_wrapper()
1045 */
1046static struct intel_renderbuffer*
1047intel_renderbuffer_wrap_miptree(struct intel_context *intel,
1048                                struct intel_mipmap_tree *mt,
1049                                uint32_t level,
1050                                uint32_t layer,
1051                                gl_format format,
1052                                GLenum internal_format,
1053                                struct gl_renderbuffer_attachment *att)
1054
1055{
1056   const GLuint name = ~0;   /* not significant, but distinct for debugging */
1057   struct gl_context *ctx = &intel->ctx;
1058   struct intel_renderbuffer *irb;
1059
1060   intel_miptree_check_level_layer(mt, level, layer);
1061
1062   irb = CALLOC_STRUCT(intel_renderbuffer);
1063   if (!irb) {
1064      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
1065      return NULL;
1066   }
1067
1068   _mesa_init_renderbuffer(&irb->Base, name);
1069   irb->Base.ClassID = INTEL_RB_CLASS;
1070
1071   if (!intel_renderbuffer_update_wrapper(intel, irb,
1072                                          mt, level, layer,
1073                                          format, internal_format,
1074                                          att)) {
1075      free(irb);
1076      return NULL;
1077   }
1078
1079   return irb;
1080}
1081
1082void
1083intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
1084{
1085   unsigned int dst_x, dst_y;
1086
1087   /* compute offset of the particular 2D image within the texture region */
1088   intel_miptree_get_image_offset(irb->mt,
1089				  irb->mt_level,
1090				  0, /* face, which we ignore */
1091				  irb->mt_layer,
1092				  &dst_x, &dst_y);
1093
1094   irb->draw_x = dst_x;
1095   irb->draw_y = dst_y;
1096}
1097
1098/**
1099 * Rendering to tiled buffers requires that the base address of the
1100 * buffer be aligned to a page boundary.  We generally render to
1101 * textures by pointing the surface at the mipmap image level, which
1102 * may not be aligned to a tile boundary.
1103 *
1104 * This function returns an appropriately-aligned base offset
1105 * according to the tiling restrictions, plus any required x/y offset
1106 * from there.
1107 */
1108uint32_t
1109intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
1110				uint32_t *tile_x,
1111				uint32_t *tile_y)
1112{
1113   struct intel_region *region = irb->mt->region;
1114   int cpp = region->cpp;
1115   uint32_t pitch = region->pitch * cpp;
1116
1117   if (region->tiling == I915_TILING_NONE) {
1118      *tile_x = 0;
1119      *tile_y = 0;
1120      return irb->draw_x * cpp + irb->draw_y * pitch;
1121   } else if (region->tiling == I915_TILING_X) {
1122      *tile_x = irb->draw_x % (512 / cpp);
1123      *tile_y = irb->draw_y % 8;
1124      return ((irb->draw_y / 8) * (8 * pitch) +
1125	      (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
1126   } else {
1127      assert(region->tiling == I915_TILING_Y);
1128      *tile_x = irb->draw_x % (128 / cpp);
1129      *tile_y = irb->draw_y % 32;
1130      return ((irb->draw_y / 32) * (32 * pitch) +
1131	      (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
1132   }
1133}
1134
1135#ifndef I915
1136static bool
1137need_tile_offset_workaround(struct brw_context *brw,
1138			    struct intel_renderbuffer *irb)
1139{
1140   uint32_t tile_x, tile_y;
1141
1142   if (brw->has_surface_tile_offset)
1143      return false;
1144
1145   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);
1146
1147   return tile_x != 0 || tile_y != 0;
1148}
1149#endif
1150
1151/**
1152 * Called by glFramebufferTexture[123]DEXT() (and other places) to
1153 * prepare for rendering into texture memory.  This might be called
1154 * many times to choose different texture levels, cube faces, etc
1155 * before intel_finish_render_texture() is ever called.
1156 */
1157static void
1158intel_render_texture(struct gl_context * ctx,
1159                     struct gl_framebuffer *fb,
1160                     struct gl_renderbuffer_attachment *att)
1161{
1162   struct intel_context *intel = intel_context(ctx);
1163   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
1164   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
1165   struct intel_texture_image *intel_image = intel_texture_image(image);
1166   struct intel_mipmap_tree *mt = intel_image->mt;
1167
1168   (void) fb;
1169
1170   int layer;
1171   if (att->CubeMapFace > 0) {
1172      assert(att->Zoffset == 0);
1173      layer = att->CubeMapFace;
1174   } else {
1175      layer = att->Zoffset;
1176   }
1177
1178   if (!intel_image->mt) {
1179      /* Fallback on drawing to a texture that doesn't have a miptree
1180       * (has a border, width/height 0, etc.)
1181       */
1182      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
1183      _swrast_render_texture(ctx, fb, att);
1184      return;
1185   }
1186   else if (!irb) {
1187      irb = intel_renderbuffer_wrap_miptree(intel,
1188                                            mt,
1189                                            att->TextureLevel,
1190                                            layer,
1191                                            image->TexFormat,
1192                                            image->InternalFormat,
1193                                            att);
1194
1195      if (irb) {
1196         /* bind the wrapper to the attachment point */
1197         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
1198      }
1199      else {
1200         /* fallback to software rendering */
1201         _swrast_render_texture(ctx, fb, att);
1202         return;
1203      }
1204   }
1205
1206   if (!intel_renderbuffer_update_wrapper(intel, irb,
1207                                          mt, att->TextureLevel, layer,
1208                                          image->TexFormat,
1209                                          image->InternalFormat,
1210                                          att)) {
1211       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
1212       _swrast_render_texture(ctx, fb, att);
1213       return;
1214   }
1215
1216   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
1217       _glthread_GetID(),
1218       att->Texture->Name, image->Width, image->Height,
1219       irb->Base.RefCount);
1220
1221   intel_image->used_as_render_target = true;
1222
1223#ifndef I915
1224   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
1225      /* Original gen4 hardware couldn't draw to a non-tile-aligned
1226       * destination in a miptree unless you actually setup your
1227       * renderbuffer as a miptree and used the fragile
1228       * lod/array_index/etc. controls to select the image.  So,
1229       * instead, we just make a new single-level miptree and render
1230       * into that.
1231       */
1232      struct intel_context *intel = intel_context(ctx);
1233      struct intel_mipmap_tree *new_mt;
1234      int width, height, depth;
1235
1236      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
1237
1238      new_mt = intel_miptree_create(intel, image->TexObject->Target,
1239				    intel_image->base.Base.TexFormat,
1240				    intel_image->base.Base.Level,
1241				    intel_image->base.Base.Level,
1242                                    width, height, depth,
1243				    true);
1244
1245      intel_miptree_copy_teximage(intel, intel_image, new_mt);
1246      intel_renderbuffer_set_draw_offset(irb);
1247
1248      intel_miptree_reference(&irb->mt, intel_image->mt);
1249      intel_miptree_release(&new_mt);
1250   }
1251#endif
1252   /* update drawing region, etc */
1253   intel_draw_buffer(ctx);
1254}
1255
1256
1257/**
1258 * Called by Mesa when rendering to a texture is done.
1259 */
1260static void
1261intel_finish_render_texture(struct gl_context * ctx,
1262                            struct gl_renderbuffer_attachment *att)
1263{
1264   struct intel_context *intel = intel_context(ctx);
1265   struct gl_texture_object *tex_obj = att->Texture;
1266   struct gl_texture_image *image =
1267      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
1268   struct intel_texture_image *intel_image = intel_texture_image(image);
1269
1270   DBG("Finish render texture tid %lx tex=%u\n",
1271       _glthread_GetID(), att->Texture->Name);
1272
1273   /* Flag that this image may now be validated into the object's miptree. */
1274   if (intel_image)
1275      intel_image->used_as_render_target = false;
1276
1277   /* Since we've (probably) rendered to the texture and will (likely) use
1278    * it in the texture domain later on in this batchbuffer, flush the
1279    * batch.  Once again, we wish for a domain tracker in libdrm to cover
1280    * usage inside of a batchbuffer like GEM does in the kernel.
1281    */
1282   intel_batchbuffer_emit_mi_flush(intel);
1283}
1284
1285/**
1286 * Do additional "completeness" testing of a framebuffer object.
1287 */
1288static void
1289intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
1290{
1291   struct intel_context *intel = intel_context(ctx);
1292   const struct intel_renderbuffer *depthRb =
1293      intel_get_renderbuffer(fb, BUFFER_DEPTH);
1294   const struct intel_renderbuffer *stencilRb =
1295      intel_get_renderbuffer(fb, BUFFER_STENCIL);
1296   int i;
1297
1298   /*
1299    * The depth and stencil renderbuffers are the same renderbuffer or wrap
1300    * the same texture.
1301    */
1302   if (depthRb && stencilRb) {
1303      bool depth_stencil_are_same;
1304      if (depthRb == stencilRb)
1305	 depth_stencil_are_same = true;
1306      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
1307	       (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
1308	       (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
1309		fb->Attachment[BUFFER_STENCIL].Texture->Name))
1310	 depth_stencil_are_same = true;
1311      else
1312	 depth_stencil_are_same = false;
1313
1314      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
1315	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1316      }
1317   }
1318
1319   for (i = 0; i < Elements(fb->Attachment); i++) {
1320      struct gl_renderbuffer *rb;
1321      struct intel_renderbuffer *irb;
1322
1323      if (fb->Attachment[i].Type == GL_NONE)
1324	 continue;
1325
1326      /* A supported attachment will have a Renderbuffer set either
1327       * from being a Renderbuffer or being a texture that got the
1328       * intel_wrap_texture() treatment.
1329       */
1330      rb = fb->Attachment[i].Renderbuffer;
1331      if (rb == NULL) {
1332	 DBG("attachment without renderbuffer\n");
1333	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1334	 continue;
1335      }
1336
1337      irb = intel_renderbuffer(rb);
1338      if (irb == NULL) {
1339	 DBG("software rendering renderbuffer\n");
1340	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1341	 continue;
1342      }
1343
1344      if (!intel_span_supports_format(irb->Base.Format) ||
1345	  !intel->vtbl.render_target_supported(irb->Base.Format)) {
1346	 DBG("Unsupported texture/renderbuffer format attached: %s\n",
1347	     _mesa_get_format_name(irb->Base.Format));
1348	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1349      }
1350   }
1351}
1352
1353/**
1354 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
1355 * We can do this when the dst renderbuffer is actually a texture and
1356 * there is no scaling, mirroring or scissoring.
1357 *
1358 * \return new buffer mask indicating the buffers left to blit using the
1359 *         normal path.
1360 */
1361static GLbitfield
1362intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
1363                                          GLint srcX0, GLint srcY0,
1364                                          GLint srcX1, GLint srcY1,
1365                                          GLint dstX0, GLint dstY0,
1366                                          GLint dstX1, GLint dstY1,
1367                                          GLbitfield mask, GLenum filter)
1368{
1369   if (mask & GL_COLOR_BUFFER_BIT) {
1370      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
1371      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
1372      const struct gl_renderbuffer_attachment *drawAtt =
1373         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
1374
1375      /* If the source and destination are the same size with no
1376         mirroring, the rectangles are within the size of the
1377         texture and there is no scissor then we can use
1378         glCopyTexSubimage2D to implement the blit. This will end
1379         up as a fast hardware blit on some drivers */
1380      if (drawAtt && drawAtt->Texture &&
1381          srcX0 - srcX1 == dstX0 - dstX1 &&
1382          srcY0 - srcY1 == dstY0 - dstY1 &&
1383          srcX1 >= srcX0 &&
1384          srcY1 >= srcY0 &&
1385          srcX0 >= 0 && srcX1 <= readFb->Width &&
1386          srcY0 >= 0 && srcY1 <= readFb->Height &&
1387          dstX0 >= 0 && dstX1 <= drawFb->Width &&
1388          dstY0 >= 0 && dstY1 <= drawFb->Height &&
1389          !ctx->Scissor.Enabled) {
1390         const struct gl_texture_object *texObj = drawAtt->Texture;
1391         const GLuint dstLevel = drawAtt->TextureLevel;
1392         const GLenum target = texObj->Target;
1393
1394         struct gl_texture_image *texImage =
1395            _mesa_select_tex_image(ctx, texObj, target, dstLevel);
1396
1397         if (intel_copy_texsubimage(intel_context(ctx),
1398                                    intel_texture_image(texImage),
1399                                    dstX0, dstY0,
1400                                    srcX0, srcY0,
1401                                    srcX1 - srcX0, /* width */
1402                                    srcY1 - srcY0))
1403            mask &= ~GL_COLOR_BUFFER_BIT;
1404      }
1405   }
1406
1407   return mask;
1408}
1409
1410static void
1411intel_blit_framebuffer(struct gl_context *ctx,
1412                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
1413                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
1414                       GLbitfield mask, GLenum filter)
1415{
1416   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
1417   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
1418                                                    srcX0, srcY0, srcX1, srcY1,
1419                                                    dstX0, dstY0, dstX1, dstY1,
1420                                                    mask, filter);
1421   if (mask == 0x0)
1422      return;
1423
1424   _mesa_meta_BlitFramebuffer(ctx,
1425                              srcX0, srcY0, srcX1, srcY1,
1426                              dstX0, dstY0, dstX1, dstY1,
1427                              mask, filter);
1428}
1429
1430/**
1431 * Do one-time context initializations related to GL_EXT_framebuffer_object.
1432 * Hook in device driver functions.
1433 */
1434void
1435intel_fbo_init(struct intel_context *intel)
1436{
1437   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
1438   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
1439   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
1440   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
1441   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
1442   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
1443   intel->ctx.Driver.RenderTexture = intel_render_texture;
1444   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
1445   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
1446   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
1447   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
1448
1449#if FEATURE_OES_EGL_image
1450   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
1451      intel_image_target_renderbuffer_storage;
1452#endif
1453}
1454