intel_fbo.c revision cc502aa9419a6fb127b264dbb131c786281cb8c7
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "main/enums.h"
30#include "main/imports.h"
31#include "main/macros.h"
32#include "main/mfeatures.h"
33#include "main/mtypes.h"
34#include "main/fbobject.h"
35#include "main/framebuffer.h"
36#include "main/renderbuffer.h"
37#include "main/context.h"
38#include "main/teximage.h"
39#include "swrast/swrast.h"
40#include "drivers/common/meta.h"
41
42#include "intel_context.h"
43#include "intel_batchbuffer.h"
44#include "intel_buffers.h"
45#include "intel_blit.h"
46#include "intel_fbo.h"
47#include "intel_mipmap_tree.h"
48#include "intel_regions.h"
49#include "intel_tex.h"
50#include "intel_span.h"
51#ifndef I915
52#include "brw_context.h"
53#endif
54
55#define FILE_DEBUG_FLAG DEBUG_FBO
56
57
58/**
59 * Create a new framebuffer object.
60 */
61static struct gl_framebuffer *
62intel_new_framebuffer(struct gl_context * ctx, GLuint name)
63{
64   /* Only drawable state lives in intel_framebuffer at this time, so just
65    * use Mesa's class.
66    */
67   return _mesa_new_framebuffer(ctx, name);
68}
69
70
71/** Called by gl_renderbuffer::Delete() */
72static void
73intel_delete_renderbuffer(struct gl_renderbuffer *rb)
74{
75   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
76
77   ASSERT(irb);
78
79   intel_region_release(&irb->region);
80   intel_region_release(&irb->hiz_region);
81
82   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
83   _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);
84
85   free(irb);
86}
87
88/**
89 * \brief Map a renderbuffer through the GTT.
90 *
91 * \see intel_map_renderbuffer()
92 */
93static void
94intel_map_renderbuffer_gtt(struct gl_context *ctx,
95                           struct gl_renderbuffer *rb,
96                           GLuint x, GLuint y, GLuint w, GLuint h,
97                           GLbitfield mode,
98                           GLubyte **out_map,
99                           GLint *out_stride)
100{
101   struct intel_context *intel = intel_context(ctx);
102   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
103   GLubyte *map;
104   int stride, flip_stride;
105
106   assert(irb->region);
107
108   irb->map_mode = mode;
109   irb->map_x = x;
110   irb->map_y = y;
111   irb->map_w = w;
112   irb->map_h = h;
113
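   /* region->pitch is stored in pixels; multiply by cpp for the byte stride. */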
114   stride = irb->region->pitch * irb->region->cpp;
115
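   /* The window system framebuffer (Name == 0) is stored top-row-first, so
    * flip Y and hand back a negative stride so callers still see GL's
    * bottom-to-top row order.  User renderbuffers instead get offset by
    * their draw offset within the region.
    */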
116   if (rb->Name == 0) {
117      y = irb->region->height - 1 - y;
118      flip_stride = -stride;
119   } else {
120      x += irb->draw_x;
121      y += irb->draw_y;
122      flip_stride = stride;
123   }
124
125   if (drm_intel_bo_references(intel->batch.bo, irb->region->bo)) {
126      intel_batchbuffer_flush(intel);
127   }
128
129   drm_intel_gem_bo_map_gtt(irb->region->bo);
130
131   map = irb->region->bo->virtual;
132   map += x * irb->region->cpp;
133   map += (int)y * stride;
134
135   *out_map = map;
136   *out_stride = flip_stride;
137
138   DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
139       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
140       x, y, w, h, *out_map, *out_stride);
141}
142
143/**
144 * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
145 *
146 * On gen6+, we have LLC sharing, which means we can get high-performance
147 * access to linear-mapped buffers.
148 *
149 * This function allocates a temporary gem buffer at
150 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
151 * returns a map of that. (Note: Only X tiled buffers can be blitted).
152 *
153 * \see intel_renderbuffer::map_bo
154 * \see intel_map_renderbuffer()
155 */
156static void
157intel_map_renderbuffer_blit(struct gl_context *ctx,
158			    struct gl_renderbuffer *rb,
159			    GLuint x, GLuint y, GLuint w, GLuint h,
160			    GLbitfield mode,
161			    GLubyte **out_map,
162			    GLint *out_stride)
163{
164   struct intel_context *intel = intel_context(ctx);
165   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
166
167   int src_x, src_y;
168   int dst_stride;
169
170   assert(irb->region);
171   assert(intel->gen >= 6);
172   assert(!(mode & GL_MAP_WRITE_BIT));
173   assert(irb->region->tiling == I915_TILING_X);
174
175   irb->map_mode = mode;
176   irb->map_x = x;
177   irb->map_y = y;
178   irb->map_w = w;
179   irb->map_h = h;
180
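   /* Byte pitch of the temporary linear buffer, padded to a dword multiple. */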
181   dst_stride = ALIGN(w * irb->region->cpp, 4);
182
183   if (rb->Name) {
184      src_x = x + irb->draw_x;
185      src_y = y + irb->draw_y;
186   } else {
187      src_x = x;
188      src_y = irb->region->height - y - h;
189   }
190
191   irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
192				    dst_stride * h, 4096);
193
194   /* We don't do the flip in the blit, because it's always so tricky to get
195    * right.
196    */
197   if (irb->map_bo &&
198       intelEmitCopyBlit(intel,
199			 irb->region->cpp,
200			 irb->region->pitch, irb->region->bo,
201			 0, irb->region->tiling,
202			 dst_stride / irb->region->cpp, irb->map_bo,
203			 0, I915_TILING_NONE,
204			 src_x, src_y,
205			 0, 0,
206			 w, h,
207			 GL_COPY)) {
208      intel_batchbuffer_flush(intel);
209      drm_intel_bo_map(irb->map_bo, false);
210
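      /* The blit copied the rows unflipped, so for the window system buffer
       * return a pointer to the last row with a negative stride.
       */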
211      if (rb->Name) {
212	 *out_map = irb->map_bo->virtual;
213	 *out_stride = dst_stride;
214      } else {
215	 *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
216	 *out_stride = -dst_stride;
217      }
218
219      DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
220	  __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
221	  src_x, src_y, w, h, *out_map, *out_stride);
222   } else {
223      /* Fallback to GTT mapping. */
224      drm_intel_bo_unreference(irb->map_bo);
225      irb->map_bo = NULL;
226      intel_map_renderbuffer_gtt(ctx, rb,
227				 x, y, w, h,
228				 mode,
229				 out_map, out_stride);
230   }
231}
232
233/**
234 * \brief Map a stencil renderbuffer.
235 *
236 * Stencil buffers are W-tiled. Since the GTT has no W fence, we must detile
237 * the buffer in software.
238 *
239 * This function allocates a temporary malloc'd buffer at
240 * intel_renderbuffer::map_buffer, detiles the stencil buffer into it, then
241 * returns the temporary buffer as the map.
242 *
243 * \see intel_renderbuffer::map_buffer
244 * \see intel_map_renderbuffer()
245 * \see intel_unmap_renderbuffer_s8()
246 */
247static void
248intel_map_renderbuffer_s8(struct gl_context *ctx,
249			  struct gl_renderbuffer *rb,
250			  GLuint x, GLuint y, GLuint w, GLuint h,
251			  GLbitfield mode,
252			  GLubyte **out_map,
253			  GLint *out_stride)
254{
255   struct intel_context *intel = intel_context(ctx);
256   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
257   uint8_t *tiled_s8_map;
258   uint8_t *untiled_s8_map;
259
260   assert(rb->Format == MESA_FORMAT_S8);
261   assert(irb->region);
262
263   irb->map_mode = mode;
264   irb->map_x = x;
265   irb->map_y = y;
266   irb->map_w = w;
267   irb->map_h = h;
268
269   /* Flip the Y axis for the default framebuffer. */
270   int region_h = irb->region->height;
271   int y_flip = (rb->Name == 0) ? -1 : 1;
272   int y_bias = (rb->Name == 0) ? (region_h * 2 + region_h % 2 - 1) : 0;
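   /* The S8 region is allocated with half the logical height (two rows are
    * interleaved; see intel_alloc_renderbuffer_storage), so the logical
    * stencil height is about 2 * region_h and flipped_y below is computed
    * against that full height.
    */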
273
274   irb->map_buffer = malloc(w * h);
275   untiled_s8_map = irb->map_buffer;
276   tiled_s8_map = intel_region_map(intel, irb->region, mode);
277
278   for (uint32_t pix_y = 0; pix_y < h; pix_y++) {
279      for (uint32_t pix_x = 0; pix_x < w; pix_x++) {
280	 uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
281	 ptrdiff_t offset = intel_offset_S8(irb->region->pitch,
282	                                    x + pix_x,
283	                                    flipped_y);
284	 untiled_s8_map[pix_y * w + pix_x] = tiled_s8_map[offset];
285      }
286   }
287
288   *out_map = untiled_s8_map;
289   *out_stride = w;
290
291   DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n",
292       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
293       x, y, w, h, *out_map, *out_stride);
294}
295
296/**
297 * \brief Map a depthstencil buffer with separate stencil.
298 *
299 * A depthstencil renderbuffer, if using separate stencil, consists of a depth
300 * renderbuffer and a hidden stencil renderbuffer.  This function maps the
301 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and
302 * returns that as the mapped pointer. The caller need not be aware of the
303 * hidden stencil buffer and may safely assume that the mapped pointer points
304 * to a MESA_FORMAT_S8_Z24 buffer.
305 *
306 * The consistency between the depth buffer's S8 bits and the hidden stencil
307 * buffer is managed within intel_map_renderbuffer() and
308 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits
309 * according to the map mode.
310 *
311 * \see intel_map_renderbuffer()
312 * \see intel_unmap_renderbuffer_separate_s8z24()
313 */
314static void
315intel_map_renderbuffer_separate_s8z24(struct gl_context *ctx,
316				      struct gl_renderbuffer *rb,
317				      GLuint x, GLuint y, GLuint w, GLuint h,
318				      GLbitfield mode,
319				      GLubyte **out_map,
320				      GLint *out_stride)
321{
322   struct intel_context *intel = intel_context(ctx);
323   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
324
325   GLbitfield adjusted_mode;
326
327   uint8_t *s8z24_map;
328   int32_t s8z24_stride;
329
330   assert(rb->Name != 0);
331   assert(rb->Format == MESA_FORMAT_S8_Z24);
332   assert(irb->wrapped_depth != NULL);
333   assert(irb->wrapped_stencil != NULL);
334
335   irb->map_mode = mode;
336   irb->map_x = x;
337   irb->map_y = y;
338   irb->map_w = w;
339   irb->map_h = h;
340
341   if (mode & GL_MAP_READ_BIT) {
342      /* Since the caller may read the stencil bits, we must copy the stencil
343       * buffer's contents into the depth buffer. This necessitates that the
344       * depth buffer be mapped in write mode.
345       */
346      adjusted_mode = mode | GL_MAP_WRITE_BIT;
347   } else {
348      adjusted_mode = mode;
349   }
350
351   intel_map_renderbuffer_gtt(ctx, irb->wrapped_depth,
352			       x, y, w, h, adjusted_mode,
353			       &s8z24_map, &s8z24_stride);
354
355   if (mode & GL_MAP_READ_BIT) {
356      struct intel_renderbuffer *s8_irb;
357      uint8_t *s8_map;
358
359      s8_irb = intel_renderbuffer(irb->wrapped_stencil);
360      s8_map = intel_region_map(intel, s8_irb->region, GL_MAP_READ_BIT);
361
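      /* Gather: copy each stencil byte from the separate S8 buffer into
       * byte 3 of the corresponding packed pixel, where the S8 bits of
       * MESA_FORMAT_S8_Z24 live.
       */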
362      for (uint32_t pix_y = 0; pix_y < h; ++pix_y) {
363	 for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
364	    ptrdiff_t s8_offset = intel_offset_S8(s8_irb->region->pitch,
365						  x + pix_x,
366						  y + pix_y);
367	    ptrdiff_t s8z24_offset = pix_y * s8z24_stride
368				   + pix_x * 4
369				   + 3;
370	    s8z24_map[s8z24_offset] = s8_map[s8_offset];
371	 }
372      }
373
374      intel_region_unmap(intel, s8_irb->region);
375   }
376
377   *out_map = s8z24_map;
378   *out_stride = s8z24_stride;
379}
380
381/**
382 * \see dd_function_table::MapRenderbuffer
383 */
384static void
385intel_map_renderbuffer(struct gl_context *ctx,
386		       struct gl_renderbuffer *rb,
387		       GLuint x, GLuint y, GLuint w, GLuint h,
388		       GLbitfield mode,
389		       GLubyte **out_map,
390		       GLint *out_stride)
391{
392   struct intel_context *intel = intel_context(ctx);
393   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
394
395   /* intel_span.c sometimes calls us before any storage has been allocated. */
396   if (!irb->region && !irb->wrapped_depth) {
397      *out_map = NULL;
398      *out_stride = 0;
399      return;
400   }
401
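   /* Pick a mapping strategy: S8 needs software W-detiling, packed
    * depth/stencil backed by separate stencil needs a stencil gather/scatter,
    * read-only X-tiled buffers on gen6+ can go through a temporary blit, and
    * everything else maps directly through the GTT.
    */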
402   if (rb->Format == MESA_FORMAT_S8) {
403      intel_map_renderbuffer_s8(ctx, rb, x, y, w, h, mode,
404			        out_map, out_stride);
405   } else if (irb->wrapped_depth) {
406      intel_map_renderbuffer_separate_s8z24(ctx, rb, x, y, w, h, mode,
407					    out_map, out_stride);
408   } else if (intel->gen >= 6 &&
409	      !(mode & GL_MAP_WRITE_BIT) &&
410	      irb->region->tiling == I915_TILING_X) {
411      intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode,
412				  out_map, out_stride);
413   } else {
414      intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode,
415				 out_map, out_stride);
416   }
417}
418
419/**
420 * \see intel_map_renderbuffer_s8()
421 */
422static void
423intel_unmap_renderbuffer_s8(struct gl_context *ctx,
424			    struct gl_renderbuffer *rb)
425{
426   struct intel_context *intel = intel_context(ctx);
427   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
428
429   DBG("%s: rb %d (%s)\n", __FUNCTION__,
430       rb->Name, _mesa_get_format_name(rb->Format));
431
432   assert(rb->Format == MESA_FORMAT_S8);
433
434   if (!irb->map_buffer)
435      return;
436
437   if (irb->map_mode & GL_MAP_WRITE_BIT) {
438      /* The temporary buffer was written to, so we must copy its pixels into
439       * the real buffer.
440       */
441      uint8_t *untiled_s8_map = irb->map_buffer;
442      uint8_t *tiled_s8_map = irb->region->bo->virtual;
443
444      /* Flip the Y axis for the default framebuffer. */
445      int region_h = irb->region->height;
446      int y_flip = (rb->Name == 0) ? -1 : 1;
447      int y_bias = (rb->Name == 0) ? (region_h * 2 + region_h % 2 - 1) : 0;
448
449      for (uint32_t pix_y = 0; pix_y < irb->map_h; pix_y++) {
450	 for (uint32_t pix_x = 0; pix_x < irb->map_w; pix_x++) {
451	    uint32_t flipped_y = y_flip * (int32_t)(pix_y + irb->map_y) + y_bias;
452	    ptrdiff_t offset = intel_offset_S8(irb->region->pitch,
453	                                       pix_x + irb->map_x,
454	                                       flipped_y);
455	    tiled_s8_map[offset] =
456	       untiled_s8_map[pix_y * irb->map_w + pix_x];
457	 }
458      }
459   }
460
461   intel_region_unmap(intel, irb->region);
462   free(irb->map_buffer);
463   irb->map_buffer = NULL;
464}
465
466/**
467 * \brief Unmap a depthstencil renderbuffer with separate stencil.
468 *
469 * \see intel_map_renderbuffer_separate_s8z24()
470 * \see intel_unmap_renderbuffer()
471 */
472static void
473intel_unmap_renderbuffer_separate_s8z24(struct gl_context *ctx,
474				        struct gl_renderbuffer *rb)
475{
476   struct intel_context *intel = intel_context(ctx);
477   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
478   struct intel_renderbuffer *s8z24_irb;
479
480   assert(rb->Name != 0);
481   assert(rb->Format == MESA_FORMAT_S8_Z24);
482   assert(irb->wrapped_depth != NULL);
483   assert(irb->wrapped_stencil != NULL);
484
485   s8z24_irb = intel_renderbuffer(irb->wrapped_depth);
486
487   if (irb->map_mode & GL_MAP_WRITE_BIT) {
488      /* Copy the stencil bits from the depth buffer into the stencil buffer.
489       */
490      uint32_t map_x = irb->map_x;
491      uint32_t map_y = irb->map_y;
492      uint32_t map_w = irb->map_w;
493      uint32_t map_h = irb->map_h;
494
495      struct intel_renderbuffer *s8_irb;
496      uint8_t *s8_map;
497
498      s8_irb = intel_renderbuffer(irb->wrapped_stencil);
499      s8_map = intel_region_map(intel, s8_irb->region, GL_MAP_WRITE_BIT);
500
501      int32_t s8z24_stride = 4 * s8z24_irb->region->pitch;
502      uint8_t *s8z24_map = s8z24_irb->region->bo->virtual
503			 + map_y * s8z24_stride
504			 + map_x * 4;
505
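      /* Scatter: copy the stencil byte back out of byte 3 of each packed
       * pixel into the real S8 buffer, the reverse of the gather done in
       * intel_map_renderbuffer_separate_s8z24().
       */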
506      for (uint32_t pix_y = 0; pix_y < map_h; ++pix_y) {
507	 for (uint32_t pix_x = 0; pix_x < map_w; ++pix_x) {
508	    ptrdiff_t s8_offset = intel_offset_S8(s8_irb->region->pitch,
509						  map_x + pix_x,
510						  map_y + pix_y);
511	    ptrdiff_t s8z24_offset = pix_y * s8z24_stride
512				   + pix_x * 4
513				   + 3;
514	    s8_map[s8_offset] = s8z24_map[s8z24_offset];
515	 }
516      }
517
518      intel_region_unmap(intel, s8_irb->region);
519   }
520
521   drm_intel_gem_bo_unmap_gtt(s8z24_irb->region->bo);
522}
523
524/**
525 * \see dd_function_table::UnmapRenderbuffer
526 */
527static void
528intel_unmap_renderbuffer(struct gl_context *ctx,
529			 struct gl_renderbuffer *rb)
530{
531   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
532
533   DBG("%s: rb %d (%s)\n", __FUNCTION__,
534       rb->Name, _mesa_get_format_name(rb->Format));
535
536   if (rb->Format == MESA_FORMAT_S8) {
537      intel_unmap_renderbuffer_s8(ctx, rb);
538   } else if (irb->wrapped_depth) {
539      intel_unmap_renderbuffer_separate_s8z24(ctx, rb);
540   } else if (irb->map_bo) {
541      /* Paired with intel_map_renderbuffer_blit(). */
542      drm_intel_bo_unmap(irb->map_bo);
543      drm_intel_bo_unreference(irb->map_bo);
544      irb->map_bo = NULL;
545   } else {
546      /* Paired with intel_map_renderbuffer_gtt(). */
547      if (irb->region) {
548	 /* The region may be null when intel_map_renderbuffer() is
549	  * called from intel_span.c.
550	  */
551	 drm_intel_gem_bo_unmap_gtt(irb->region->bo);
552      }
553   }
554}
555
556/**
557 * Return a pointer to a specific pixel in a renderbuffer.
558 */
559static void *
560intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
561                  GLint x, GLint y)
562{
563   /* By returning NULL we force all software rendering to go through
564    * the span routines.
565    */
566   return NULL;
567}
568
569
570/**
571 * Called via glRenderbufferStorageEXT() to set the format and allocate
572 * storage for a user-created renderbuffer.
573 */
574GLboolean
575intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
576                                 GLenum internalFormat,
577                                 GLuint width, GLuint height)
578{
579   struct intel_context *intel = intel_context(ctx);
580   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
581   int cpp, tiling;
582
583   ASSERT(rb->Name != 0);
584
585   switch (internalFormat) {
586   default:
587      /* Use the same format-choice logic as for textures.
588       * Renderbuffers aren't any different from textures for us,
589       * except they're less useful because you can't texture with
590       * them.
591       */
592      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
593							 GL_NONE, GL_NONE);
594      break;
595   case GL_STENCIL_INDEX:
596   case GL_STENCIL_INDEX1_EXT:
597   case GL_STENCIL_INDEX4_EXT:
598   case GL_STENCIL_INDEX8_EXT:
599   case GL_STENCIL_INDEX16_EXT:
600      /* These aren't actual texture formats, so force them here. */
601      if (intel->has_separate_stencil) {
602	 rb->Format = MESA_FORMAT_S8;
603      } else {
604	 assert(!intel->must_use_separate_stencil);
605	 rb->Format = MESA_FORMAT_S8_Z24;
606      }
607      break;
608   }
609
610   rb->Width = width;
611   rb->Height = height;
612   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
613   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
614   cpp = _mesa_get_format_bytes(rb->Format);
615
616   intel_flush(ctx);
617
618   /* free old region */
619   if (irb->region) {
620      intel_region_release(&irb->region);
621   }
622   if (irb->hiz_region) {
623      intel_region_release(&irb->hiz_region);
624   }
625
626   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
627       _mesa_lookup_enum_by_nr(internalFormat),
628       _mesa_get_format_name(rb->Format), width, height);
629
630   tiling = I915_TILING_NONE;
631   if (intel->use_texture_tiling) {
632      GLenum base_format = _mesa_get_format_base_format(rb->Format);
633
634      if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT ||
635			      base_format == GL_STENCIL_INDEX ||
636			      base_format == GL_DEPTH_STENCIL))
637	 tiling = I915_TILING_Y;
638      else
639	 tiling = I915_TILING_X;
640   }
641
642   if (irb->Base.Format == MESA_FORMAT_S8) {
643      /*
644       * The stencil buffer is W tiled. However, we request from the kernel a
645       * non-tiled buffer because the GTT is incapable of W fencing.
646       *
647       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
648       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
649       *    The pitch must be set to 2x the value computed based on width, as
650       *    the stencil buffer is stored with two rows interleaved.
651       * To accomplish this, we resort to the nasty hack of doubling the drm
652       * region's cpp and halving its height.
653       *
654       * If we neglect to double the pitch, then render corruption occurs.
655       */
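      /* For example, a 100x100 S8 renderbuffer (cpp 1) becomes a region with
       * cpp 2, width ALIGN(100, 64) = 128 and height ALIGN(50, 64) = 64.
       */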
656      irb->region = intel_region_alloc(intel->intelScreen,
657				       I915_TILING_NONE,
658				       cpp * 2,
659				       ALIGN(width, 64),
660				       ALIGN((height + 1) / 2, 64),
661				       true);
662      if (!irb->region)
663	return false;
664
665   } else if (irb->Base.Format == MESA_FORMAT_S8_Z24
666	      && intel->must_use_separate_stencil) {
667
668      bool ok = true;
669      struct gl_renderbuffer *depth_rb;
670      struct gl_renderbuffer *stencil_rb;
671
672      depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
673						   MESA_FORMAT_X8_Z24);
674      stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
675						     MESA_FORMAT_S8);
676      ok = depth_rb && stencil_rb;
677      ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb,
678						  depth_rb->InternalFormat,
679						  width, height);
680      ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb,
681						  stencil_rb->InternalFormat,
682						  width, height);
683
684      if (!ok) {
685	 if (depth_rb) {
686	    intel_delete_renderbuffer(depth_rb);
687	 }
688	 if (stencil_rb) {
689	    intel_delete_renderbuffer(stencil_rb);
690	 }
691	 return false;
692      }
693
694      depth_rb->Wrapped = rb;
695      stencil_rb->Wrapped = rb;
696      _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb);
697      _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb);
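      /* The wrapper itself keeps no region of its own; access goes through
       * the wrapped depth and stencil renderbuffers (see
       * intel_map_renderbuffer()).
       */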
698
699   } else {
700      irb->region = intel_region_alloc(intel->intelScreen, tiling, cpp,
701				       width, height, true);
702      if (!irb->region)
703	 return false;
704
705      if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
706	 irb->hiz_region = intel_region_alloc(intel->intelScreen,
707					      I915_TILING_Y,
708					      irb->region->cpp,
709					      irb->region->width,
710					      irb->region->height,
711					      true);
712	 if (!irb->hiz_region) {
713	    intel_region_release(&irb->region);
714	    return false;
715	 }
716      }
717   }
718
719   return true;
720}
721
722
723#if FEATURE_OES_EGL_image
724static void
725intel_image_target_renderbuffer_storage(struct gl_context *ctx,
726					struct gl_renderbuffer *rb,
727					void *image_handle)
728{
729   struct intel_context *intel = intel_context(ctx);
730   struct intel_renderbuffer *irb;
731   __DRIscreen *screen;
732   __DRIimage *image;
733
734   screen = intel->intelScreen->driScrnPriv;
735   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
736					      screen->loaderPrivate);
737   if (image == NULL)
738      return;
739
740   /* __DRIimage is opaque to the core so it has to be checked here */
741   switch (image->format) {
742   case MESA_FORMAT_RGBA8888_REV:
743      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
744            "glEGLImageTargetRenderbufferStorage(unsupported image format)");
745      return;
746      break;
747   default:
748      break;
749   }
750
751   irb = intel_renderbuffer(rb);
752   intel_region_reference(&irb->region, image->region);
753
754   rb->InternalFormat = image->internal_format;
755   rb->Width = image->region->width;
756   rb->Height = image->region->height;
757   rb->Format = image->format;
758   rb->DataType = image->data_type;
759   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
760					   image->internal_format);
761}
762#endif
763
764/**
765 * Called for each hardware renderbuffer when a _window_ is resized.
766 * Just update fields.
767 * Not used for user-created renderbuffers!
768 */
769static GLboolean
770intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
771                           GLenum internalFormat, GLuint width, GLuint height)
772{
773   ASSERT(rb->Name == 0);
774   rb->Width = width;
775   rb->Height = height;
776   rb->InternalFormat = internalFormat;
777
778   return true;
779}
780
781
782static void
783intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
784		     GLuint width, GLuint height)
785{
786   int i;
787
788   _mesa_resize_framebuffer(ctx, fb, width, height);
789
790   fb->Initialized = true; /* XXX remove someday */
791
792   if (fb->Name != 0) {
793      return;
794   }
795
796
797   /* Make sure all window system renderbuffers are up to date */
798   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
799      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
800
801      /* only resize if size is changing */
802      if (rb && (rb->Width != width || rb->Height != height)) {
803	 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
804      }
805   }
806}
807
808
809/** Dummy function for gl_renderbuffer::AllocStorage() */
810static GLboolean
811intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
812                        GLenum internalFormat, GLuint width, GLuint height)
813{
814   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
815   return false;
816}
817
818/**
819 * Create a new intel_renderbuffer which corresponds to an on-screen window,
820 * not a user-created renderbuffer.
821 */
822struct intel_renderbuffer *
823intel_create_renderbuffer(gl_format format)
824{
825   GET_CURRENT_CONTEXT(ctx);
826
827   struct intel_renderbuffer *irb;
828
829   irb = CALLOC_STRUCT(intel_renderbuffer);
830   if (!irb) {
831      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
832      return NULL;
833   }
834
835   _mesa_init_renderbuffer(&irb->Base, 0);
836   irb->Base.ClassID = INTEL_RB_CLASS;
837   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
838   irb->Base.Format = format;
839   irb->Base.InternalFormat = irb->Base._BaseFormat;
840   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);
841
842   /* intel-specific methods */
843   irb->Base.Delete = intel_delete_renderbuffer;
844   irb->Base.AllocStorage = intel_alloc_window_storage;
845   irb->Base.GetPointer = intel_get_pointer;
846
847   return irb;
848}
849
850
851struct gl_renderbuffer*
852intel_create_wrapped_renderbuffer(struct gl_context * ctx,
853				  int width, int height,
854				  gl_format format)
855{
856   /*
857    * The name here is irrelevant, as long as it's nonzero, because the
858    * renderbuffer never gets entered into Mesa's renderbuffer hash table.
859    */
860   GLuint name = ~0;
861
862   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
863   if (!irb) {
864      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
865      return NULL;
866   }
867
868   struct gl_renderbuffer *rb = &irb->Base;
869   _mesa_init_renderbuffer(rb, name);
870   rb->ClassID = INTEL_RB_CLASS;
871   rb->_BaseFormat = _mesa_get_format_base_format(format);
872   rb->Format = format;
873   rb->InternalFormat = rb->_BaseFormat;
874   rb->DataType = intel_mesa_format_to_rb_datatype(format);
875   rb->Width = width;
876   rb->Height = height;
877
878   return rb;
879}
880
881
882/**
883 * Create a new renderbuffer object.
884 * Typically called via glBindRenderbufferEXT().
885 */
886static struct gl_renderbuffer *
887intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
888{
889   /*struct intel_context *intel = intel_context(ctx); */
890   struct intel_renderbuffer *irb;
891
892   irb = CALLOC_STRUCT(intel_renderbuffer);
893   if (!irb) {
894      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
895      return NULL;
896   }
897
898   _mesa_init_renderbuffer(&irb->Base, name);
899   irb->Base.ClassID = INTEL_RB_CLASS;
900
901   /* intel-specific methods */
902   irb->Base.Delete = intel_delete_renderbuffer;
903   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
904   irb->Base.GetPointer = intel_get_pointer;
905   /* span routines set in alloc_storage function */
906
907   return &irb->Base;
908}
909
910
911/**
912 * Called via glBindFramebufferEXT().
913 */
914static void
915intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
916                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
917{
918   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
919      intel_draw_buffer(ctx);
920   }
921   else {
922      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
923   }
924}
925
926
927/**
928 * Called via glFramebufferRenderbufferEXT().
929 */
930static void
931intel_framebuffer_renderbuffer(struct gl_context * ctx,
932                               struct gl_framebuffer *fb,
933                               GLenum attachment, struct gl_renderbuffer *rb)
934{
935   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
936
937   intel_flush(ctx);
938
939   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
940   intel_draw_buffer(ctx);
941}
942
943static bool
944intel_update_tex_wrapper_regions(struct intel_context *intel,
945				 struct intel_renderbuffer *irb,
946				 struct intel_texture_image *intel_image);
947
948static bool
949intel_update_wrapper(struct gl_context *ctx, struct intel_renderbuffer *irb,
950		     struct gl_texture_image *texImage)
951{
952   struct intel_context *intel = intel_context(ctx);
953   struct intel_texture_image *intel_image = intel_texture_image(texImage);
954   int width, height, depth;
955
956   if (!intel_span_supports_format(texImage->TexFormat)) {
957      DBG("Render to texture BAD FORMAT %s\n",
958	  _mesa_get_format_name(texImage->TexFormat));
959      return false;
960   } else {
961      DBG("Render to texture %s\n", _mesa_get_format_name(texImage->TexFormat));
962   }
963
964   intel_miptree_get_dimensions_for_image(texImage, &width, &height, &depth);
965
966   irb->Base.Format = texImage->TexFormat;
967   irb->Base.DataType = intel_mesa_format_to_rb_datatype(texImage->TexFormat);
968   irb->Base.InternalFormat = texImage->InternalFormat;
969   irb->Base._BaseFormat = _mesa_base_tex_format(ctx, irb->Base.InternalFormat);
970   irb->Base.Width = width;
971   irb->Base.Height = height;
972
973   irb->Base.Delete = intel_delete_renderbuffer;
974   irb->Base.AllocStorage = intel_nop_alloc_storage;
975
976   if (intel_image->stencil_rb) {
977      /* The tex image has a packed depth/stencil format but is using
978       * separate stencil. */
979
980      bool ok;
981      struct intel_renderbuffer *depth_irb =
982	 intel_renderbuffer(intel_image->depth_rb);
983
984      /* Update the hiz region if necessary. */
985      ok = intel_update_tex_wrapper_regions(intel, depth_irb, intel_image);
986      if (!ok) {
987	 return false;
988      }
989
990      /* The tex image shares its embedded depth and stencil renderbuffers with
991       * the renderbuffer wrapper. */
992      _mesa_reference_renderbuffer(&irb->wrapped_depth,
993				   intel_image->depth_rb);
994      _mesa_reference_renderbuffer(&irb->wrapped_stencil,
995				   intel_image->stencil_rb);
996
997      return true;
998   } else {
999      return intel_update_tex_wrapper_regions(intel, irb, intel_image);
1000   }
1001}
1002
1003/**
1004 * FIXME: The handling of the hiz region is broken for mipmapped depth
1005 * textures, because intel_finalize_mipmap_tree is unaware of it.
1006 */
1007static bool
1008intel_update_tex_wrapper_regions(struct intel_context *intel,
1009				 struct intel_renderbuffer *irb,
1010				 struct intel_texture_image *intel_image)
1011{
1012   struct gl_renderbuffer *rb = &irb->Base;
1013
1014   /* Point the renderbuffer's region to the texture's region. */
1015   if (irb->region != intel_image->mt->region) {
1016      intel_region_reference(&irb->region, intel_image->mt->region);
1017   }
1018
1019   /* Allocate the texture's hiz region if necessary. */
1020   if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)
1021       && !intel_image->mt->hiz_region) {
1022      intel_image->mt->hiz_region =
1023         intel_region_alloc(intel->intelScreen,
1024                            I915_TILING_Y,
1025                            _mesa_get_format_bytes(rb->Format),
1026                            rb->Width,
1027                            rb->Height,
1028                            true);
1029      if (!intel_image->mt->hiz_region)
1030         return false;
1031   }
1032
1033   /* Point the renderbuffer's hiz region to the texture's hiz region. */
1034   if (irb->hiz_region != intel_image->mt->hiz_region) {
1035      intel_region_reference(&irb->hiz_region, intel_image->mt->hiz_region);
1036   }
1037
1038   return true;
1039}
1040
1041
1042/**
1043 * When glFramebufferTexture[123]D is called this function sets up the
1044 * gl_renderbuffer wrapper around the texture image.
1045 * This will have the region info needed for hardware rendering.
1046 */
1047static struct intel_renderbuffer *
1048intel_wrap_texture(struct gl_context * ctx, struct gl_texture_image *texImage)
1049{
1050   const GLuint name = ~0;   /* not significant, but distinct for debugging */
1051   struct intel_renderbuffer *irb;
1052
1053   /* make an intel_renderbuffer to wrap the texture image */
1054   irb = CALLOC_STRUCT(intel_renderbuffer);
1055   if (!irb) {
1056      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
1057      return NULL;
1058   }
1059
1060   _mesa_init_renderbuffer(&irb->Base, name);
1061   irb->Base.ClassID = INTEL_RB_CLASS;
1062
1063   if (!intel_update_wrapper(ctx, irb, texImage)) {
1064      free(irb);
1065      return NULL;
1066   }
1067
1068   return irb;
1069}
1070
1071void
1072intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb,
1073				   struct intel_texture_image *intel_image,
1074				   int zoffset)
1075{
1076   unsigned int dst_x, dst_y;
1077
1078   /* compute offset of the particular 2D image within the texture region */
1079   intel_miptree_get_image_offset(intel_image->mt,
1080				  intel_image->base.Base.Level,
1081				  intel_image->base.Base.Face,
1082				  zoffset,
1083				  &dst_x, &dst_y);
1084
1085   irb->draw_x = dst_x;
1086   irb->draw_y = dst_y;
1087}
1088
1089/**
1090 * Rendering to tiled buffers requires that the base address of the
1091 * buffer be aligned to a page boundary.  We generally render to
1092 * textures by pointing the surface at the mipmap image level, which
1093 * may not be aligned to a tile boundary.
1094 *
1095 * This function returns an appropriately-aligned base offset
1096 * according to the tiling restrictions, plus any required x/y offset
1097 * from there.
1098 */
1099uint32_t
1100intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
1101				uint32_t *tile_x,
1102				uint32_t *tile_y)
1103{
1104   int cpp = irb->region->cpp;
1105   uint32_t pitch = irb->region->pitch * cpp;
1106
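   /* Tiles are 4096 bytes: X tiles are 512 bytes wide by 8 rows, Y tiles are
    * 128 bytes wide by 32 rows.  Round the draw offset down to a tile
    * boundary and return the remainder in tile_x/tile_y.
    */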
1107   if (irb->region->tiling == I915_TILING_NONE) {
1108      *tile_x = 0;
1109      *tile_y = 0;
1110      return irb->draw_x * cpp + irb->draw_y * pitch;
1111   } else if (irb->region->tiling == I915_TILING_X) {
1112      *tile_x = irb->draw_x % (512 / cpp);
1113      *tile_y = irb->draw_y % 8;
1114      return ((irb->draw_y / 8) * (8 * pitch) +
1115	      (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
1116   } else {
1117      assert(irb->region->tiling == I915_TILING_Y);
1118      *tile_x = irb->draw_x % (128 / cpp);
1119      *tile_y = irb->draw_y % 32;
1120      return ((irb->draw_y / 32) * (32 * pitch) +
1121	      (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
1122   }
1123}
1124
1125#ifndef I915
1126static bool
1127need_tile_offset_workaround(struct brw_context *brw,
1128			    struct intel_renderbuffer *irb)
1129{
1130   uint32_t tile_x, tile_y;
1131
1132   if (brw->has_surface_tile_offset)
1133      return false;
1134
1135   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);
1136
1137   return tile_x != 0 || tile_y != 0;
1138}
1139#endif
1140
1141/**
1142 * Called by glFramebufferTexture[123]DEXT() (and other places) to
1143 * prepare for rendering into texture memory.  This might be called
1144 * many times to choose different texture levels, cube faces, etc.
1145 * before intel_finish_render_texture() is ever called.
1146 */
1147static void
1148intel_render_texture(struct gl_context * ctx,
1149                     struct gl_framebuffer *fb,
1150                     struct gl_renderbuffer_attachment *att)
1151{
1152   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
1153   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
1154   struct intel_texture_image *intel_image = intel_texture_image(image);
1155
1156   (void) fb;
1157
1158   if (!intel_image->mt) {
1159      /* Fallback on drawing to a texture that doesn't have a miptree
1160       * (has a border, width/height 0, etc.)
1161       */
1162      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
1163      _swrast_render_texture(ctx, fb, att);
1164      return;
1165   }
1166   else if (!irb) {
1167      irb = intel_wrap_texture(ctx, image);
1168      if (irb) {
1169         /* bind the wrapper to the attachment point */
1170         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
1171      }
1172      else {
1173         /* fallback to software rendering */
1174         _swrast_render_texture(ctx, fb, att);
1175         return;
1176      }
1177   }
1178
1179   if (!intel_update_wrapper(ctx, irb, image)) {
1180       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
1181       _swrast_render_texture(ctx, fb, att);
1182       return;
1183   }
1184
1185   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
1186       _glthread_GetID(),
1187       att->Texture->Name, image->Width, image->Height,
1188       irb->Base.RefCount);
1189
1190   intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);
1191   intel_image->used_as_render_target = true;
1192
1193#ifndef I915
1194   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
1195      /* Original gen4 hardware couldn't draw to a non-tile-aligned
1196       * destination in a miptree unless you actually setup your
1197       * renderbuffer as a miptree and used the fragile
1198       * lod/array_index/etc. controls to select the image.  So,
1199       * instead, we just make a new single-level miptree and render
1200       * into that.
1201       */
1202      struct intel_context *intel = intel_context(ctx);
1203      struct intel_mipmap_tree *new_mt;
1204      int width, height, depth;
1205
1206      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
1207
1208      new_mt = intel_miptree_create(intel, image->TexObject->Target,
1209				    intel_image->base.Base.TexFormat,
1210				    intel_image->base.Base.Level,
1211				    intel_image->base.Base.Level,
1212                                    width, height, depth,
1213				    true);
1214
1215      intel_miptree_copy_teximage(intel, intel_image, new_mt);
1216      intel_renderbuffer_set_draw_offset(irb, intel_image, att->Zoffset);
1217
1218      intel_region_reference(&irb->region, intel_image->mt->region);
1219      intel_miptree_release(&new_mt);
1220   }
1221#endif
1222   /* update drawing region, etc */
1223   intel_draw_buffer(ctx);
1224}
1225
1226
1227/**
1228 * Called by Mesa when rendering to a texture is done.
1229 */
1230static void
1231intel_finish_render_texture(struct gl_context * ctx,
1232                            struct gl_renderbuffer_attachment *att)
1233{
1234   struct intel_context *intel = intel_context(ctx);
1235   struct gl_texture_object *tex_obj = att->Texture;
1236   struct gl_texture_image *image =
1237      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
1238   struct intel_texture_image *intel_image = intel_texture_image(image);
1239
1240   DBG("Finish render texture tid %lx tex=%u\n",
1241       _glthread_GetID(), att->Texture->Name);
1242
1243   /* Flag that this image may now be validated into the object's miptree. */
1244   if (intel_image)
1245      intel_image->used_as_render_target = false;
1246
1247   /* Since we've (probably) rendered to the texture and will (likely) use
1248    * it in the texture domain later on in this batchbuffer, flush the
1249    * batch.  Once again, we wish for a domain tracker in libdrm to cover
1250    * usage inside of a batchbuffer like GEM does in the kernel.
1251    */
1252   intel_batchbuffer_emit_mi_flush(intel);
1253}
1254
1255/**
1256 * Do additional "completeness" testing of a framebuffer object.
1257 */
1258static void
1259intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
1260{
1261   struct intel_context *intel = intel_context(ctx);
1262   const struct intel_renderbuffer *depthRb =
1263      intel_get_renderbuffer(fb, BUFFER_DEPTH);
1264   const struct intel_renderbuffer *stencilRb =
1265      intel_get_renderbuffer(fb, BUFFER_STENCIL);
1266   int i;
1267
1268   /*
1269    * Unless the hardware has separate stencil, the depth and stencil
1270    * renderbuffers must be the same renderbuffer or wrap the same texture.
1271    */
1272   if (depthRb && stencilRb) {
1273      bool depth_stencil_are_same;
1274      if (depthRb == stencilRb)
1275	 depth_stencil_are_same = true;
1276      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
1277	       (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
1278	       (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
1279		fb->Attachment[BUFFER_STENCIL].Texture->Name))
1280	 depth_stencil_are_same = true;
1281      else
1282	 depth_stencil_are_same = false;
1283
1284      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
1285	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1286      }
1287   }
1288
1289   for (i = 0; i < Elements(fb->Attachment); i++) {
1290      struct gl_renderbuffer *rb;
1291      struct intel_renderbuffer *irb;
1292
1293      if (fb->Attachment[i].Type == GL_NONE)
1294	 continue;
1295
1296      /* A supported attachment will have a Renderbuffer set either
1297       * from being a Renderbuffer or being a texture that got the
1298       * intel_wrap_texture() treatment.
1299       */
1300      rb = fb->Attachment[i].Renderbuffer;
1301      if (rb == NULL) {
1302	 DBG("attachment without renderbuffer\n");
1303	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1304	 continue;
1305      }
1306
1307      irb = intel_renderbuffer(rb);
1308      if (irb == NULL) {
1309	 DBG("software rendering renderbuffer\n");
1310	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1311	 continue;
1312      }
1313
1314      if (!intel_span_supports_format(irb->Base.Format) ||
1315	  !intel->vtbl.render_target_supported(irb->Base.Format)) {
1316	 DBG("Unsupported texture/renderbuffer format attached: %s\n",
1317	     _mesa_get_format_name(irb->Base.Format));
1318	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1319      }
1320   }
1321}
1322
1323/**
1324 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
1325 * We can do this when the dst renderbuffer is actually a texture and
1326 * there is no scaling, mirroring or scissoring.
1327 *
1328 * \return new buffer mask indicating the buffers left to blit using the
1329 *         normal path.
1330 */
1331static GLbitfield
1332intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
1333                                          GLint srcX0, GLint srcY0,
1334                                          GLint srcX1, GLint srcY1,
1335                                          GLint dstX0, GLint dstY0,
1336                                          GLint dstX1, GLint dstY1,
1337                                          GLbitfield mask, GLenum filter)
1338{
1339   if (mask & GL_COLOR_BUFFER_BIT) {
1340      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
1341      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
1342      const struct gl_renderbuffer_attachment *drawAtt =
1343         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
1344
1345      /* If the source and destination are the same size with no
1346         mirroring, the rectangles are within the bounds of the
1347         texture, and there is no scissor, then we can use
1348         glCopyTexSubImage2D to implement the blit.  This will end
1349         up as a fast hardware blit on some drivers. */
1350      if (drawAtt && drawAtt->Texture &&
1351          srcX0 - srcX1 == dstX0 - dstX1 &&
1352          srcY0 - srcY1 == dstY0 - dstY1 &&
1353          srcX1 >= srcX0 &&
1354          srcY1 >= srcY0 &&
1355          srcX0 >= 0 && srcX1 <= readFb->Width &&
1356          srcY0 >= 0 && srcY1 <= readFb->Height &&
1357          dstX0 >= 0 && dstX1 <= drawFb->Width &&
1358          dstY0 >= 0 && dstY1 <= drawFb->Height &&
1359          !ctx->Scissor.Enabled) {
1360         const struct gl_texture_object *texObj = drawAtt->Texture;
1361         const GLuint dstLevel = drawAtt->TextureLevel;
1362         const GLenum target = texObj->Target;
1363
1364         struct gl_texture_image *texImage =
1365            _mesa_select_tex_image(ctx, texObj, target, dstLevel);
1366
1367         if (intel_copy_texsubimage(intel_context(ctx),
1368                                    intel_texture_image(texImage),
1369                                    dstX0, dstY0,
1370                                    srcX0, srcY0,
1371                                    srcX1 - srcX0, /* width */
1372                                    srcY1 - srcY0))
1373            mask &= ~GL_COLOR_BUFFER_BIT;
1374      }
1375   }
1376
1377   return mask;
1378}
1379
1380static void
1381intel_blit_framebuffer(struct gl_context *ctx,
1382                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
1383                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
1384                       GLbitfield mask, GLenum filter)
1385{
1386   /* Try the faster glCopyTexSubImage2D approach first, which uses the BLT. */
1387   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
1388                                                    srcX0, srcY0, srcX1, srcY1,
1389                                                    dstX0, dstY0, dstX1, dstY1,
1390                                                    mask, filter);
1391   if (mask == 0x0)
1392      return;
1393
1394   _mesa_meta_BlitFramebuffer(ctx,
1395                              srcX0, srcY0, srcX1, srcY1,
1396                              dstX0, dstY0, dstX1, dstY1,
1397                              mask, filter);
1398}
1399
1400/**
1401 * Do one-time context initializations related to GL_EXT_framebuffer_object.
1402 * Hook in device driver functions.
1403 */
1404void
1405intel_fbo_init(struct intel_context *intel)
1406{
1407   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
1408   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
1409   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
1410   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
1411   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
1412   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
1413   intel->ctx.Driver.RenderTexture = intel_render_texture;
1414   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
1415   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
1416   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
1417   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
1418
1419#if FEATURE_OES_EGL_image
1420   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
1421      intel_image_target_renderbuffer_storage;
1422#endif
1423}
1424