/* intel_fbo.c, revision 3d798abc818326a377bbbdaac29058ac7b41e1a0 */
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif

#define FILE_DEBUG_FLAG DEBUG_FBO


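/**
 * Return true if the framebuffer's depth attachment is backed by a miptree
 * that has an associated HiZ miptree.
 */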
bool
intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
{
   struct intel_renderbuffer *rb = NULL;
   if (fb)
      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   return rb && rb->mt && rb->mt->hiz_mt;
}

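/**
 * Return the region backing the renderbuffer at the given attachment index,
 * or NULL if the attachment has no miptree.
 */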
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   if (irb && irb->mt)
      return irb->mt->region;
   else
      return NULL;
}

/**
 * Create a new framebuffer object.
 */
static struct gl_framebuffer *
intel_new_framebuffer(struct gl_context * ctx, GLuint name)
{
   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
    * class
    */
   return _mesa_new_framebuffer(ctx, name);
}


/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   intel_miptree_release(&irb->mt);

   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
   _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);

   free(irb);
}

/**
 * \brief Map a renderbuffer through the GTT.
 *
 * \see intel_map_renderbuffer()
 */
static void
intel_map_renderbuffer_gtt(struct gl_context *ctx,
                           struct gl_renderbuffer *rb,
                           GLuint x, GLuint y, GLuint w, GLuint h,
                           GLbitfield mode,
                           GLubyte **out_map,
                           GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   GLubyte *map;
   int stride, flip_stride;

   assert(irb->mt);

   intel_renderbuffer_resolve_depth(intel, irb);
   if (mode & GL_MAP_WRITE_BIT) {
      intel_renderbuffer_set_needs_hiz_resolve(irb);
   }

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   stride = irb->mt->region->pitch * irb->mt->region->cpp;

   if (rb->Name == 0) {
      y = irb->mt->region->height - 1 - y;
      flip_stride = -stride;
   } else {
      x += irb->draw_x;
      y += irb->draw_y;
      flip_stride = stride;
   }

   if (drm_intel_bo_references(intel->batch.bo, irb->mt->region->bo)) {
      intel_batchbuffer_flush(intel);
   }

   drm_intel_gem_bo_map_gtt(irb->mt->region->bo);

   map = irb->mt->region->bo->virtual;
   map += x * irb->mt->region->cpp;
   map += (int)y * stride;

   *out_map = map;
   *out_stride = flip_stride;

   DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, *out_map, *out_stride);
}

/**
 * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
 *
 * On gen6+, we have LLC sharing, which means we can get high-performance
 * access to linear-mapped buffers.
 *
 * This function allocates a temporary gem buffer at
 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
 * returns a map of that. (Note: Only X tiled buffers can be blitted).
 *
 * \see intel_renderbuffer::map_bo
 * \see intel_map_renderbuffer()
 */
static void
intel_map_renderbuffer_blit(struct gl_context *ctx,
			    struct gl_renderbuffer *rb,
			    GLuint x, GLuint y, GLuint w, GLuint h,
			    GLbitfield mode,
			    GLubyte **out_map,
			    GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   int src_x, src_y;
   int dst_stride;

   assert(irb->mt->region);
   assert(intel->gen >= 6);
   assert(!(mode & GL_MAP_WRITE_BIT));
   assert(irb->mt->region->tiling == I915_TILING_X);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   dst_stride = ALIGN(w * irb->mt->region->cpp, 4);

   if (rb->Name) {
      src_x = x + irb->draw_x;
      src_y = y + irb->draw_y;
   } else {
      src_x = x;
      src_y = irb->mt->region->height - y - h;
   }

   irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
				    dst_stride * h, 4096);

   /* We don't do the flip in the blit, because it's always so tricky to get
    * right.
    */
   if (irb->map_bo &&
       intelEmitCopyBlit(intel,
			 irb->mt->region->cpp,
			 irb->mt->region->pitch, irb->mt->region->bo,
			 0, irb->mt->region->tiling,
			 dst_stride / irb->mt->region->cpp, irb->map_bo,
			 0, I915_TILING_NONE,
			 src_x, src_y,
			 0, 0,
			 w, h,
			 GL_COPY)) {
      intel_batchbuffer_flush(intel);
      drm_intel_bo_map(irb->map_bo, false);

      if (rb->Name) {
	 *out_map = irb->map_bo->virtual;
	 *out_stride = dst_stride;
      } else {
	 *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
	 *out_stride = -dst_stride;
      }

      DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
	  __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
	  src_x, src_y, w, h, *out_map, *out_stride);
   } else {
      /* Fallback to GTT mapping. */
      drm_intel_bo_unreference(irb->map_bo);
      irb->map_bo = NULL;
      intel_map_renderbuffer_gtt(ctx, rb,
				 x, y, w, h,
				 mode,
				 out_map, out_stride);
   }
}

/**
 * \brief Map a stencil renderbuffer.
 *
 * Stencil buffers are W-tiled. Since the GTT has no W fence, we must detile
 * the buffer in software.
 *
 * This function allocates a temporary malloc'd buffer at
 * intel_renderbuffer::map_buffer, detiles the stencil buffer into it, then
 * returns the temporary buffer as the map.
 *
 * \see intel_renderbuffer::map_buffer
 * \see intel_map_renderbuffer()
 * \see intel_unmap_renderbuffer_s8()
 */
static void
intel_map_renderbuffer_s8(struct gl_context *ctx,
			  struct gl_renderbuffer *rb,
			  GLuint x, GLuint y, GLuint w, GLuint h,
			  GLbitfield mode,
			  GLubyte **out_map,
			  GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   uint8_t *tiled_s8_map;
   uint8_t *untiled_s8_map;

   assert(rb->Format == MESA_FORMAT_S8);
   assert(irb->mt);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   /* Flip the Y axis for the default framebuffer. */
   int y_flip = (rb->Name == 0) ? -1 : 1;
   int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;

   irb->map_buffer = malloc(w * h);
   untiled_s8_map = irb->map_buffer;
   tiled_s8_map = intel_region_map(intel, irb->mt->region, mode);

   for (uint32_t pix_y = 0; pix_y < h; pix_y++) {
      for (uint32_t pix_x = 0; pix_x < w; pix_x++) {
	 uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
	 ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch,
	                                    x + pix_x,
	                                    flipped_y);
	 untiled_s8_map[pix_y * w + pix_x] = tiled_s8_map[offset];
      }
   }

   *out_map = untiled_s8_map;
   *out_stride = w;

   DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, *out_map, *out_stride);
}

/**
 * \brief Map a depthstencil buffer with separate stencil.
 *
 * A depthstencil renderbuffer, if using separate stencil, consists of a depth
 * renderbuffer and a hidden stencil renderbuffer.  This function maps the
 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and
 * returns that as the mapped pointer. The caller need not be aware of the
 * hidden stencil buffer and may safely assume that the mapped pointer points
 * to a MESA_FORMAT_S8_Z24 buffer.
 *
 * The consistency between the depth buffer's S8 bits and the hidden stencil
 * buffer is managed within intel_map_renderbuffer() and
 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits
 * according to the map mode.
 *
 * \see intel_map_renderbuffer()
 * \see intel_unmap_renderbuffer_separate_s8z24()
 */
static void
intel_map_renderbuffer_separate_s8z24(struct gl_context *ctx,
				      struct gl_renderbuffer *rb,
				      GLuint x, GLuint y, GLuint w, GLuint h,
				      GLbitfield mode,
				      GLubyte **out_map,
				      GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   uint8_t *s8z24_map;
   int32_t s8z24_stride;

   struct intel_renderbuffer *s8_irb;
   uint8_t *s8_map;

   assert(rb->Name != 0);
   assert(rb->Format == MESA_FORMAT_S8_Z24);
   assert(irb->wrapped_depth != NULL);
   assert(irb->wrapped_stencil != NULL);

   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   /* Map with write mode for the gather below. */
   intel_map_renderbuffer_gtt(ctx, irb->wrapped_depth,
			       x, y, w, h, mode | GL_MAP_WRITE_BIT,
			       &s8z24_map, &s8z24_stride);

   s8_irb = intel_renderbuffer(irb->wrapped_stencil);
   s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_READ_BIT);

   /* Gather the stencil buffer into the depth buffer. */
   for (uint32_t pix_y = 0; pix_y < h; ++pix_y) {
      for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
	 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch,
					       x + pix_x,
					       y + pix_y);
	 ptrdiff_t s8z24_offset = pix_y * s8z24_stride
				+ pix_x * 4
				+ 3;
	 s8z24_map[s8z24_offset] = s8_map[s8_offset];
      }
   }

   intel_region_unmap(intel, s8_irb->mt->region);

   *out_map = s8z24_map;
   *out_stride = s8z24_stride;
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
		       struct gl_renderbuffer *rb,
		       GLuint x, GLuint y, GLuint w, GLuint h,
		       GLbitfield mode,
		       GLubyte **out_map,
		       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->mt && !irb->wrapped_depth) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   if (rb->Format == MESA_FORMAT_S8) {
      intel_map_renderbuffer_s8(ctx, rb, x, y, w, h, mode,
			        out_map, out_stride);
   } else if (irb->wrapped_depth) {
      intel_map_renderbuffer_separate_s8z24(ctx, rb, x, y, w, h, mode,
					    out_map, out_stride);
   } else if (intel->gen >= 6 &&
	      !(mode & GL_MAP_WRITE_BIT) &&
	      irb->mt->region->tiling == I915_TILING_X) {
      intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode,
				  out_map, out_stride);
   } else {
      intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode,
				 out_map, out_stride);
   }
}

/**
 * \see intel_map_renderbuffer_s8()
 */
static void
intel_unmap_renderbuffer_s8(struct gl_context *ctx,
			    struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   assert(rb->Format == MESA_FORMAT_S8);

   if (!irb->map_buffer)
      return;

   if (irb->map_mode & GL_MAP_WRITE_BIT) {
      /* The temporary buffer was written to, so we must copy its pixels into
       * the real buffer.
       */
      uint8_t *untiled_s8_map = irb->map_buffer;
      uint8_t *tiled_s8_map = irb->mt->region->bo->virtual;

      /* Flip the Y axis for the default framebuffer. */
      int y_flip = (rb->Name == 0) ? -1 : 1;
      int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;

      for (uint32_t pix_y = 0; pix_y < irb->map_h; pix_y++) {
	 for (uint32_t pix_x = 0; pix_x < irb->map_w; pix_x++) {
	    uint32_t flipped_y = y_flip * (int32_t)(pix_y + irb->map_y) + y_bias;
	    ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch,
	                                       pix_x + irb->map_x,
	                                       flipped_y);
	    tiled_s8_map[offset] =
	       untiled_s8_map[pix_y * irb->map_w + pix_x];
	 }
      }
   }

   intel_region_unmap(intel, irb->mt->region);
   free(irb->map_buffer);
   irb->map_buffer = NULL;
}

/**
 * \brief Unmap a depthstencil renderbuffer with separate stencil.
 *
 * \see intel_map_renderbuffer_separate_s8z24()
 * \see intel_unmap_renderbuffer()
 */
static void
intel_unmap_renderbuffer_separate_s8z24(struct gl_context *ctx,
				        struct gl_renderbuffer *rb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_renderbuffer *s8z24_irb;

   assert(rb->Name != 0);
   assert(rb->Format == MESA_FORMAT_S8_Z24);
   assert(irb->wrapped_depth != NULL);
   assert(irb->wrapped_stencil != NULL);

   s8z24_irb = intel_renderbuffer(irb->wrapped_depth);

   if (irb->map_mode & GL_MAP_WRITE_BIT) {
      /* Copy the stencil bits from the depth buffer into the stencil buffer.
       */
      uint32_t map_x = irb->map_x;
      uint32_t map_y = irb->map_y;
      uint32_t map_w = irb->map_w;
      uint32_t map_h = irb->map_h;

      struct intel_renderbuffer *s8_irb;
      uint8_t *s8_map;

      s8_irb = intel_renderbuffer(irb->wrapped_stencil);
      s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_WRITE_BIT);

      int32_t s8z24_stride = 4 * s8z24_irb->mt->region->pitch;
      uint8_t *s8z24_map = s8z24_irb->mt->region->bo->virtual
			 + map_y * s8z24_stride
			 + map_x * 4;

      for (uint32_t pix_y = 0; pix_y < map_h; ++pix_y) {
	 for (uint32_t pix_x = 0; pix_x < map_w; ++pix_x) {
	    ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch,
						  map_x + pix_x,
						  map_y + pix_y);
	    ptrdiff_t s8z24_offset = pix_y * s8z24_stride
				   + pix_x * 4
				   + 3;
	    s8_map[s8_offset] = s8z24_map[s8z24_offset];
	 }
      }

      intel_region_unmap(intel, s8_irb->mt->region);
   }

   drm_intel_gem_bo_unmap_gtt(s8z24_irb->mt->region->bo);
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
			 struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   DBG("%s: rb %d (%s)\n", __FUNCTION__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (rb->Format == MESA_FORMAT_S8) {
      intel_unmap_renderbuffer_s8(ctx, rb);
   } else if (irb->wrapped_depth) {
      intel_unmap_renderbuffer_separate_s8z24(ctx, rb);
   } else if (irb->map_bo) {
      /* Paired with intel_map_renderbuffer_blit(). */
      drm_intel_bo_unmap(irb->map_bo);
      drm_intel_bo_unreference(irb->map_bo);
      irb->map_bo = NULL;
   } else {
      /* Paired with intel_map_renderbuffer_gtt(). */
      if (irb->mt) {
	 /* The miptree may be null when intel_map_renderbuffer() is
	  * called from intel_span.c.
	  */
	 drm_intel_gem_bo_unmap_gtt(irb->mt->region->bo);
      }
   }
}

/**
 * Return a pointer to a specific pixel in a renderbuffer.
 */
static void *
intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
                  GLint x, GLint y)
{
   /* By returning NULL we force all software rendering to go through
    * the span routines.
    */
   return NULL;
}


/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   int cpp, tiling;

   ASSERT(rb->Name != 0);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
							 GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
	 rb->Format = MESA_FORMAT_S8;
      } else {
	 assert(!intel->must_use_separate_stencil);
	 rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   cpp = _mesa_get_format_bytes(rb->Format);

   intel_flush(ctx);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   tiling = I915_TILING_NONE;
   if (intel->use_texture_tiling) {
      GLenum base_format = _mesa_get_format_base_format(rb->Format);

      if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT ||
			      base_format == GL_STENCIL_INDEX ||
			      base_format == GL_DEPTH_STENCIL))
	 tiling = I915_TILING_Y;
      else
	 tiling = I915_TILING_X;
   }

   if (irb->Base.Format == MESA_FORMAT_S8) {
      /*
       * The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       *
       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       * To accomplish this, we resort to the nasty hack of doubling the drm
       * region's cpp and halving its height.
       *
       * If we neglect to double the pitch, then render corruption occurs.
       */
      irb->mt = intel_miptree_create_for_renderbuffer(
		  intel,
		  rb->Format,
		  I915_TILING_NONE,
		  cpp * 2,
		  ALIGN(width, 64),
		  ALIGN((height + 1) / 2, 64));
      if (!irb->mt)
	 return false;

   } else if (irb->Base.Format == MESA_FORMAT_S8_Z24
	      && intel->has_separate_stencil) {

      bool ok = true;
      struct gl_renderbuffer *depth_rb;
      struct gl_renderbuffer *stencil_rb;

      depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
						   MESA_FORMAT_X8_Z24);
      stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
						     MESA_FORMAT_S8);
      ok = depth_rb && stencil_rb;
      ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb,
						  depth_rb->InternalFormat,
						  width, height);
      ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb,
						  stencil_rb->InternalFormat,
						  width, height);

      if (!ok) {
	 if (depth_rb) {
	    intel_delete_renderbuffer(depth_rb);
	 }
	 if (stencil_rb) {
	    intel_delete_renderbuffer(stencil_rb);
	 }
	 return false;
      }

      depth_rb->Wrapped = rb;
      stencil_rb->Wrapped = rb;
      _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb);
      _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb);

   } else {
      irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                      tiling, cpp,
                                                      width, height);
      if (!irb->mt)
	 return false;

      if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
	 bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
	 if (!ok) {
	    intel_miptree_release(&irb->mt);
	    return false;
	 }
      }
   }

   return true;
}


#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
					struct gl_renderbuffer *rb,
					void *image_handle)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = intel->intelScreen->driScrnPriv;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
					      screen->loaderPrivate);
   if (image == NULL)
      return;

   /* __DRIimage is opaque to the core so it has to be checked here */
   switch (image->format) {
   case MESA_FORMAT_RGBA8888_REV:
      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format)");
      return;
   default:
      break;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);
   irb->mt = intel_miptree_create_for_region(intel,
                                             GL_TEXTURE_2D,
                                             image->format,
                                             image->region);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->region->width;
   rb->Height = image->region->height;
   rb->Format = image->format;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
					   image->internal_format);
}
#endif

/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Just update fields.
 * Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   ASSERT(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}


static void
intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
		     GLuint width, GLuint height)
{
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = true; /* XXX remove someday */

   if (fb->Name != 0) {
      return;
   }


   /* Make sure all window system renderbuffers are up to date */
   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
	 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}


/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(gl_format format)
{
   GET_CURRENT_CONTEXT(ctx);

   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, 0);
   irb->Base.ClassID = INTEL_RB_CLASS;
   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
   irb->Base.Format = format;
   irb->Base.InternalFormat = irb->Base._BaseFormat;
   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_window_storage;
   irb->Base.GetPointer = intel_get_pointer;

   return irb;
}


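/**
 * Create a renderbuffer that is not exposed through the GL API, such as the
 * hidden depth and stencil renderbuffers used for separate stencil.
 */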
struct gl_renderbuffer*
intel_create_wrapped_renderbuffer(struct gl_context * ctx,
				  int width, int height,
				  gl_format format)
{
   /*
    * The name here is irrelevant, as long as it's nonzero, because the
    * renderbuffer never gets entered into Mesa's renderbuffer hash table.
    */
   GLuint name = ~0;

   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   struct gl_renderbuffer *rb = &irb->Base;
   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;
   rb->DataType = intel_mesa_format_to_rb_datatype(format);
   rb->Width = width;
   rb->Height = height;

   return rb;
}


/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   /*struct intel_context *intel = intel_context(ctx); */
   struct intel_renderbuffer *irb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
   irb->Base.GetPointer = intel_get_pointer;
   /* span routines set in alloc_storage function */

   return &irb->Base;
}


/**
 * Called via glBindFramebufferEXT().
 */
static void
intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
      intel_draw_buffer(ctx);
   }
   else {
      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
   }
}


/**
 * Called via glFramebufferRenderbufferEXT().
 */
static void
intel_framebuffer_renderbuffer(struct gl_context * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);

   intel_flush(ctx);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx);
}

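/* Forward declaration: intel_renderbuffer_wrap_miptree() and
 * intel_renderbuffer_update_wrapper() call each other when handling
 * separate-stencil buffers.
 */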
static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format);

/**
 * \par Special case for separate stencil
 *
 *     When wrapping a depthstencil texture that uses separate stencil, this
 *     function is recursively called twice: once to create \c
 *     irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
 *     call to create \c irb->wrapped_depth, the \c format and \c
 *     internal_format parameters do not match \c mt->format. In that case, \c
 *     mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 *     MESA_FORMAT_X8_Z24.
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format)
{
   struct gl_renderbuffer *rb = &irb->Base;

   rb->Format = format;
   rb->InternalFormat = internal_format;
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   if (mt->stencil_mt && _mesa_is_depthstencil_format(rb->InternalFormat)) {
      assert((irb->wrapped_depth == NULL) == (irb->wrapped_stencil == NULL));

      struct intel_renderbuffer *depth_irb;
      struct intel_renderbuffer *stencil_irb;

      if (!irb->wrapped_depth) {
	 depth_irb = intel_renderbuffer_wrap_miptree(intel,
	                                             mt, level, layer,
	                                             MESA_FORMAT_X8_Z24,
	                                             GL_DEPTH_COMPONENT24);
	 stencil_irb = intel_renderbuffer_wrap_miptree(intel,
	                                               mt->stencil_mt,
	                                               level, layer,
	                                               MESA_FORMAT_S8,
	                                               GL_STENCIL_INDEX8);
	 _mesa_reference_renderbuffer(&irb->wrapped_depth, &depth_irb->Base);
	 _mesa_reference_renderbuffer(&irb->wrapped_stencil, &stencil_irb->Base);

	 if (!irb->wrapped_depth || !irb->wrapped_stencil)
	    return false;
      } else {
	 bool ok = true;

	 depth_irb = intel_renderbuffer(irb->wrapped_depth);
	 stencil_irb = intel_renderbuffer(irb->wrapped_stencil);

	 ok &= intel_renderbuffer_update_wrapper(intel,
	                                         depth_irb,
	                                         mt,
	                                         level, layer,
	                                         MESA_FORMAT_X8_Z24,
	                                         GL_DEPTH_COMPONENT24);
	 ok &= intel_renderbuffer_update_wrapper(intel,
	                                         stencil_irb,
	                                         mt->stencil_mt,
	                                         level, layer,
	                                         MESA_FORMAT_S8,
	                                         GL_STENCIL_INDEX8);
	 if (!ok)
	    return false;
      }
   } else {
      intel_miptree_reference(&irb->mt, mt);
      intel_renderbuffer_set_draw_offset(irb);

      if (mt->hiz_mt == NULL &&
	  intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
	 intel_miptree_alloc_hiz(intel, mt);
         if (!mt->hiz_mt)
            return false;
      }
   }

   return true;
}

/**
 * \brief Wrap a renderbuffer around a single slice of a miptree.
 *
 * Called by glFramebufferTexture*(). This just allocates a
 * ``struct intel_renderbuffer`` then calls
 * intel_renderbuffer_update_wrapper() to do the real work.
 *
 * \see intel_renderbuffer_update_wrapper()
 */
static struct intel_renderbuffer*
intel_renderbuffer_wrap_miptree(struct intel_context *intel,
                                struct intel_mipmap_tree *mt,
                                uint32_t level,
                                uint32_t layer,
                                gl_format format,
                                GLenum internal_format)

{
   const GLuint name = ~0;   /* not significant, but distinct for debugging */
   struct gl_context *ctx = &intel->ctx;
   struct intel_renderbuffer *irb;

   intel_miptree_check_level_layer(mt, level, layer);

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, level, layer,
                                          format, internal_format)) {
      free(irb);
      return NULL;
   }

   return irb;
}

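/**
 * Recompute irb->draw_x/draw_y, the offset of the renderbuffer's 2D slice
 * within its miptree's region.
 */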
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
				  irb->mt_level,
				  0, /* face, which we ignore */
				  irb->mt_layer,
				  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Rendering to tiled buffers requires that the base address of the
 * buffer be aligned to a page boundary.  We generally render to
 * textures by pointing the surface at the mipmap image level, which
 * may not be aligned to a tile boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
				uint32_t *tile_x,
				uint32_t *tile_y)
{
   struct intel_region *region = irb->mt->region;
   int cpp = region->cpp;
   uint32_t pitch = region->pitch * cpp;

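   /* An X tile is 512 bytes wide by 8 rows and a Y tile is 128 bytes wide by
    * 32 rows; both are 4096 bytes, so the offset of any whole tile is
    * page-aligned and the remainder within the tile goes in *tile_x/*tile_y.
    * For example, with cpp=4 and X tiling, draw_x=100 and draw_y=10 give
    * tile_x=100, tile_y=2 and a base offset of 8 * pitch bytes.
    */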
   if (region->tiling == I915_TILING_NONE) {
      *tile_x = 0;
      *tile_y = 0;
      return irb->draw_x * cpp + irb->draw_y * pitch;
   } else if (region->tiling == I915_TILING_X) {
      *tile_x = irb->draw_x % (512 / cpp);
      *tile_y = irb->draw_y % 8;
      return ((irb->draw_y / 8) * (8 * pitch) +
	      (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
   } else {
      assert(region->tiling == I915_TILING_Y);
      *tile_x = irb->draw_x % (128 / cpp);
      *tile_y = irb->draw_y % 32;
      return ((irb->draw_y / 32) * (32 * pitch) +
	      (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
   }
}

#ifndef I915
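/**
 * Return true if the renderbuffer's slice does not start on a tile boundary
 * and the hardware lacks surface tile offsets (original gen4), in which case
 * intel_render_texture() must copy the image into a temporary miptree.
 */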
static bool
need_tile_offset_workaround(struct brw_context *brw,
			    struct intel_renderbuffer *irb)
{
   uint32_t tile_x, tile_y;

   if (brw->has_surface_tile_offset)
      return false;

   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);

   return tile_x != 0 || tile_y != 0;
}
#endif

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc.
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;

   (void) fb;

   int layer;
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      irb = intel_renderbuffer_wrap_miptree(intel,
                                            mt,
                                            att->TextureLevel,
                                            layer,
                                            image->TexFormat,
                                            image->InternalFormat);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, att->TextureLevel, layer,
                                          image->TexFormat,
                                          image->InternalFormat)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _swrast_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
				    intel_image->base.Base.TexFormat,
				    intel_image->base.Base.Level,
				    intel_image->base.Base.Level,
                                    width, height, depth,
				    true);

      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}


/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(struct gl_context * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_object *tex_obj = att->Texture;
   struct gl_texture_image *image =
      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_texture_image *intel_image = intel_texture_image(image);

   DBG("Finish render %s texture tex=%u\n",
       _mesa_get_format_name(image->TexFormat), att->Texture->Name);

   /* Flag that this image may now be validated into the object's miptree. */
   if (intel_image)
      intel_image->used_as_render_target = false;

   /* Since we've (probably) rendered to the texture and will (likely) use
    * it in the texture domain later on in this batchbuffer, flush the
    * batch.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer like GEM does in the kernel.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct intel_context *intel = intel_context(ctx);
   const struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   const struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   int i;

   /*
    * The depth and stencil renderbuffers are the same renderbuffer or wrap
    * the same texture.
    */
   if (depthRb && stencilRb) {
      bool depth_stencil_are_same;
      if (depthRb == stencilRb)
	 depth_stencil_are_same = true;
      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
	       (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
	       (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
		fb->Attachment[BUFFER_STENCIL].Texture->Name))
	 depth_stencil_are_same = true;
      else
	 depth_stencil_are_same = false;

      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
   }

   for (i = 0; i < Elements(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
	 continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_renderbuffer_wrap_miptree() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
	 DBG("attachment without renderbuffer\n");
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
	 continue;
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
	 DBG("software rendering renderbuffer\n");
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
	 continue;
      }

      if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
	 DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
	     _mesa_get_format_name(irb->Base.Format));
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }

#ifdef I915
      if (!intel_span_supports_format(irb->Base.Format)) {
	 DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
	     _mesa_get_format_name(irb->Base.Format));
	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
      }
#endif
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D.
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
                                          GLint srcX0, GLint srcY0,
                                          GLint srcX1, GLint srcY1,
                                          GLint dstX0, GLint dstY0,
                                          GLint dstX1, GLint dstY1,
                                          GLbitfield mask, GLenum filter)
{
   if (mask & GL_COLOR_BUFFER_BIT) {
      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
      const struct gl_renderbuffer_attachment *drawAtt =
         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];

      /* If the source and destination are the same size with no
         mirroring, the rectangles are within the size of the
         texture, and there is no scissor, then we can use
         glCopyTexSubImage2D to implement the blit. This will end
         up as a fast hardware blit on some drivers. */
      if (drawAtt && drawAtt->Texture &&
          srcX0 - srcX1 == dstX0 - dstX1 &&
          srcY0 - srcY1 == dstY0 - dstY1 &&
          srcX1 >= srcX0 &&
          srcY1 >= srcY0 &&
          srcX0 >= 0 && srcX1 <= readFb->Width &&
          srcY0 >= 0 && srcY1 <= readFb->Height &&
          dstX0 >= 0 && dstX1 <= drawFb->Width &&
          dstY0 >= 0 && dstY1 <= drawFb->Height &&
          !ctx->Scissor.Enabled) {
         const struct gl_texture_object *texObj = drawAtt->Texture;
         const GLuint dstLevel = drawAtt->TextureLevel;
         const GLenum target = texObj->Target;

         struct gl_texture_image *texImage =
            _mesa_select_tex_image(ctx, texObj, target, dstLevel);

         if (intel_copy_texsubimage(intel_context(ctx),
                                    intel_texture_image(texImage),
                                    dstX0, dstY0,
                                    srcX0, srcY0,
                                    srcX1 - srcX0, /* width */
                                    srcY1 - srcY0))
            mask &= ~GL_COLOR_BUFFER_BIT;
      }
   }

   return mask;
}

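/**
 * \see dd_function_table::BlitFramebuffer
 *
 * Try the glCopyTexSubImage2D path first; whatever buffers remain are
 * handled by the meta-op blit.
 */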
static void
intel_blit_framebuffer(struct gl_context *ctx,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   /* Try the faster glCopyTexSubImage2D approach first, which uses the BLT. */
   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
                                                    srcX0, srcY0, srcX1, srcY1,
                                                    dstX0, dstY0, dstX1, dstY1,
                                                    mask, filter);
   if (mask == 0x0)
      return;

   _mesa_meta_BlitFramebuffer(ctx,
                              srcX0, srcY0, srcX1, srcY1,
                              dstX0, dstY0, dstX1, dstY1,
                              mask, filter);
}

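/**
 * Flag the renderbuffer's miptree slice as needing a HiZ resolve.  For a
 * separate-stencil wrapper with no miptree of its own, the flag is set on
 * the wrapped depth renderbuffer instead.
 */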
void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
                                                irb->mt_level,
                                                irb->mt_layer);
   } else if (irb->wrapped_depth) {
      intel_renderbuffer_set_needs_hiz_resolve(
	    intel_renderbuffer(irb->wrapped_depth));
   } else {
      return;
   }
}

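/**
 * Flag the renderbuffer's miptree slice as needing a depth resolve.  As with
 * the HiZ variant above, the flag propagates to the wrapped depth
 * renderbuffer when there is no miptree.
 */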
void
intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
{
   if (irb->mt) {
      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                  irb->mt_level,
                                                  irb->mt_layer);
   } else if (irb->wrapped_depth) {
      intel_renderbuffer_set_needs_depth_resolve(
	    intel_renderbuffer(irb->wrapped_depth));
   } else {
      return;
   }
}

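/**
 * Perform any pending HiZ resolve on the renderbuffer's miptree slice,
 * following the wrapped depth renderbuffer if necessary.  Returns false if
 * the renderbuffer has no miptree to resolve.
 */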
bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
			       struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(intel,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);
   if (irb->wrapped_depth)
      return intel_renderbuffer_resolve_hiz(intel,
					    intel_renderbuffer(irb->wrapped_depth));

   return false;
}

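/**
 * Perform any pending depth resolve on the renderbuffer's miptree slice,
 * following the wrapped depth renderbuffer if necessary.  Returns false if
 * the renderbuffer has no miptree to resolve.
 */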
bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
				 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(intel,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   if (irb->wrapped_depth)
      return intel_renderbuffer_resolve_depth(intel,
                                              intel_renderbuffer(irb->wrapped_depth));

   return false;
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;

#if FEATURE_OES_EGL_image
   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;
#endif
}