intel_fbo.c revision 925356c8c0b21998a1f53f042269818c19163385
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "main/enums.h"
30#include "main/imports.h"
31#include "main/macros.h"
32#include "main/mfeatures.h"
33#include "main/mtypes.h"
34#include "main/fbobject.h"
35#include "main/framebuffer.h"
36#include "main/renderbuffer.h"
37#include "main/context.h"
38#include "main/teximage.h"
39#include "main/image.h"
40
41#include "swrast/swrast.h"
42#include "drivers/common/meta.h"
43
44#include "intel_context.h"
45#include "intel_batchbuffer.h"
46#include "intel_buffers.h"
47#include "intel_blit.h"
48#include "intel_fbo.h"
49#include "intel_mipmap_tree.h"
50#include "intel_regions.h"
51#include "intel_tex.h"
52#include "intel_span.h"
53#ifndef I915
54#include "brw_context.h"
55#endif
56
57#define FILE_DEBUG_FLAG DEBUG_FBO
58
59
60bool
61intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
62{
63   struct intel_renderbuffer *rb = NULL;
64   if (fb)
65      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
66   return rb && rb->mt && rb->mt->hiz_mt;
67}
68
69struct intel_region*
70intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
71{
72   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
73   if (irb && irb->mt)
74      return irb->mt->region;
75   else
76      return NULL;
77}
78
79/**
80 * Create a new framebuffer object.
81 */
82static struct gl_framebuffer *
83intel_new_framebuffer(struct gl_context * ctx, GLuint name)
84{
85   /* There is only drawable state in intel_framebuffer at this time, so just
86    * use Mesa's class.
87    */
88   return _mesa_new_framebuffer(ctx, name);
89}
90
91
92/** Called by gl_renderbuffer::Delete() */
93static void
94intel_delete_renderbuffer(struct gl_renderbuffer *rb)
95{
96   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
97
98   ASSERT(irb);
99
100   intel_miptree_release(&irb->mt);
101
102   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
103   _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);
104
105   free(irb);
106}
107
108/**
109 * \brief Map a renderbuffer through the GTT.
110 *
111 * \see intel_map_renderbuffer()
112 */
113static void
114intel_map_renderbuffer_gtt(struct gl_context *ctx,
115                           struct gl_renderbuffer *rb,
116                           GLuint x, GLuint y, GLuint w, GLuint h,
117                           GLbitfield mode,
118                           GLubyte **out_map,
119                           GLint *out_stride)
120{
121   struct intel_context *intel = intel_context(ctx);
122   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
123   GLubyte *map;
124   int stride, flip_stride;
125
126   assert(irb->mt);
127
128   intel_renderbuffer_resolve_depth(intel, irb);
129   if (mode & GL_MAP_WRITE_BIT) {
130      intel_renderbuffer_set_needs_hiz_resolve(irb);
131   }
132
133   irb->map_mode = mode;
134   irb->map_x = x;
135   irb->map_y = y;
136   irb->map_w = w;
137   irb->map_h = h;
138
139   stride = irb->mt->region->pitch * irb->mt->region->cpp;
140
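   /* The window-system framebuffer (Name == 0) is stored top-to-bottom, while
    * GL's coordinate system is bottom-up, so map it starting at the last row
    * with a negative stride.  User renderbuffers are instead offset by the
    * draw offset of their slice within the miptree.
    */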
141   if (rb->Name == 0) {
142      y = irb->mt->region->height - 1 - y;
143      flip_stride = -stride;
144   } else {
145      x += irb->draw_x;
146      y += irb->draw_y;
147      flip_stride = stride;
148   }
149
150   if (drm_intel_bo_references(intel->batch.bo, irb->mt->region->bo)) {
151      intel_batchbuffer_flush(intel);
152   }
153
154   drm_intel_gem_bo_map_gtt(irb->mt->region->bo);
155
156   map = irb->mt->region->bo->virtual;
157   map += x * irb->mt->region->cpp;
158   map += (int)y * stride;
159
160   *out_map = map;
161   *out_stride = flip_stride;
162
163   DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
164       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
165       x, y, w, h, *out_map, *out_stride);
166}
167
168/**
169 * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
170 *
171 * On gen6+, we have LLC sharing, which means we can get high-performance
172 * access to linear-mapped buffers.
173 *
174 * This function allocates a temporary gem buffer at
175 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
176 * returns a map of that. (Note: only X-tiled buffers can be blitted.)
177 *
178 * \see intel_renderbuffer::map_bo
179 * \see intel_map_renderbuffer()
180 */
181static void
182intel_map_renderbuffer_blit(struct gl_context *ctx,
183			    struct gl_renderbuffer *rb,
184			    GLuint x, GLuint y, GLuint w, GLuint h,
185			    GLbitfield mode,
186			    GLubyte **out_map,
187			    GLint *out_stride)
188{
189   struct intel_context *intel = intel_context(ctx);
190   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
191
192   int src_x, src_y;
193   int dst_stride;
194
195   assert(irb->mt->region);
196   assert(intel->gen >= 6);
197   assert(!(mode & GL_MAP_WRITE_BIT));
198   assert(irb->mt->region->tiling == I915_TILING_X);
199
200   irb->map_mode = mode;
201   irb->map_x = x;
202   irb->map_y = y;
203   irb->map_w = w;
204   irb->map_h = h;
205
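   /* Pitch of the temporary linear destination, kept dword-aligned for the blit. */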
206   dst_stride = ALIGN(w * irb->mt->region->cpp, 4);
207
208   if (rb->Name) {
209      src_x = x + irb->draw_x;
210      src_y = y + irb->draw_y;
211   } else {
212      src_x = x;
213      src_y = irb->mt->region->height - y - h;
214   }
215
216   irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
217				    dst_stride * h, 4096);
218
219   /* We don't do the flip in the blit, because it's always so tricky to get
220    * right.
221    */
222   if (irb->map_bo &&
223       intelEmitCopyBlit(intel,
224			 irb->mt->region->cpp,
225			 irb->mt->region->pitch, irb->mt->region->bo,
226			 0, irb->mt->region->tiling,
227			 dst_stride / irb->mt->region->cpp, irb->map_bo,
228			 0, I915_TILING_NONE,
229			 src_x, src_y,
230			 0, 0,
231			 w, h,
232			 GL_COPY)) {
233      intel_batchbuffer_flush(intel);
234      drm_intel_bo_map(irb->map_bo, false);
235
236      if (rb->Name) {
237	 *out_map = irb->map_bo->virtual;
238	 *out_stride = dst_stride;
239      } else {
240	 *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
241	 *out_stride = -dst_stride;
242      }
243
244      DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
245	  __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
246	  src_x, src_y, w, h, *out_map, *out_stride);
247   } else {
248      /* Fall back to GTT mapping. */
249      drm_intel_bo_unreference(irb->map_bo);
250      irb->map_bo = NULL;
251      intel_map_renderbuffer_gtt(ctx, rb,
252				 x, y, w, h,
253				 mode,
254				 out_map, out_stride);
255   }
256}
257
258/**
259 * \brief Map a stencil renderbuffer.
260 *
261 * Stencil buffers are W-tiled. Since the GTT has no W fence, we must detile
262 * the buffer in software.
263 *
264 * This function allocates a temporary malloc'd buffer at
265 * intel_renderbuffer::map_buffer, detiles the stencil buffer into it, then
266 * returns the temporary buffer as the map.
267 *
268 * \see intel_renderbuffer::map_buffer
269 * \see intel_map_renderbuffer()
270 * \see intel_unmap_renderbuffer_s8()
271 */
272static void
273intel_map_renderbuffer_s8(struct gl_context *ctx,
274			  struct gl_renderbuffer *rb,
275			  GLuint x, GLuint y, GLuint w, GLuint h,
276			  GLbitfield mode,
277			  GLubyte **out_map,
278			  GLint *out_stride)
279{
280   struct intel_context *intel = intel_context(ctx);
281   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
282   uint8_t *tiled_s8_map;
283   uint8_t *untiled_s8_map;
284
285   assert(rb->Format == MESA_FORMAT_S8);
286   assert(irb->mt);
287
288   irb->map_mode = mode;
289   irb->map_x = x;
290   irb->map_y = y;
291   irb->map_w = w;
292   irb->map_h = h;
293
294   /* Flip the Y axis for the default framebuffer. */
295   int y_flip = (rb->Name == 0) ? -1 : 1;
296   int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
297
298   irb->map_buffer = malloc(w * h);
299   untiled_s8_map = irb->map_buffer;
300   tiled_s8_map = intel_region_map(intel, irb->mt->region, mode);
301
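   /* Copy each pixel out of the W-tiled stencil buffer into the linear
    * temporary, using intel_offset_S8() to compute the swizzled source offset.
    */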
302   for (uint32_t pix_y = 0; pix_y < h; pix_y++) {
303      for (uint32_t pix_x = 0; pix_x < w; pix_x++) {
304	 uint32_t flipped_y = y_flip * (int32_t)(y + pix_y) + y_bias;
305	 ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch,
306	                                    x + pix_x,
307	                                    flipped_y);
308	 untiled_s8_map[pix_y * w + pix_x] = tiled_s8_map[offset];
309      }
310   }
311
312   *out_map = untiled_s8_map;
313   *out_stride = w;
314
315   DBG("%s: rb %d (%s) s8 detiled mapped: (%d, %d) (%dx%d) -> %p/%d\n",
316       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
317       x, y, w, h, *out_map, *out_stride);
318}
319
320/**
321 * \brief Map a depthstencil buffer with separate stencil.
322 *
323 * A depthstencil renderbuffer, if using separate stencil, consists of a depth
324 * renderbuffer and a hidden stencil renderbuffer.  This function maps the
325 * depth buffer, whose format is MESA_FORMAT_X8_Z24, through the GTT and
326 * returns that as the mapped pointer. The caller need not be aware of the
327 * hidden stencil buffer and may safely assume that the mapped pointer points
328 * to a MESA_FORMAT_S8_Z24 buffer.
329 *
330 * The consistency between the depth buffer's S8 bits and the hidden stencil
331 * buffer is managed within intel_map_renderbuffer() and
332 * intel_unmap_renderbuffer() by scattering or gathering the stencil bits
333 * according to the map mode.
334 *
335 * \see intel_map_renderbuffer()
336 * \see intel_unmap_renderbuffer_separate_s8z24()
337 */
338static void
339intel_map_renderbuffer_separate_s8z24(struct gl_context *ctx,
340				      struct gl_renderbuffer *rb,
341				      GLuint x, GLuint y, GLuint w, GLuint h,
342				      GLbitfield mode,
343				      GLubyte **out_map,
344				      GLint *out_stride)
345{
346   struct intel_context *intel = intel_context(ctx);
347   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
348
349   uint8_t *s8z24_map;
350   int32_t s8z24_stride;
351
352   struct intel_renderbuffer *s8_irb;
353   uint8_t *s8_map;
354
355   assert(rb->Name != 0);
356   assert(rb->Format == MESA_FORMAT_S8_Z24);
357   assert(irb->wrapped_depth != NULL);
358   assert(irb->wrapped_stencil != NULL);
359
360   irb->map_mode = mode;
361   irb->map_x = x;
362   irb->map_y = y;
363   irb->map_w = w;
364   irb->map_h = h;
365
366   /* Map with write mode for the gather below. */
367   intel_map_renderbuffer_gtt(ctx, irb->wrapped_depth,
368			       x, y, w, h, mode | GL_MAP_WRITE_BIT,
369			       &s8z24_map, &s8z24_stride);
370
371   s8_irb = intel_renderbuffer(irb->wrapped_stencil);
372   s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_READ_BIT);
373
374   /* Gather the stencil buffer into the depth buffer. */
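   /* In packed S8_Z24 the stencil value is the high byte of each 32-bit pixel,
    * which on little-endian is byte offset 3 (hence the "+ 3" below).
    */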
375   for (uint32_t pix_y = 0; pix_y < h; ++pix_y) {
376      for (uint32_t pix_x = 0; pix_x < w; ++pix_x) {
377	 ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch,
378					       x + pix_x,
379					       y + pix_y);
380	 ptrdiff_t s8z24_offset = pix_y * s8z24_stride
381				+ pix_x * 4
382				+ 3;
383	 s8z24_map[s8z24_offset] = s8_map[s8_offset];
384      }
385   }
386
387   intel_region_unmap(intel, s8_irb->mt->region);
388
389   *out_map = s8z24_map;
390   *out_stride = s8z24_stride;
391}
392
393/**
394 * \see dd_function_table::MapRenderbuffer
395 */
396static void
397intel_map_renderbuffer(struct gl_context *ctx,
398		       struct gl_renderbuffer *rb,
399		       GLuint x, GLuint y, GLuint w, GLuint h,
400		       GLbitfield mode,
401		       GLubyte **out_map,
402		       GLint *out_stride)
403{
404   struct intel_context *intel = intel_context(ctx);
405   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
406
407   /* We are sometimes called on a renderbuffer with no storage by intel_span.c. */
408   if (!irb->mt && !irb->wrapped_depth) {
409      *out_map = NULL;
410      *out_stride = 0;
411      return;
412   }
413
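   /* Pick a mapping strategy: detile S8 in software, gather separate stencil
    * into the packed depth map, blit read-only X-tiled buffers to a linear
    * temporary on gen6+, or fall back to a plain GTT map.
    */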
414   if (rb->Format == MESA_FORMAT_S8) {
415      intel_map_renderbuffer_s8(ctx, rb, x, y, w, h, mode,
416			        out_map, out_stride);
417   } else if (irb->wrapped_depth) {
418      intel_map_renderbuffer_separate_s8z24(ctx, rb, x, y, w, h, mode,
419					    out_map, out_stride);
420   } else if (intel->gen >= 6 &&
421	      !(mode & GL_MAP_WRITE_BIT) &&
422	      irb->mt->region->tiling == I915_TILING_X) {
423      intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode,
424				  out_map, out_stride);
425   } else {
426      intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode,
427				 out_map, out_stride);
428   }
429}
430
431/**
432 * \see intel_map_renderbuffer_s8()
433 */
434static void
435intel_unmap_renderbuffer_s8(struct gl_context *ctx,
436			    struct gl_renderbuffer *rb)
437{
438   struct intel_context *intel = intel_context(ctx);
439   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
440
441   DBG("%s: rb %d (%s)\n", __FUNCTION__,
442       rb->Name, _mesa_get_format_name(rb->Format));
443
444   assert(rb->Format == MESA_FORMAT_S8);
445
446   if (!irb->map_buffer)
447      return;
448
449   if (irb->map_mode & GL_MAP_WRITE_BIT) {
450      /* The temporary buffer was written to, so we must copy its pixels into
451       * the real buffer.
452       */
453      uint8_t *untiled_s8_map = irb->map_buffer;
454      uint8_t *tiled_s8_map = irb->mt->region->bo->virtual;
455
456      /* Flip the Y axis for the default framebuffer. */
457      int y_flip = (rb->Name == 0) ? -1 : 1;
458      int y_bias = (rb->Name == 0) ? (rb->Height - 1) : 0;
459
460      for (uint32_t pix_y = 0; pix_y < irb->map_h; pix_y++) {
461	 for (uint32_t pix_x = 0; pix_x < irb->map_w; pix_x++) {
462	    uint32_t flipped_y = y_flip * (int32_t)(pix_y + irb->map_y) + y_bias;
463	    ptrdiff_t offset = intel_offset_S8(irb->mt->region->pitch,
464	                                       pix_x + irb->map_x,
465	                                       flipped_y);
466	    tiled_s8_map[offset] =
467	       untiled_s8_map[pix_y * irb->map_w + pix_x];
468	 }
469      }
470   }
471
472   intel_region_unmap(intel, irb->mt->region);
473   free(irb->map_buffer);
474   irb->map_buffer = NULL;
475}
476
477/**
478 * \brief Unmap a depthstencil renderbuffer with separate stencil.
479 *
480 * \see intel_map_renderbuffer_separate_s8z24()
481 * \see intel_unmap_renderbuffer()
482 */
483static void
484intel_unmap_renderbuffer_separate_s8z24(struct gl_context *ctx,
485				        struct gl_renderbuffer *rb)
486{
487   struct intel_context *intel = intel_context(ctx);
488   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
489   struct intel_renderbuffer *s8z24_irb;
490
491   assert(rb->Name != 0);
492   assert(rb->Format == MESA_FORMAT_S8_Z24);
493   assert(irb->wrapped_depth != NULL);
494   assert(irb->wrapped_stencil != NULL);
495
496   s8z24_irb = intel_renderbuffer(irb->wrapped_depth);
497
498   if (irb->map_mode & GL_MAP_WRITE_BIT) {
499      /* Copy the stencil bits from the depth buffer into the stencil buffer.
500       */
501      uint32_t map_x = irb->map_x;
502      uint32_t map_y = irb->map_y;
503      uint32_t map_w = irb->map_w;
504      uint32_t map_h = irb->map_h;
505
506      struct intel_renderbuffer *s8_irb;
507      uint8_t *s8_map;
508
509      s8_irb = intel_renderbuffer(irb->wrapped_stencil);
510      s8_map = intel_region_map(intel, s8_irb->mt->region, GL_MAP_WRITE_BIT);
511
512      int32_t s8z24_stride = 4 * s8z24_irb->mt->region->pitch;
513      uint8_t *s8z24_map = s8z24_irb->mt->region->bo->virtual
514			 + map_y * s8z24_stride
515			 + map_x * 4;
516
517      for (uint32_t pix_y = 0; pix_y < map_h; ++pix_y) {
518	 for (uint32_t pix_x = 0; pix_x < map_w; ++pix_x) {
519	    ptrdiff_t s8_offset = intel_offset_S8(s8_irb->mt->region->pitch,
520						  map_x + pix_x,
521						  map_y + pix_y);
522	    ptrdiff_t s8z24_offset = pix_y * s8z24_stride
523				   + pix_x * 4
524				   + 3;
525	    s8_map[s8_offset] = s8z24_map[s8z24_offset];
526	 }
527      }
528
529      intel_region_unmap(intel, s8_irb->mt->region);
530   }
531
532   drm_intel_gem_bo_unmap_gtt(s8z24_irb->mt->region->bo);
533}
534
535/**
536 * \see dd_function_table::UnmapRenderbuffer
537 */
538static void
539intel_unmap_renderbuffer(struct gl_context *ctx,
540			 struct gl_renderbuffer *rb)
541{
542   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
543
544   DBG("%s: rb %d (%s)\n", __FUNCTION__,
545       rb->Name, _mesa_get_format_name(rb->Format));
546
547   if (rb->Format == MESA_FORMAT_S8) {
548      intel_unmap_renderbuffer_s8(ctx, rb);
549   } else if (irb->wrapped_depth) {
550      intel_unmap_renderbuffer_separate_s8z24(ctx, rb);
551   } else if (irb->map_bo) {
552      /* Paired with intel_map_renderbuffer_blit(). */
553      drm_intel_bo_unmap(irb->map_bo);
554      drm_intel_bo_unreference(irb->map_bo);
555      irb->map_bo = NULL;
556   } else {
557      /* Paired with intel_map_renderbuffer_gtt(). */
558      if (irb->mt) {
559	 /* The miptree may be null when intel_map_renderbuffer() is
560	  * called from intel_span.c.
561	  */
562	 drm_intel_gem_bo_unmap_gtt(irb->mt->region->bo);
563      }
564   }
565}
566
567/**
568 * Return a pointer to a specific pixel in a renderbuffer.
569 */
570static void *
571intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
572                  GLint x, GLint y)
573{
574   /* By returning NULL we force all software rendering to go through
575    * the span routines.
576    */
577   return NULL;
578}
579
580
581/**
582 * Called via glRenderbufferStorageEXT() to set the format and allocate
583 * storage for a user-created renderbuffer.
584 */
585GLboolean
586intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
587                                 GLenum internalFormat,
588                                 GLuint width, GLuint height)
589{
590   struct intel_context *intel = intel_context(ctx);
591   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
592   int cpp, tiling;
593
594   ASSERT(rb->Name != 0);
595
596   switch (internalFormat) {
597   default:
598      /* Use the same format-choice logic as for textures.
599       * Renderbuffers aren't any different from textures for us,
600       * except they're less useful because you can't texture with
601       * them.
602       */
603      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
604							 GL_NONE, GL_NONE);
605      break;
606   case GL_STENCIL_INDEX:
607   case GL_STENCIL_INDEX1_EXT:
608   case GL_STENCIL_INDEX4_EXT:
609   case GL_STENCIL_INDEX8_EXT:
610   case GL_STENCIL_INDEX16_EXT:
611      /* These aren't actual texture formats, so force them here. */
612      if (intel->has_separate_stencil) {
613	 rb->Format = MESA_FORMAT_S8;
614      } else {
615	 assert(!intel->must_use_separate_stencil);
616	 rb->Format = MESA_FORMAT_S8_Z24;
617      }
618      break;
619   }
620
621   rb->Width = width;
622   rb->Height = height;
623   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
624   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
625   cpp = _mesa_get_format_bytes(rb->Format);
626
627   intel_flush(ctx);
628
629   intel_miptree_release(&irb->mt);
630
631   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
632       _mesa_lookup_enum_by_nr(internalFormat),
633       _mesa_get_format_name(rb->Format), width, height);
634
635   tiling = I915_TILING_NONE;
636   if (intel->use_texture_tiling) {
637      GLenum base_format = _mesa_get_format_base_format(rb->Format);
638
639      if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT ||
640			      base_format == GL_STENCIL_INDEX ||
641			      base_format == GL_DEPTH_STENCIL))
642	 tiling = I915_TILING_Y;
643      else
644	 tiling = I915_TILING_X;
645   }
646
647   if (irb->Base.Format == MESA_FORMAT_S8) {
648      /*
649       * The stencil buffer is W-tiled. However, we request from the kernel a
650       * non-tiled buffer because the GTT is incapable of W fencing.
651       *
652       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
653       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
654       *    The pitch must be set to 2x the value computed based on width, as
655       *    the stencil buffer is stored with two rows interleaved.
656       * To accomplish this, we resort to the nasty hack of doubling the drm
657       * region's cpp and halving its height.
658       *
659       * If we neglect to double the pitch, then render corruption occurs.
660       */
661      irb->mt = intel_miptree_create_for_renderbuffer(
662		  intel,
663		  rb->Format,
664		  I915_TILING_NONE,
665		  cpp * 2,
666		  ALIGN(width, 64),
667		  ALIGN((height + 1) / 2, 64));
668      if (!irb->mt)
669	 return false;
670
671   } else if (irb->Base.Format == MESA_FORMAT_S8_Z24
672	      && intel->has_separate_stencil) {
673
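      /* With separate stencil, a packed S8_Z24 renderbuffer is backed by two
       * hidden renderbuffers: an X8_Z24 depth buffer and an S8 stencil buffer,
       * referenced through wrapped_depth and wrapped_stencil.
       */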
674      bool ok = true;
675      struct gl_renderbuffer *depth_rb;
676      struct gl_renderbuffer *stencil_rb;
677
678      depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
679						   MESA_FORMAT_X8_Z24);
680      stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
681						     MESA_FORMAT_S8);
682      ok = depth_rb && stencil_rb;
683      ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb,
684						  depth_rb->InternalFormat,
685						  width, height);
686      ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb,
687						  stencil_rb->InternalFormat,
688						  width, height);
689
690      if (!ok) {
691	 if (depth_rb) {
692	    intel_delete_renderbuffer(depth_rb);
693	 }
694	 if (stencil_rb) {
695	    intel_delete_renderbuffer(stencil_rb);
696	 }
697	 return false;
698      }
699
700      depth_rb->Wrapped = rb;
701      stencil_rb->Wrapped = rb;
702      _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb);
703      _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb);
704
705   } else {
706      irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
707                                                      tiling, cpp,
708                                                      width, height);
709      if (!irb->mt)
710	 return false;
711
712      if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
713	 bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
714	 if (!ok) {
715	    intel_miptree_release(&irb->mt);
716	    return false;
717	 }
718      }
719   }
720
721   return true;
722}
723
724
725#if FEATURE_OES_EGL_image
726static void
727intel_image_target_renderbuffer_storage(struct gl_context *ctx,
728					struct gl_renderbuffer *rb,
729					void *image_handle)
730{
731   struct intel_context *intel = intel_context(ctx);
732   struct intel_renderbuffer *irb;
733   __DRIscreen *screen;
734   __DRIimage *image;
735
736   screen = intel->intelScreen->driScrnPriv;
737   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
738					      screen->loaderPrivate);
739   if (image == NULL)
740      return;
741
742   /* __DRIimage is opaque to the core so it has to be checked here */
743   switch (image->format) {
744   case MESA_FORMAT_RGBA8888_REV:
745      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
746            "glEGLImageTargetRenderbufferStorage(unsupported image format)");
747      return;
748      break;
749   default:
750      break;
751   }
752
753   irb = intel_renderbuffer(rb);
754   intel_miptree_release(&irb->mt);
755   irb->mt = intel_miptree_create_for_region(intel,
756                                             GL_TEXTURE_2D,
757                                             image->format,
758                                             image->region);
759   if (!irb->mt)
760      return;
761
762   rb->InternalFormat = image->internal_format;
763   rb->Width = image->region->width;
764   rb->Height = image->region->height;
765   rb->Format = image->format;
766   rb->DataType = image->data_type;
767   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
768					   image->internal_format);
769}
770#endif
771
772/**
773 * Called for each hardware renderbuffer when a _window_ is resized.
774 * Just update fields.
775 * Not used for user-created renderbuffers!
776 */
777static GLboolean
778intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
779                           GLenum internalFormat, GLuint width, GLuint height)
780{
781   ASSERT(rb->Name == 0);
782   rb->Width = width;
783   rb->Height = height;
784   rb->InternalFormat = internalFormat;
785
786   return true;
787}
788
789
790static void
791intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
792		     GLuint width, GLuint height)
793{
794   int i;
795
796   _mesa_resize_framebuffer(ctx, fb, width, height);
797
798   fb->Initialized = true; /* XXX remove someday */
799
800   if (fb->Name != 0) {
801      return;
802   }
803
804
805   /* Make sure all window system renderbuffers are up to date */
806   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
807      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
808
809      /* only resize if size is changing */
810      if (rb && (rb->Width != width || rb->Height != height)) {
811	 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
812      }
813   }
814}
815
816
817/** Dummy function for gl_renderbuffer::AllocStorage() */
818static GLboolean
819intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
820                        GLenum internalFormat, GLuint width, GLuint height)
821{
822   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
823   return false;
824}
825
826/**
827 * Create a new intel_renderbuffer which corresponds to an on-screen window,
828 * not a user-created renderbuffer.
829 */
830struct intel_renderbuffer *
831intel_create_renderbuffer(gl_format format)
832{
833   GET_CURRENT_CONTEXT(ctx);
834
835   struct intel_renderbuffer *irb;
836
837   irb = CALLOC_STRUCT(intel_renderbuffer);
838   if (!irb) {
839      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
840      return NULL;
841   }
842
843   _mesa_init_renderbuffer(&irb->Base, 0);
844   irb->Base.ClassID = INTEL_RB_CLASS;
845   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
846   irb->Base.Format = format;
847   irb->Base.InternalFormat = irb->Base._BaseFormat;
848   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);
849
850   /* intel-specific methods */
851   irb->Base.Delete = intel_delete_renderbuffer;
852   irb->Base.AllocStorage = intel_alloc_window_storage;
853   irb->Base.GetPointer = intel_get_pointer;
854
855   return irb;
856}
857
858
859struct gl_renderbuffer*
860intel_create_wrapped_renderbuffer(struct gl_context * ctx,
861				  int width, int height,
862				  gl_format format)
863{
864   /*
865    * The name here is irrelevant, as long as it's nonzero, because the
866    * renderbuffer never gets entered into Mesa's renderbuffer hash table.
867    */
868   GLuint name = ~0;
869
870   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
871   if (!irb) {
872      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
873      return NULL;
874   }
875
876   struct gl_renderbuffer *rb = &irb->Base;
877   _mesa_init_renderbuffer(rb, name);
878   rb->ClassID = INTEL_RB_CLASS;
879   rb->_BaseFormat = _mesa_get_format_base_format(format);
880   rb->Format = format;
881   rb->InternalFormat = rb->_BaseFormat;
882   rb->DataType = intel_mesa_format_to_rb_datatype(format);
883   rb->Width = width;
884   rb->Height = height;
885
886   return rb;
887}
888
889
890/**
891 * Create a new renderbuffer object.
892 * Typically called via glBindRenderbufferEXT().
893 */
894static struct gl_renderbuffer *
895intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
896{
897   /*struct intel_context *intel = intel_context(ctx); */
898   struct intel_renderbuffer *irb;
899
900   irb = CALLOC_STRUCT(intel_renderbuffer);
901   if (!irb) {
902      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
903      return NULL;
904   }
905
906   _mesa_init_renderbuffer(&irb->Base, name);
907   irb->Base.ClassID = INTEL_RB_CLASS;
908
909   /* intel-specific methods */
910   irb->Base.Delete = intel_delete_renderbuffer;
911   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
912   irb->Base.GetPointer = intel_get_pointer;
913   /* span routines set in alloc_storage function */
914
915   return &irb->Base;
916}
917
918
919/**
920 * Called via glBindFramebufferEXT().
921 */
922static void
923intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
924                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
925{
926   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
927      intel_draw_buffer(ctx);
928   }
929   else {
930      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
931   }
932}
933
934
935/**
936 * Called via glFramebufferRenderbufferEXT().
937 */
938static void
939intel_framebuffer_renderbuffer(struct gl_context * ctx,
940                               struct gl_framebuffer *fb,
941                               GLenum attachment, struct gl_renderbuffer *rb)
942{
943   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
944
945   intel_flush(ctx);
946
947   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
948   intel_draw_buffer(ctx);
949}
950
951static struct intel_renderbuffer*
952intel_renderbuffer_wrap_miptree(struct intel_context *intel,
953                                struct intel_mipmap_tree *mt,
954                                uint32_t level,
955                                uint32_t layer,
956                                gl_format format,
957                                GLenum internal_format);
958
959/**
960 * \par Special case for separate stencil
961 *
962 *     When wrapping a depthstencil texture that uses separate stencil, this
963 *     function is recursively called twice: once to create \c
964 *     irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
965 *     call to create \c irb->wrapped_depth, the \c format and \c
966 *     internal_format parameters do not match \c mt->format. In that case, \c
967 *     mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
968 *     MESA_FORMAT_X8_Z24.
969 *
970 * @return true on success
971 */
972static bool
973intel_renderbuffer_update_wrapper(struct intel_context *intel,
974                                  struct intel_renderbuffer *irb,
975                                  struct intel_mipmap_tree *mt,
976                                  uint32_t level,
977                                  uint32_t layer,
978                                  gl_format format,
979                                  GLenum internal_format)
980{
981   struct gl_renderbuffer *rb = &irb->Base;
982
983   rb->Format = format;
984   if (!intel_span_supports_format(rb->Format)) {
985      DBG("Render to texture BAD FORMAT %s\n",
986	  _mesa_get_format_name(rb->Format));
987      return false;
988   } else {
989      DBG("Render to texture %s\n", _mesa_get_format_name(rb->Format));
990   }
991
992   rb->InternalFormat = internal_format;
993   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
994   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
995   rb->Width = mt->level[level].width;
996   rb->Height = mt->level[level].height;
997
998   irb->Base.Delete = intel_delete_renderbuffer;
999   irb->Base.AllocStorage = intel_nop_alloc_storage;
1000
1001   intel_miptree_check_level_layer(mt, level, layer);
1002   irb->mt_level = level;
1003   irb->mt_layer = layer;
1004
1005   if (mt->stencil_mt && _mesa_is_depthstencil_format(rb->InternalFormat)) {
1006      assert((irb->wrapped_depth == NULL) == (irb->wrapped_stencil == NULL));
1007
1008      struct intel_renderbuffer *depth_irb;
1009      struct intel_renderbuffer *stencil_irb;
1010
1011      if (!irb->wrapped_depth) {
1012	 depth_irb = intel_renderbuffer_wrap_miptree(intel,
1013	                                             mt, level, layer,
1014	                                             MESA_FORMAT_X8_Z24,
1015	                                             GL_DEPTH_COMPONENT24);
1016	 stencil_irb = intel_renderbuffer_wrap_miptree(intel,
1017	                                               mt->stencil_mt,
1018	                                               level, layer,
1019	                                               MESA_FORMAT_S8,
1020	                                               GL_STENCIL_INDEX8);
1021	 _mesa_reference_renderbuffer(&irb->wrapped_depth, &depth_irb->Base);
1022	 _mesa_reference_renderbuffer(&irb->wrapped_stencil, &stencil_irb->Base);
1023
1024	 if (!irb->wrapped_depth || !irb->wrapped_stencil)
1025	    return false;
1026      } else {
1027	 bool ok = true;
1028
1029	 depth_irb = intel_renderbuffer(irb->wrapped_depth);
1030	 stencil_irb = intel_renderbuffer(irb->wrapped_stencil);
1031
1032	 ok &= intel_renderbuffer_update_wrapper(intel,
1033	                                         depth_irb,
1034	                                         mt,
1035	                                         level, layer,
1036	                                         MESA_FORMAT_X8_Z24,
1037	                                         GL_DEPTH_COMPONENT24);
1038	 ok &= intel_renderbuffer_update_wrapper(intel,
1039	                                         stencil_irb,
1040	                                         mt->stencil_mt,
1041	                                         level, layer,
1042	                                         MESA_FORMAT_S8,
1043	                                         GL_STENCIL_INDEX8);
1044	 if (!ok)
1045	    return false;
1046      }
1047   } else {
1048      intel_miptree_reference(&irb->mt, mt);
1049      intel_renderbuffer_set_draw_offset(irb);
1050
1051      if (mt->hiz_mt == NULL &&
1052	  intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
1053	 intel_miptree_alloc_hiz(intel, mt);
1054         if (!mt->hiz_mt)
1055            return false;
1056      }
1057   }
1058
1059   return true;
1060}
1061
1062/**
1063 * \brief Wrap a renderbuffer around a single slice of a miptree.
1064 *
1065 * Called by glFramebufferTexture*(). This just allocates a
1066 * ``struct intel_renderbuffer`` then calls
1067 * intel_renderbuffer_update_wrapper() to do the real work.
1068 *
1069 * \see intel_renderbuffer_update_wrapper()
1070 */
1071static struct intel_renderbuffer*
1072intel_renderbuffer_wrap_miptree(struct intel_context *intel,
1073                                struct intel_mipmap_tree *mt,
1074                                uint32_t level,
1075                                uint32_t layer,
1076                                gl_format format,
1077                                GLenum internal_format)
1078
1079{
1080   const GLuint name = ~0;   /* not significant, but distinct for debugging */
1081   struct gl_context *ctx = &intel->ctx;
1082   struct intel_renderbuffer *irb;
1083
1084   intel_miptree_check_level_layer(mt, level, layer);
1085
1086   irb = CALLOC_STRUCT(intel_renderbuffer);
1087   if (!irb) {
1088      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
1089      return NULL;
1090   }
1091
1092   _mesa_init_renderbuffer(&irb->Base, name);
1093   irb->Base.ClassID = INTEL_RB_CLASS;
1094
1095   if (!intel_renderbuffer_update_wrapper(intel, irb,
1096                                          mt, level, layer,
1097                                          format, internal_format)) {
1098      free(irb);
1099      return NULL;
1100   }
1101
1102   return irb;
1103}
1104
1105void
1106intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
1107{
1108   unsigned int dst_x, dst_y;
1109
1110   /* compute offset of the particular 2D image within the texture region */
1111   intel_miptree_get_image_offset(irb->mt,
1112				  irb->mt_level,
1113				  0, /* face, which we ignore */
1114				  irb->mt_layer,
1115				  &dst_x, &dst_y);
1116
1117   irb->draw_x = dst_x;
1118   irb->draw_y = dst_y;
1119}
1120
1121/**
1122 * Rendering to tiled buffers requires that the base address of the
1123 * buffer be aligned to a page boundary.  We generally render to
1124 * textures by pointing the surface at the mipmap image level, which
1125 * may not be aligned to a tile boundary.
1126 *
1127 * This function returns an appropriately-aligned base offset
1128 * according to the tiling restrictions, plus any required x/y offset
1129 * from there.
1130 */
1131uint32_t
1132intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
1133				uint32_t *tile_x,
1134				uint32_t *tile_y)
1135{
1136   struct intel_region *region = irb->mt->region;
1137   int cpp = region->cpp;
1138   uint32_t pitch = region->pitch * cpp;
1139
1140   if (region->tiling == I915_TILING_NONE) {
1141      *tile_x = 0;
1142      *tile_y = 0;
1143      return irb->draw_x * cpp + irb->draw_y * pitch;
1144   } else if (region->tiling == I915_TILING_X) {
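      /* An X tile is 512 bytes wide and 8 rows tall, 4096 bytes in total. */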
1145      *tile_x = irb->draw_x % (512 / cpp);
1146      *tile_y = irb->draw_y % 8;
1147      return ((irb->draw_y / 8) * (8 * pitch) +
1148	      (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
1149   } else {
1150      assert(region->tiling == I915_TILING_Y);
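      /* A Y tile is 128 bytes wide and 32 rows tall, 4096 bytes in total. */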
1151      *tile_x = irb->draw_x % (128 / cpp);
1152      *tile_y = irb->draw_y % 32;
1153      return ((irb->draw_y / 32) * (32 * pitch) +
1154	      (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
1155   }
1156}
1157
1158#ifndef I915
1159static bool
1160need_tile_offset_workaround(struct brw_context *brw,
1161			    struct intel_renderbuffer *irb)
1162{
1163   uint32_t tile_x, tile_y;
1164
1165   if (brw->has_surface_tile_offset)
1166      return false;
1167
1168   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);
1169
1170   return tile_x != 0 || tile_y != 0;
1171}
1172#endif
1173
1174/**
1175 * Called by glFramebufferTexture[123]DEXT() (and other places) to
1176 * prepare for rendering into texture memory.  This might be called
1177 * many times to choose different texture levels, cube faces, etc.
1178 * before intel_finish_render_texture() is ever called.
1179 */
1180static void
1181intel_render_texture(struct gl_context * ctx,
1182                     struct gl_framebuffer *fb,
1183                     struct gl_renderbuffer_attachment *att)
1184{
1185   struct intel_context *intel = intel_context(ctx);
1186   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
1187   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
1188   struct intel_texture_image *intel_image = intel_texture_image(image);
1189   struct intel_mipmap_tree *mt = intel_image->mt;
1190
1191   (void) fb;
1192
1193   int layer;
1194   if (att->CubeMapFace > 0) {
1195      assert(att->Zoffset == 0);
1196      layer = att->CubeMapFace;
1197   } else {
1198      layer = att->Zoffset;
1199   }
1200
1201   if (!intel_image->mt) {
1202      /* Fall back on drawing to a texture that doesn't have a miptree
1203       * (has a border, width/height 0, etc.)
1204       */
1205      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
1206      _swrast_render_texture(ctx, fb, att);
1207      return;
1208   }
1209   else if (!irb) {
1210      irb = intel_renderbuffer_wrap_miptree(intel,
1211                                            mt,
1212                                            att->TextureLevel,
1213                                            layer,
1214                                            image->TexFormat,
1215                                            image->InternalFormat);
1216
1217      if (irb) {
1218         /* bind the wrapper to the attachment point */
1219         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
1220      }
1221      else {
1222         /* fallback to software rendering */
1223         _swrast_render_texture(ctx, fb, att);
1224         return;
1225      }
1226   }
1227
1228   if (!intel_renderbuffer_update_wrapper(intel, irb,
1229                                          mt, att->TextureLevel, layer,
1230                                          image->TexFormat,
1231                                          image->InternalFormat)) {
1232       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
1233       _swrast_render_texture(ctx, fb, att);
1234       return;
1235   }
1236
1237   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
1238       _glthread_GetID(),
1239       att->Texture->Name, image->Width, image->Height,
1240       irb->Base.RefCount);
1241
1242   intel_image->used_as_render_target = true;
1243
1244#ifndef I915
1245   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
1246      /* Original gen4 hardware couldn't draw to a non-tile-aligned
1247       * destination in a miptree unless you actually setup your
1248       * renderbuffer as a miptree and used the fragile
1249       * lod/array_index/etc. controls to select the image.  So,
1250       * instead, we just make a new single-level miptree and render
1251       * into that.
1252       */
1253      struct intel_context *intel = intel_context(ctx);
1254      struct intel_mipmap_tree *new_mt;
1255      int width, height, depth;
1256
1257      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
1258
1259      new_mt = intel_miptree_create(intel, image->TexObject->Target,
1260				    intel_image->base.Base.TexFormat,
1261				    intel_image->base.Base.Level,
1262				    intel_image->base.Base.Level,
1263                                    width, height, depth,
1264				    true);
1265
1266      intel_miptree_copy_teximage(intel, intel_image, new_mt);
1267      intel_renderbuffer_set_draw_offset(irb);
1268
1269      intel_miptree_reference(&irb->mt, intel_image->mt);
1270      intel_miptree_release(&new_mt);
1271   }
1272#endif
1273   /* update drawing region, etc */
1274   intel_draw_buffer(ctx);
1275}
1276
1277
1278/**
1279 * Called by Mesa when rendering to a texture is done.
1280 */
1281static void
1282intel_finish_render_texture(struct gl_context * ctx,
1283                            struct gl_renderbuffer_attachment *att)
1284{
1285   struct intel_context *intel = intel_context(ctx);
1286   struct gl_texture_object *tex_obj = att->Texture;
1287   struct gl_texture_image *image =
1288      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
1289   struct intel_texture_image *intel_image = intel_texture_image(image);
1290
1291   DBG("Finish render texture tid %lx tex=%u\n",
1292       _glthread_GetID(), att->Texture->Name);
1293
1294   /* Flag that this image may now be validated into the object's miptree. */
1295   if (intel_image)
1296      intel_image->used_as_render_target = false;
1297
1298   /* Since we've (probably) rendered to the texture and will (likely) use
1299    * it in the texture domain later on in this batchbuffer, flush the
1300    * batch.  Once again, we wish for a domain tracker in libdrm to cover
1301    * usage inside of a batchbuffer like GEM does in the kernel.
1302    */
1303   intel_batchbuffer_emit_mi_flush(intel);
1304}
1305
1306/**
1307 * Do additional "completeness" testing of a framebuffer object.
1308 */
1309static void
1310intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
1311{
1312   struct intel_context *intel = intel_context(ctx);
1313   const struct intel_renderbuffer *depthRb =
1314      intel_get_renderbuffer(fb, BUFFER_DEPTH);
1315   const struct intel_renderbuffer *stencilRb =
1316      intel_get_renderbuffer(fb, BUFFER_STENCIL);
1317   int i;
1318
1319   /*
1320    * Unless the hardware has separate stencil, the depth and stencil
1321    * renderbuffers must be the same renderbuffer or wrap the same texture.
1322    */
1323   if (depthRb && stencilRb) {
1324      bool depth_stencil_are_same;
1325      if (depthRb == stencilRb)
1326	 depth_stencil_are_same = true;
1327      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
1328	       (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
1329	       (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
1330		fb->Attachment[BUFFER_STENCIL].Texture->Name))
1331	 depth_stencil_are_same = true;
1332      else
1333	 depth_stencil_are_same = false;
1334
1335      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
1336	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1337      }
1338   }
1339
1340   for (i = 0; i < Elements(fb->Attachment); i++) {
1341      struct gl_renderbuffer *rb;
1342      struct intel_renderbuffer *irb;
1343
1344      if (fb->Attachment[i].Type == GL_NONE)
1345	 continue;
1346
1347      /* A supported attachment will have a Renderbuffer set either
1348       * from being a Renderbuffer or being a texture that got the
1349       * intel_renderbuffer_wrap_miptree() treatment.
1350       */
1351      rb = fb->Attachment[i].Renderbuffer;
1352      if (rb == NULL) {
1353	 DBG("attachment without renderbuffer\n");
1354	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1355	 continue;
1356      }
1357
1358      irb = intel_renderbuffer(rb);
1359      if (irb == NULL) {
1360	 DBG("software rendering renderbuffer\n");
1361	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1362	 continue;
1363      }
1364
1365      if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
1366	 DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
1367	     _mesa_get_format_name(irb->Base.Format));
1368	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1369      }
1370
1371#ifdef I915
1372      if (!intel_span_supports_format(irb->Base.Format)) {
1373	 DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
1374	     _mesa_get_format_name(irb->Base.Format));
1375	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1376      }
1377#endif
1378   }
1379}
1380
1381/**
1382 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
1383 * We can do this when the dst renderbuffer is actually a texture and
1384 * there is no scaling, mirroring or scissoring.
1385 *
1386 * \return new buffer mask indicating the buffers left to blit using the
1387 *         normal path.
1388 */
1389static GLbitfield
1390intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
1391                                          GLint srcX0, GLint srcY0,
1392                                          GLint srcX1, GLint srcY1,
1393                                          GLint dstX0, GLint dstY0,
1394                                          GLint dstX1, GLint dstY1,
1395                                          GLbitfield mask, GLenum filter)
1396{
1397   if (mask & GL_COLOR_BUFFER_BIT) {
1398      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
1399      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
1400      const struct gl_renderbuffer_attachment *drawAtt =
1401         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
1402
1403      /* If the source and destination are the same size, there is no
1404         mirroring, the rectangles are within the size of the texture,
1405         and there is no scissor, then we can use glCopyTexSubImage2D to
1406         implement the blit.  This will end up as a fast hardware blit on
1407         some drivers. */
1408      if (drawAtt && drawAtt->Texture &&
1409          srcX0 - srcX1 == dstX0 - dstX1 &&
1410          srcY0 - srcY1 == dstY0 - dstY1 &&
1411          srcX1 >= srcX0 &&
1412          srcY1 >= srcY0 &&
1413          srcX0 >= 0 && srcX1 <= readFb->Width &&
1414          srcY0 >= 0 && srcY1 <= readFb->Height &&
1415          dstX0 >= 0 && dstX1 <= drawFb->Width &&
1416          dstY0 >= 0 && dstY1 <= drawFb->Height &&
1417          !ctx->Scissor.Enabled) {
1418         const struct gl_texture_object *texObj = drawAtt->Texture;
1419         const GLuint dstLevel = drawAtt->TextureLevel;
1420         const GLenum target = texObj->Target;
1421
1422         struct gl_texture_image *texImage =
1423            _mesa_select_tex_image(ctx, texObj, target, dstLevel);
1424
1425         if (intel_copy_texsubimage(intel_context(ctx),
1426                                    intel_texture_image(texImage),
1427                                    dstX0, dstY0,
1428                                    srcX0, srcY0,
1429                                    srcX1 - srcX0, /* width */
1430                                    srcY1 - srcY0))
1431            mask &= ~GL_COLOR_BUFFER_BIT;
1432      }
1433   }
1434
1435   return mask;
1436}
1437
1438static void
1439intel_blit_framebuffer(struct gl_context *ctx,
1440                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
1441                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
1442                       GLbitfield mask, GLenum filter)
1443{
1444   /* Try the faster glCopyTexSubImage2D approach first, which uses the BLT. */
1445   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
1446                                                    srcX0, srcY0, srcX1, srcY1,
1447                                                    dstX0, dstY0, dstX1, dstY1,
1448                                                    mask, filter);
1449   if (mask == 0x0)
1450      return;
1451
1452   _mesa_meta_BlitFramebuffer(ctx,
1453                              srcX0, srcY0, srcX1, srcY1,
1454                              dstX0, dstY0, dstX1, dstY1,
1455                              mask, filter);
1456}
1457
1458void
1459intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
1460{
1461   if (irb->mt) {
1462      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
1463                                                irb->mt_level,
1464                                                irb->mt_layer);
1465   } else if (irb->wrapped_depth) {
1466      intel_renderbuffer_set_needs_hiz_resolve(
1467	    intel_renderbuffer(irb->wrapped_depth));
1468   } else {
1469      return;
1470   }
1471}
1472
1473void
1474intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
1475{
1476   if (irb->mt) {
1477      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
1478                                                  irb->mt_level,
1479                                                  irb->mt_layer);
1480   } else if (irb->wrapped_depth) {
1481      intel_renderbuffer_set_needs_depth_resolve(
1482	    intel_renderbuffer(irb->wrapped_depth));
1483   } else {
1484      return;
1485   }
1486}
1487
1488bool
1489intel_renderbuffer_resolve_hiz(struct intel_context *intel,
1490			       struct intel_renderbuffer *irb)
1491{
1492   if (irb->mt)
1493      return intel_miptree_slice_resolve_hiz(intel,
1494                                             irb->mt,
1495                                             irb->mt_level,
1496                                             irb->mt_layer);
1497   if (irb->wrapped_depth)
1498      return intel_renderbuffer_resolve_hiz(intel,
1499					    intel_renderbuffer(irb->wrapped_depth));
1500
1501   return false;
1502}
1503
1504bool
1505intel_renderbuffer_resolve_depth(struct intel_context *intel,
1506				 struct intel_renderbuffer *irb)
1507{
1508   if (irb->mt)
1509      return intel_miptree_slice_resolve_depth(intel,
1510                                               irb->mt,
1511                                               irb->mt_level,
1512                                               irb->mt_layer);
1513
1514   if (irb->wrapped_depth)
1515      return intel_renderbuffer_resolve_depth(intel,
1516                                              intel_renderbuffer(irb->wrapped_depth));
1517
1518   return false;
1519}
1520
1521/**
1522 * Do one-time context initializations related to GL_EXT_framebuffer_object.
1523 * Hook in device driver functions.
1524 */
1525void
1526intel_fbo_init(struct intel_context *intel)
1527{
1528   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
1529   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
1530   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
1531   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
1532   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
1533   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
1534   intel->ctx.Driver.RenderTexture = intel_render_texture;
1535   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
1536   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
1537   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
1538   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
1539
1540#if FEATURE_OES_EGL_image
1541   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
1542      intel_image_target_renderbuffer_storage;
1543#endif
1544}
1545