intel_fbo.c revision 2d2bfd1f2643b93caf76087b6ac04544af52ee63
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "main/enums.h"
30#include "main/imports.h"
31#include "main/macros.h"
32#include "main/mfeatures.h"
33#include "main/mtypes.h"
34#include "main/fbobject.h"
35#include "main/framebuffer.h"
36#include "main/renderbuffer.h"
37#include "main/context.h"
38#include "main/teximage.h"
39#include "main/image.h"
40
41#include "swrast/swrast.h"
42#include "drivers/common/meta.h"
43
44#include "intel_context.h"
45#include "intel_batchbuffer.h"
46#include "intel_buffers.h"
47#include "intel_blit.h"
48#include "intel_fbo.h"
49#include "intel_mipmap_tree.h"
50#include "intel_regions.h"
51#include "intel_tex.h"
52#include "intel_span.h"
53#ifndef I915
54#include "brw_context.h"
55#endif
56
57#define FILE_DEBUG_FLAG DEBUG_FBO
58
59
60bool
61intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
62{
63   struct intel_renderbuffer *rb = NULL;
64   if (fb)
65      rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
66   return rb && rb->mt && rb->mt->hiz_mt;
67}
68
69struct intel_region*
70intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
71{
72   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
73   if (irb && irb->mt)
74      return irb->mt->region;
75   else
76      return NULL;
77}
78
79/**
80 * Create a new framebuffer object.
81 */
82static struct gl_framebuffer *
83intel_new_framebuffer(struct gl_context * ctx, GLuint name)
84{
85   /* Only drawable state in intel_framebuffer at this time, just use Mesa's
86    * class
87    */
88   return _mesa_new_framebuffer(ctx, name);
89}
90
91
/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   /* Drop our reference to the miptree; it is freed when the last
    * reference goes away.
    */
   intel_miptree_release(&irb->mt);

   /* Unreference the fake renderbuffers used for separate depth/stencil;
    * these are no-ops when the pointers were never set.
    */
   _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
   _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);

   free(irb);
}
107
/**
 * \brief Map a renderbuffer through the GTT.
 *
 * The CPU sees the buffer through the aperture, so tiled layouts are
 * handled by the hardware fence.
 *
 * \see intel_map_renderbuffer()
 */
static void
intel_map_renderbuffer_gtt(struct gl_context *ctx,
                           struct gl_renderbuffer *rb,
                           GLuint x, GLuint y, GLuint w, GLuint h,
                           GLbitfield mode,
                           GLubyte **out_map,
                           GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   GLubyte *map;
   int stride, flip_stride;

   assert(irb->mt);

   /* Depth data must be resolved before the CPU reads it; a CPU write in
    * turn invalidates the hiz data, so flag it for a later resolve.
    */
   intel_renderbuffer_resolve_depth(intel, irb);
   if (mode & GL_MAP_WRITE_BIT) {
      intel_renderbuffer_set_needs_hiz_resolve(irb);
   }

   /* Remember the mapped rectangle for intel_unmap_renderbuffer(). */
   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   /* region->pitch is in pixels; convert to bytes. */
   stride = irb->mt->region->pitch * irb->mt->region->cpp;

   if (rb->Name == 0) {
      /* Window-system buffer: GL's bottom-up orientation is achieved by
       * starting at the last row and walking with a negative stride.
       */
      y = irb->mt->region->height - 1 - y;
      flip_stride = -stride;
   } else {
      /* User FBO / texture slice: offset to this image within the region. */
      x += irb->draw_x;
      y += irb->draw_y;
      flip_stride = stride;
   }

   /* Flush any batched rendering that still references this BO before the
    * CPU touches it.
    */
   if (drm_intel_bo_references(intel->batch.bo, irb->mt->region->bo)) {
      intel_batchbuffer_flush(intel);
   }

   drm_intel_gem_bo_map_gtt(irb->mt->region->bo);

   map = irb->mt->region->bo->virtual;
   map += x * irb->mt->region->cpp;
   /* y may be large; keep the multiply signed as the original did. */
   map += (int)y * stride;

   *out_map = map;
   *out_stride = flip_stride;

   DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, *out_map, *out_stride);
}
167
/**
 * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
 *
 * On gen6+, we have LLC sharing, which means we can get high-performance
 * access to linear-mapped buffers.
 *
 * This function allocates a temporary gem buffer at
 * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
 * returns a map of that. (Note: Only X tiled buffers can be blitted).
 *
 * \see intel_renderbuffer::map_bo
 * \see intel_map_renderbuffer()
 */
static void
intel_map_renderbuffer_blit(struct gl_context *ctx,
			    struct gl_renderbuffer *rb,
			    GLuint x, GLuint y, GLuint w, GLuint h,
			    GLbitfield mode,
			    GLubyte **out_map,
			    GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   int src_x, src_y;
   int dst_stride;

   /* Preconditions enforced by intel_map_renderbuffer(): read-only map of
    * an X-tiled region on gen6+.
    */
   assert(irb->mt->region);
   assert(intel->gen >= 6);
   assert(!(mode & GL_MAP_WRITE_BIT));
   assert(irb->mt->region->tiling == I915_TILING_X);

   /* Remember the mapped rectangle for intel_unmap_renderbuffer(). */
   irb->map_mode = mode;
   irb->map_x = x;
   irb->map_y = y;
   irb->map_w = w;
   irb->map_h = h;

   /* Blit destination pitch must be dword-aligned. */
   dst_stride = ALIGN(w * irb->mt->region->cpp, 4);

   if (rb->Name) {
      /* User FBO / texture slice: offset to this image within the region. */
      src_x = x + irb->draw_x;
      src_y = y + irb->draw_y;
   } else {
      /* Window-system buffer: select the vertically-flipped rectangle. */
      src_x = x;
      src_y = irb->mt->region->height - y - h;
   }

   irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
				    dst_stride * h, 4096);

   /* We don't do the flip in the blit, because it's always so tricky to get
    * right.
    */
   if (irb->map_bo &&
       intelEmitCopyBlit(intel,
			 irb->mt->region->cpp,
			 irb->mt->region->pitch, irb->mt->region->bo,
			 0, irb->mt->region->tiling,
			 dst_stride / irb->mt->region->cpp, irb->map_bo,
			 0, I915_TILING_NONE,
			 src_x, src_y,
			 0, 0,
			 w, h,
			 GL_COPY)) {
      intel_batchbuffer_flush(intel);
      drm_intel_bo_map(irb->map_bo, false);

      if (rb->Name) {
	 *out_map = irb->map_bo->virtual;
	 *out_stride = dst_stride;
      } else {
	 /* Flip in software: point at the last row, negative stride. */
	 *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
	 *out_stride = -dst_stride;
      }

      DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
	  __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
	  src_x, src_y, w, h, *out_map, *out_stride);
   } else {
      /* Fallback to GTT mapping. */
      drm_intel_bo_unreference(irb->map_bo);
      irb->map_bo = NULL;
      intel_map_renderbuffer_gtt(ctx, rb,
				 x, y, w, h,
				 mode,
				 out_map, out_stride);
   }
}
257
258/**
259 * \see dd_function_table::MapRenderbuffer
260 */
261static void
262intel_map_renderbuffer(struct gl_context *ctx,
263		       struct gl_renderbuffer *rb,
264		       GLuint x, GLuint y, GLuint w, GLuint h,
265		       GLbitfield mode,
266		       GLubyte **out_map,
267		       GLint *out_stride)
268{
269   struct intel_context *intel = intel_context(ctx);
270   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
271
272   /* We sometimes get called with this by our intel_span.c usage. */
273   if (!irb->mt && !irb->wrapped_depth) {
274      *out_map = NULL;
275      *out_stride = 0;
276      return;
277   }
278
279   if (rb->Format == MESA_FORMAT_S8 || irb->wrapped_depth) {
280      void *map;
281      int stride;
282
283      /* For a window-system renderbuffer, we need to flip the mapping we
284       * receive upside-down.  So we need to ask for a rectangle on flipped
285       * vertically, and we then return a pointer to the bottom of it with a
286       * negative stride.
287       */
288      if (rb->Name == 0) {
289	 y = rb->Height - y - h;
290      }
291
292      intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
293			x, y, w, h, mode, &map, &stride);
294
295      if (rb->Name == 0) {
296	 map += (h - 1) * stride;
297	 stride = -stride;
298      }
299
300      DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
301	  __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
302	  x, y, w, h, *out_map, *out_stride);
303
304      *out_map = map;
305      *out_stride = stride;
306   } else if (intel->gen >= 6 &&
307	      !(mode & GL_MAP_WRITE_BIT) &&
308	      irb->mt->region->tiling == I915_TILING_X) {
309      intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode,
310				  out_map, out_stride);
311   } else {
312      intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode,
313				 out_map, out_stride);
314   }
315}
316
317/**
318 * \see dd_function_table::UnmapRenderbuffer
319 */
320static void
321intel_unmap_renderbuffer(struct gl_context *ctx,
322			 struct gl_renderbuffer *rb)
323{
324   struct intel_context *intel = intel_context(ctx);
325   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
326
327   DBG("%s: rb %d (%s)\n", __FUNCTION__,
328       rb->Name, _mesa_get_format_name(rb->Format));
329
330   if (rb->Format == MESA_FORMAT_S8 || irb->wrapped_depth) {
331      intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
332   } else if (irb->map_bo) {
333      /* Paired with intel_map_renderbuffer_blit(). */
334      drm_intel_bo_unmap(irb->map_bo);
335      drm_intel_bo_unreference(irb->map_bo);
336      irb->map_bo = 0;
337   } else {
338      /* Paired with intel_map_renderbuffer_gtt(). */
339      if (irb->mt) {
340	 /* The miptree may be null when intel_map_renderbuffer() is
341	  * called from intel_span.c.
342	  */
343	 drm_intel_gem_bo_unmap_gtt(irb->mt->region->bo);
344      }
345   }
346}
347
348/**
349 * Return a pointer to a specific pixel in a renderbuffer.
350 */
351static void *
352intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
353                  GLint x, GLint y)
354{
355   /* By returning NULL we force all software rendering to go through
356    * the span routines.
357    */
358   return NULL;
359}
360
361
/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   int cpp, tiling;

   ASSERT(rb->Name != 0);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
							 GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (intel->has_separate_stencil) {
	 rb->Format = MESA_FORMAT_S8;
      } else {
	 assert(!intel->must_use_separate_stencil);
	 rb->Format = MESA_FORMAT_S8_Z24;
      }
      break;
   }

   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   cpp = _mesa_get_format_bytes(rb->Format);

   intel_flush(ctx);

   /* Drop any previous storage before allocating new storage. */
   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   /* Pick a tiling mode: Y-tiling for depth/stencil on gen4+, otherwise
    * X-tiling, and no tiling when texture tiling is disabled.
    */
   tiling = I915_TILING_NONE;
   if (intel->use_texture_tiling) {
      GLenum base_format = _mesa_get_format_base_format(rb->Format);

      if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT ||
			      base_format == GL_STENCIL_INDEX ||
			      base_format == GL_DEPTH_STENCIL))
	 tiling = I915_TILING_Y;
      else
	 tiling = I915_TILING_X;
   }

   if (irb->Base.Format == MESA_FORMAT_S8) {
      /*
       * The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       *
       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       * To accomplish this, we resort to the nasty hack of doubling the drm
       * region's cpp and halving its height.
       *
       * If we neglect to double the pitch, then render corruption occurs.
       */
      irb->mt = intel_miptree_create_for_renderbuffer(
		  intel,
		  rb->Format,
		  I915_TILING_NONE,
		  cpp * 2,
		  ALIGN(width, 64),
		  ALIGN((height + 1) / 2, 64));
      if (!irb->mt)
	 return false;

   } else if (irb->Base.Format == MESA_FORMAT_S8_Z24
	      && intel->has_separate_stencil) {

      /* Separate stencil: fake the packed depth/stencil format with two
       * wrapped renderbuffers (X8_Z24 + S8), allocated by recursing into
       * this function for each.
       */
      bool ok = true;
      struct gl_renderbuffer *depth_rb;
      struct gl_renderbuffer *stencil_rb;
      struct intel_renderbuffer *depth_irb, *stencil_irb;

      depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
						   MESA_FORMAT_X8_Z24);
      stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
						     MESA_FORMAT_S8);
      ok = depth_rb && stencil_rb;
      ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb,
						  depth_rb->InternalFormat,
						  width, height);
      ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb,
						  stencil_rb->InternalFormat,
						  width, height);

      if (!ok) {
	 if (depth_rb) {
	    intel_delete_renderbuffer(depth_rb);
	 }
	 if (stencil_rb) {
	    intel_delete_renderbuffer(stencil_rb);
	 }
	 return false;
      }

      depth_irb = intel_renderbuffer(depth_rb);
      stencil_irb = intel_renderbuffer(stencil_rb);

      /* Link the stencil miptree into the depth miptree, and expose the
       * depth miptree as this renderbuffer's storage.
       */
      intel_miptree_reference(&depth_irb->mt->stencil_mt, stencil_irb->mt);
      intel_miptree_reference(&irb->mt, depth_irb->mt);

      depth_rb->Wrapped = rb;
      stencil_rb->Wrapped = rb;
      _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb);
      _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb);

   } else {
      /* Common case: one miptree holds the storage. */
      irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
                                                      tiling, cpp,
                                                      width, height);
      if (!irb->mt)
	 return false;

      if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
	 bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
	 if (!ok) {
	    intel_miptree_release(&irb->mt);
	    return false;
	 }
      }
   }

   return true;
}
511
512
513#if FEATURE_OES_EGL_image
514static void
515intel_image_target_renderbuffer_storage(struct gl_context *ctx,
516					struct gl_renderbuffer *rb,
517					void *image_handle)
518{
519   struct intel_context *intel = intel_context(ctx);
520   struct intel_renderbuffer *irb;
521   __DRIscreen *screen;
522   __DRIimage *image;
523
524   screen = intel->intelScreen->driScrnPriv;
525   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
526					      screen->loaderPrivate);
527   if (image == NULL)
528      return;
529
530   /* __DRIimage is opaque to the core so it has to be checked here */
531   switch (image->format) {
532   case MESA_FORMAT_RGBA8888_REV:
533      _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
534            "glEGLImageTargetRenderbufferStorage(unsupported image format");
535      return;
536      break;
537   default:
538      break;
539   }
540
541   irb = intel_renderbuffer(rb);
542   intel_miptree_release(&irb->mt);
543   irb->mt = intel_miptree_create_for_region(intel,
544                                             GL_TEXTURE_2D,
545                                             image->format,
546                                             image->region);
547   if (!irb->mt)
548      return;
549
550   rb->InternalFormat = image->internal_format;
551   rb->Width = image->region->width;
552   rb->Height = image->region->height;
553   rb->Format = image->format;
554   rb->DataType = image->data_type;
555   rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
556					   image->internal_format);
557}
558#endif
559
560/**
561 * Called for each hardware renderbuffer when a _window_ is resized.
562 * Just update fields.
563 * Not used for user-created renderbuffers!
564 */
565static GLboolean
566intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
567                           GLenum internalFormat, GLuint width, GLuint height)
568{
569   ASSERT(rb->Name == 0);
570   rb->Width = width;
571   rb->Height = height;
572   rb->InternalFormat = internalFormat;
573
574   return true;
575}
576
577
578static void
579intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
580		     GLuint width, GLuint height)
581{
582   int i;
583
584   _mesa_resize_framebuffer(ctx, fb, width, height);
585
586   fb->Initialized = true; /* XXX remove someday */
587
588   if (fb->Name != 0) {
589      return;
590   }
591
592
593   /* Make sure all window system renderbuffers are up to date */
594   for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
595      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
596
597      /* only resize if size is changing */
598      if (rb && (rb->Width != width || rb->Height != height)) {
599	 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
600      }
601   }
602}
603
604
605/** Dummy function for gl_renderbuffer::AllocStorage() */
606static GLboolean
607intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
608                        GLenum internalFormat, GLuint width, GLuint height)
609{
610   _mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
611   return false;
612}
613
614/**
615 * Create a new intel_renderbuffer which corresponds to an on-screen window,
616 * not a user-created renderbuffer.
617 */
618struct intel_renderbuffer *
619intel_create_renderbuffer(gl_format format)
620{
621   GET_CURRENT_CONTEXT(ctx);
622
623   struct intel_renderbuffer *irb;
624
625   irb = CALLOC_STRUCT(intel_renderbuffer);
626   if (!irb) {
627      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
628      return NULL;
629   }
630
631   _mesa_init_renderbuffer(&irb->Base, 0);
632   irb->Base.ClassID = INTEL_RB_CLASS;
633   irb->Base._BaseFormat = _mesa_get_format_base_format(format);
634   irb->Base.Format = format;
635   irb->Base.InternalFormat = irb->Base._BaseFormat;
636   irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);
637
638   /* intel-specific methods */
639   irb->Base.Delete = intel_delete_renderbuffer;
640   irb->Base.AllocStorage = intel_alloc_window_storage;
641   irb->Base.GetPointer = intel_get_pointer;
642
643   return irb;
644}
645
646
647struct gl_renderbuffer*
648intel_create_wrapped_renderbuffer(struct gl_context * ctx,
649				  int width, int height,
650				  gl_format format)
651{
652   /*
653    * The name here is irrelevant, as long as its nonzero, because the
654    * renderbuffer never gets entered into Mesa's renderbuffer hash table.
655    */
656   GLuint name = ~0;
657
658   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
659   if (!irb) {
660      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
661      return NULL;
662   }
663
664   struct gl_renderbuffer *rb = &irb->Base;
665   _mesa_init_renderbuffer(rb, name);
666   rb->ClassID = INTEL_RB_CLASS;
667   rb->_BaseFormat = _mesa_get_format_base_format(format);
668   rb->Format = format;
669   rb->InternalFormat = rb->_BaseFormat;
670   rb->DataType = intel_mesa_format_to_rb_datatype(format);
671   rb->Width = width;
672   rb->Height = height;
673
674   return rb;
675}
676
677
678/**
679 * Create a new renderbuffer object.
680 * Typically called via glBindRenderbufferEXT().
681 */
682static struct gl_renderbuffer *
683intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
684{
685   /*struct intel_context *intel = intel_context(ctx); */
686   struct intel_renderbuffer *irb;
687
688   irb = CALLOC_STRUCT(intel_renderbuffer);
689   if (!irb) {
690      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
691      return NULL;
692   }
693
694   _mesa_init_renderbuffer(&irb->Base, name);
695   irb->Base.ClassID = INTEL_RB_CLASS;
696
697   /* intel-specific methods */
698   irb->Base.Delete = intel_delete_renderbuffer;
699   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
700   irb->Base.GetPointer = intel_get_pointer;
701   /* span routines set in alloc_storage function */
702
703   return &irb->Base;
704}
705
706
707/**
708 * Called via glBindFramebufferEXT().
709 */
710static void
711intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
712                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
713{
714   if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
715      intel_draw_buffer(ctx);
716   }
717   else {
718      /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
719   }
720}
721
722
723/**
724 * Called via glFramebufferRenderbufferEXT().
725 */
726static void
727intel_framebuffer_renderbuffer(struct gl_context * ctx,
728                               struct gl_framebuffer *fb,
729                               GLenum attachment, struct gl_renderbuffer *rb)
730{
731   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
732
733   intel_flush(ctx);
734
735   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
736   intel_draw_buffer(ctx);
737}
738
739static struct intel_renderbuffer*
740intel_renderbuffer_wrap_miptree(struct intel_context *intel,
741                                struct intel_mipmap_tree *mt,
742                                uint32_t level,
743                                uint32_t layer,
744                                gl_format format,
745                                GLenum internal_format);
746
/**
 * \brief Point renderbuffer \c irb at a single slice of miptree \c mt.
 *
 * Updates the gl_renderbuffer fields from the miptree level, records the
 * level/layer, and takes the appropriate miptree references.
 *
 * \par Special case for separate stencil
 *
 *     When wrapping a depthstencil texture that uses separate stencil, this
 *     function is recursively called twice: once to create \c
 *     irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
 *     call to create \c irb->wrapped_depth, the \c format and \c
 *     internal_format parameters do not match \c mt->format. In that case, \c
 *     mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
 *     MESA_FORMAT_X8_Z24.
 *
 * @return true on success
 */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
                                  struct intel_renderbuffer *irb,
                                  struct intel_mipmap_tree *mt,
                                  uint32_t level,
                                  uint32_t layer,
                                  gl_format format,
                                  GLenum internal_format)
{
   struct gl_renderbuffer *rb = &irb->Base;

   /* Mirror the slice's properties into the gl_renderbuffer. */
   rb->Format = format;
   rb->InternalFormat = internal_format;
   rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
   rb->Width = mt->level[level].width;
   rb->Height = mt->level[level].height;

   /* Storage belongs to the texture; AllocStorage must never run. */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   if (mt->stencil_mt && _mesa_is_depthstencil_format(rb->InternalFormat)) {
      /* Separate-stencil case: wrap depth and stencil individually (see
       * the function comment).  The two wrappers are created or updated
       * together, never one without the other.
       */
      assert((irb->wrapped_depth == NULL) == (irb->wrapped_stencil == NULL));

      struct intel_renderbuffer *depth_irb;
      struct intel_renderbuffer *stencil_irb;

      if (!irb->wrapped_depth) {
	 /* First time: create both wrappers (recurses into this function). */
	 depth_irb = intel_renderbuffer_wrap_miptree(intel,
	                                             mt, level, layer,
	                                             MESA_FORMAT_X8_Z24,
	                                             GL_DEPTH_COMPONENT24);
	 stencil_irb = intel_renderbuffer_wrap_miptree(intel,
	                                               mt->stencil_mt,
	                                               level, layer,
	                                               MESA_FORMAT_S8,
	                                               GL_STENCIL_INDEX8);
	 _mesa_reference_renderbuffer(&irb->wrapped_depth, &depth_irb->Base);
	 _mesa_reference_renderbuffer(&irb->wrapped_stencil, &stencil_irb->Base);

	 if (!irb->wrapped_depth || !irb->wrapped_stencil)
	    return false;
      } else {
	 /* Already wrapped: retarget the existing wrappers at the new
	  * level/layer.
	  */
	 bool ok = true;

	 depth_irb = intel_renderbuffer(irb->wrapped_depth);
	 stencil_irb = intel_renderbuffer(irb->wrapped_stencil);

	 ok &= intel_renderbuffer_update_wrapper(intel,
	                                         depth_irb,
	                                         mt,
	                                         level, layer,
	                                         MESA_FORMAT_X8_Z24,
	                                         GL_DEPTH_COMPONENT24);
	 ok &= intel_renderbuffer_update_wrapper(intel,
	                                         stencil_irb,
	                                         mt->stencil_mt,
	                                         level, layer,
	                                         MESA_FORMAT_S8,
	                                         GL_STENCIL_INDEX8);
	 if (!ok)
	    return false;
      }

      /* Link the stencil miptree into the depth miptree and expose the
       * depth miptree as this renderbuffer's storage.
       */
      intel_miptree_reference(&depth_irb->mt->stencil_mt, stencil_irb->mt);
      intel_miptree_reference(&irb->mt, depth_irb->mt);
   } else {
      intel_miptree_reference(&irb->mt, mt);
      intel_renderbuffer_set_draw_offset(irb);

      /* Lazily allocate hiz for depth formats that support it. */
      if (mt->hiz_mt == NULL &&
	  intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
	 intel_miptree_alloc_hiz(intel, mt);
         if (!mt->hiz_mt)
            return false;
      }
   }

   return true;
}
844
845/**
846 * \brief Wrap a renderbuffer around a single slice of a miptree.
847 *
848 * Called by glFramebufferTexture*(). This just allocates a
849 * ``struct intel_renderbuffer`` then calls
850 * intel_renderbuffer_update_wrapper() to do the real work.
851 *
852 * \see intel_renderbuffer_update_wrapper()
853 */
854static struct intel_renderbuffer*
855intel_renderbuffer_wrap_miptree(struct intel_context *intel,
856                                struct intel_mipmap_tree *mt,
857                                uint32_t level,
858                                uint32_t layer,
859                                gl_format format,
860                                GLenum internal_format)
861
862{
863   const GLuint name = ~0;   /* not significant, but distinct for debugging */
864   struct gl_context *ctx = &intel->ctx;
865   struct intel_renderbuffer *irb;
866
867   intel_miptree_check_level_layer(mt, level, layer);
868
869   irb = CALLOC_STRUCT(intel_renderbuffer);
870   if (!irb) {
871      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
872      return NULL;
873   }
874
875   _mesa_init_renderbuffer(&irb->Base, name);
876   irb->Base.ClassID = INTEL_RB_CLASS;
877
878   if (!intel_renderbuffer_update_wrapper(intel, irb,
879                                          mt, level, layer,
880                                          format, internal_format)) {
881      free(irb);
882      return NULL;
883   }
884
885   return irb;
886}
887
888void
889intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
890{
891   unsigned int dst_x, dst_y;
892
893   /* compute offset of the particular 2D image within the texture region */
894   intel_miptree_get_image_offset(irb->mt,
895				  irb->mt_level,
896				  0, /* face, which we ignore */
897				  irb->mt_layer,
898				  &dst_x, &dst_y);
899
900   irb->draw_x = dst_x;
901   irb->draw_y = dst_y;
902}
903
904/**
905 * Rendering to tiled buffers requires that the base address of the
906 * buffer be aligned to a page boundary.  We generally render to
907 * textures by pointing the surface at the mipmap image level, which
908 * may not be aligned to a tile boundary.
909 *
910 * This function returns an appropriately-aligned base offset
911 * according to the tiling restrictions, plus any required x/y offset
912 * from there.
913 */
914uint32_t
915intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
916				uint32_t *tile_x,
917				uint32_t *tile_y)
918{
919   struct intel_region *region = irb->mt->region;
920   int cpp = region->cpp;
921   uint32_t pitch = region->pitch * cpp;
922
923   if (region->tiling == I915_TILING_NONE) {
924      *tile_x = 0;
925      *tile_y = 0;
926      return irb->draw_x * cpp + irb->draw_y * pitch;
927   } else if (region->tiling == I915_TILING_X) {
928      *tile_x = irb->draw_x % (512 / cpp);
929      *tile_y = irb->draw_y % 8;
930      return ((irb->draw_y / 8) * (8 * pitch) +
931	      (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
932   } else {
933      assert(region->tiling == I915_TILING_Y);
934      *tile_x = irb->draw_x % (128 / cpp);
935      *tile_y = irb->draw_y % 32;
936      return ((irb->draw_y / 32) * (32 * pitch) +
937	      (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
938   }
939}
940
941#ifndef I915
942static bool
943need_tile_offset_workaround(struct brw_context *brw,
944			    struct intel_renderbuffer *irb)
945{
946   uint32_t tile_x, tile_y;
947
948   if (brw->has_surface_tile_offset)
949      return false;
950
951   intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);
952
953   return tile_x != 0 || tile_y != 0;
954}
955#endif
956
957/**
958 * Called by glFramebufferTexture[123]DEXT() (and other places) to
959 * prepare for rendering into texture memory.  This might be called
960 * many times to choose different texture levels, cube faces, etc
961 * before intel_finish_render_texture() is ever called.
962 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;

   (void) fb;

   /* Pick the miptree layer being rendered to: a cube face and an
    * array/3D slice are mutually exclusive (CubeMapFace > 0 asserts
    * Zoffset == 0 below).
    */
   int layer;
   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _swrast_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      /* No renderbuffer is bound to the attachment yet: create a
       * wrapper renderbuffer around the selected miptree image.
       */
      irb = intel_renderbuffer_wrap_miptree(intel,
                                            mt,
                                            att->TextureLevel,
                                            layer,
                                            image->TexFormat,
                                            image->InternalFormat);

      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _swrast_render_texture(ctx, fb, att);
         return;
      }
   }

   /* Point the (possibly pre-existing) wrapper at the currently selected
    * level/layer; on failure, drop the wrapper and fall back to swrast.
    */
   if (!intel_renderbuffer_update_wrapper(intel, irb,
                                          mt, att->TextureLevel, layer,
                                          image->TexFormat,
                                          image->InternalFormat)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _swrast_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height,
       irb->Base.RefCount);

   /* Flag so texture validation knows this image's data lives in the
    * render target until intel_finish_render_texture() clears it.
    */
   intel_image->used_as_render_target = true;

#ifndef I915
   if (need_tile_offset_workaround(brw_context(ctx), irb)) {
      /* Original gen4 hardware couldn't draw to a non-tile-aligned
       * destination in a miptree unless you actually setup your
       * renderbuffer as a miptree and used the fragile
       * lod/array_index/etc. controls to select the image.  So,
       * instead, we just make a new single-level miptree and render
       * into that.
       */
      /* NOTE(review): this 'intel' shadows the function-scope one above;
       * both come from intel_context(ctx), so the value is the same.
       */
      struct intel_context *intel = intel_context(ctx);
      struct intel_mipmap_tree *new_mt;
      int width, height, depth;

      intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

      new_mt = intel_miptree_create(intel, image->TexObject->Target,
				    intel_image->base.Base.TexFormat,
				    intel_image->base.Base.Level,
				    intel_image->base.Base.Level,
                                    width, height, depth,
				    true);

      /* Copy the image into the new single-level tree; presumably this
       * repoints intel_image->mt at new_mt, which the reference below
       * then picks up -- TODO confirm against intel_miptree_copy_teximage.
       */
      intel_miptree_copy_teximage(intel, intel_image, new_mt);
      intel_renderbuffer_set_draw_offset(irb);

      intel_miptree_reference(&irb->mt, intel_image->mt);
      intel_miptree_release(&new_mt);
   }
#endif
   /* update drawing region, etc */
   intel_draw_buffer(ctx);
}
1059
1060
1061/**
1062 * Called by Mesa when rendering to a texture is done.
1063 */
1064static void
1065intel_finish_render_texture(struct gl_context * ctx,
1066                            struct gl_renderbuffer_attachment *att)
1067{
1068   struct intel_context *intel = intel_context(ctx);
1069   struct gl_texture_object *tex_obj = att->Texture;
1070   struct gl_texture_image *image =
1071      tex_obj->Image[att->CubeMapFace][att->TextureLevel];
1072   struct intel_texture_image *intel_image = intel_texture_image(image);
1073
1074   DBG("Finish render %s texture tex=%u\n",
1075       _mesa_get_format_name(image->TexFormat), att->Texture->Name);
1076
1077   /* Flag that this image may now be validated into the object's miptree. */
1078   if (intel_image)
1079      intel_image->used_as_render_target = false;
1080
1081   /* Since we've (probably) rendered to the texture and will (likely) use
1082    * it in the texture domain later on in this batchbuffer, flush the
1083    * batch.  Once again, we wish for a domain tracker in libdrm to cover
1084    * usage inside of a batchbuffer like GEM does in the kernel.
1085    */
1086   intel_batchbuffer_emit_mi_flush(intel);
1087}
1088
1089/**
1090 * Do additional "completeness" testing of a framebuffer object.
1091 */
1092static void
1093intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
1094{
1095   struct intel_context *intel = intel_context(ctx);
1096   const struct intel_renderbuffer *depthRb =
1097      intel_get_renderbuffer(fb, BUFFER_DEPTH);
1098   const struct intel_renderbuffer *stencilRb =
1099      intel_get_renderbuffer(fb, BUFFER_STENCIL);
1100   int i;
1101
1102   /*
1103    * The depth and stencil renderbuffers are the same renderbuffer or wrap
1104    * the same texture.
1105    */
1106   if (depthRb && stencilRb) {
1107      bool depth_stencil_are_same;
1108      if (depthRb == stencilRb)
1109	 depth_stencil_are_same = true;
1110      else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
1111	       (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
1112	       (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
1113		fb->Attachment[BUFFER_STENCIL].Texture->Name))
1114	 depth_stencil_are_same = true;
1115      else
1116	 depth_stencil_are_same = false;
1117
1118      if (!intel->has_separate_stencil && !depth_stencil_are_same) {
1119	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1120      }
1121   }
1122
1123   for (i = 0; i < Elements(fb->Attachment); i++) {
1124      struct gl_renderbuffer *rb;
1125      struct intel_renderbuffer *irb;
1126
1127      if (fb->Attachment[i].Type == GL_NONE)
1128	 continue;
1129
1130      /* A supported attachment will have a Renderbuffer set either
1131       * from being a Renderbuffer or being a texture that got the
1132       * intel_wrap_texture() treatment.
1133       */
1134      rb = fb->Attachment[i].Renderbuffer;
1135      if (rb == NULL) {
1136	 DBG("attachment without renderbuffer\n");
1137	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1138	 continue;
1139      }
1140
1141      irb = intel_renderbuffer(rb);
1142      if (irb == NULL) {
1143	 DBG("software rendering renderbuffer\n");
1144	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1145	 continue;
1146      }
1147
1148      if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
1149	 DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
1150	     _mesa_get_format_name(irb->Base.Format));
1151	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1152      }
1153
1154#ifdef I915
1155      if (!intel_span_supports_format(irb->Base.Format)) {
1156	 DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
1157	     _mesa_get_format_name(irb->Base.Format));
1158	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
1159      }
1160#endif
1161   }
1162}
1163
1164/**
1165 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
1166 * We can do this when the dst renderbuffer is actually a texture and
1167 * there is no scaling, mirroring or scissoring.
1168 *
1169 * \return new buffer mask indicating the buffers left to blit using the
1170 *         normal path.
1171 */
1172static GLbitfield
1173intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
1174                                          GLint srcX0, GLint srcY0,
1175                                          GLint srcX1, GLint srcY1,
1176                                          GLint dstX0, GLint dstY0,
1177                                          GLint dstX1, GLint dstY1,
1178                                          GLbitfield mask, GLenum filter)
1179{
1180   if (mask & GL_COLOR_BUFFER_BIT) {
1181      const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
1182      const struct gl_framebuffer *readFb = ctx->ReadBuffer;
1183      const struct gl_renderbuffer_attachment *drawAtt =
1184         &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
1185
1186      /* If the source and destination are the same size with no
1187         mirroring, the rectangles are within the size of the
1188         texture and there is no scissor then we can use
1189         glCopyTexSubimage2D to implement the blit. This will end
1190         up as a fast hardware blit on some drivers */
1191      if (drawAtt && drawAtt->Texture &&
1192          srcX0 - srcX1 == dstX0 - dstX1 &&
1193          srcY0 - srcY1 == dstY0 - dstY1 &&
1194          srcX1 >= srcX0 &&
1195          srcY1 >= srcY0 &&
1196          srcX0 >= 0 && srcX1 <= readFb->Width &&
1197          srcY0 >= 0 && srcY1 <= readFb->Height &&
1198          dstX0 >= 0 && dstX1 <= drawFb->Width &&
1199          dstY0 >= 0 && dstY1 <= drawFb->Height &&
1200          !ctx->Scissor.Enabled) {
1201         const struct gl_texture_object *texObj = drawAtt->Texture;
1202         const GLuint dstLevel = drawAtt->TextureLevel;
1203         const GLenum target = texObj->Target;
1204
1205         struct gl_texture_image *texImage =
1206            _mesa_select_tex_image(ctx, texObj, target, dstLevel);
1207
1208         if (intel_copy_texsubimage(intel_context(ctx),
1209                                    intel_texture_image(texImage),
1210                                    dstX0, dstY0,
1211                                    srcX0, srcY0,
1212                                    srcX1 - srcX0, /* width */
1213                                    srcY1 - srcY0))
1214            mask &= ~GL_COLOR_BUFFER_BIT;
1215      }
1216   }
1217
1218   return mask;
1219}
1220
1221static void
1222intel_blit_framebuffer(struct gl_context *ctx,
1223                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
1224                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
1225                       GLbitfield mask, GLenum filter)
1226{
1227   /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
1228   mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
1229                                                    srcX0, srcY0, srcX1, srcY1,
1230                                                    dstX0, dstY0, dstX1, dstY1,
1231                                                    mask, filter);
1232   if (mask == 0x0)
1233      return;
1234
1235   _mesa_meta_BlitFramebuffer(ctx,
1236                              srcX0, srcY0, srcX1, srcY1,
1237                              dstX0, dstY0, dstX1, dstY1,
1238                              mask, filter);
1239}
1240
1241void
1242intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
1243{
1244   if (irb->mt) {
1245      intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
1246                                                irb->mt_level,
1247                                                irb->mt_layer);
1248   } else if (irb->wrapped_depth) {
1249      intel_renderbuffer_set_needs_hiz_resolve(
1250	    intel_renderbuffer(irb->wrapped_depth));
1251   } else {
1252      return;
1253   }
1254}
1255
1256void
1257intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
1258{
1259   if (irb->mt) {
1260      intel_miptree_slice_set_needs_depth_resolve(irb->mt,
1261                                                  irb->mt_level,
1262                                                  irb->mt_layer);
1263   } else if (irb->wrapped_depth) {
1264      intel_renderbuffer_set_needs_depth_resolve(
1265	    intel_renderbuffer(irb->wrapped_depth));
1266   } else {
1267      return;
1268   }
1269}
1270
1271bool
1272intel_renderbuffer_resolve_hiz(struct intel_context *intel,
1273			       struct intel_renderbuffer *irb)
1274{
1275   if (irb->mt)
1276      return intel_miptree_slice_resolve_hiz(intel,
1277                                             irb->mt,
1278                                             irb->mt_level,
1279                                             irb->mt_layer);
1280   if (irb->wrapped_depth)
1281      return intel_renderbuffer_resolve_hiz(intel,
1282					    intel_renderbuffer(irb->wrapped_depth));
1283
1284   return false;
1285}
1286
1287bool
1288intel_renderbuffer_resolve_depth(struct intel_context *intel,
1289				 struct intel_renderbuffer *irb)
1290{
1291   if (irb->mt)
1292      return intel_miptree_slice_resolve_depth(intel,
1293                                               irb->mt,
1294                                               irb->mt_level,
1295                                               irb->mt_layer);
1296
1297   if (irb->wrapped_depth)
1298      return intel_renderbuffer_resolve_depth(intel,
1299                                              intel_renderbuffer(irb->wrapped_depth));
1300
1301   return false;
1302}
1303
1304/**
1305 * Do one-time context initializations related to GL_EXT_framebuffer_object.
1306 * Hook in device driver functions.
1307 */
1308void
1309intel_fbo_init(struct intel_context *intel)
1310{
1311   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
1312   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
1313   intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
1314   intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
1315   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
1316   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
1317   intel->ctx.Driver.RenderTexture = intel_render_texture;
1318   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
1319   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
1320   intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
1321   intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
1322
1323#if FEATURE_OES_EGL_image
1324   intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
1325      intel_image_target_renderbuffer_storage;
1326#endif
1327}
1328