brw_context.c revision 30f61c471de5a9637e5d830e2b5b9dc4145f94d2
/*
 Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "glsl/ralloc.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

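/**
 * Driver hook backing GL_ARB_internalformat_query (glGetInternalformativ
 * with GL_SAMPLES): reports the MSAA sample counts supported by this
 * hardware generation, in decreasing order.
 */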
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      samples[0] = 1;
      return 1;
   }
}

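/**
 * Driver hook for glGetString().  Returns the GL_VENDOR string and a
 * GL_RENDERER string derived from the PCI ID of the current chipset.
 */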
static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);
   const char *chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) "Intel Open Source Technology Center";

   case GL_RENDERER:
      switch (brw->intelScreen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, family, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString(buffer, chipset, 0);
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}

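/**
 * Viewport hook used as a resize notification on screens without DRI2
 * invalidate events (see brw_init_driver_functions): a viewport change
 * on a winsys framebuffer invalidates both drawables so their buffers
 * get re-queried.
 */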
static void
intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   (void) x;
   (void) y;
   (void) w;
   (void) h;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

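/**
 * UpdateState hook: notifies the swrast and vbo modules of GL state
 * changes and accumulates the new-state bits for the next draw.
 */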
static void
intelInvalidateState(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;
}

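/**
 * If front-buffer rendering is dirty, ask the loader to copy the fake
 * front buffer (FAKE_FRONT_LEFT) out to the real front buffer.
 */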
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (screen->dri2.loader->flushFrontBuffer != NULL &&
          driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         screen->dri2.loader->flushFrontBuffer(driDrawable,
                                               driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

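/**
 * Driver hook for glFlush(): flush the batchbuffer and any pending
 * front-buffer rendering out to the window system.
 */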
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);
   if (brw->is_front_buffer_rendering)
      brw->need_throttle = true;
}

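/**
 * Driver hook for glFinish(): flush, then wait for the last batch to
 * complete on the GPU.
 */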
void
intelFinish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

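/**
 * Fill in the dd_function_table with the core driver hooks, then layer
 * on the i965-specific texture, buffer, query, and transform feedback
 * implementations appropriate for this hardware generation.
 */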
static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

/**
 * Return array of MSAA modes supported by the hardware. The array is
 * zero-terminated and sorted in decreasing order.
 */
static const int*
brw_supported_msaa_modes(const struct brw_context *brw)
{
   static const int gen7_samples[] = {8, 4, 0};
   static const int gen6_samples[] = {4, 0};
   static const int gen4_samples[] = {0};
   if (brw->gen >= 7) {
      return gen7_samples;
   } else if (brw->gen == 6) {
      return gen6_samples;
   } else {
      return gen4_samples;
   }
}

/**
 * Override GL_MAX_SAMPLES and related constants according to value of driconf
 * option 'clamp_max_samples'.
 */
static void
brw_override_max_samples(struct brw_context *brw)
{
   const int clamp_max_samples = driQueryOptioni(&brw->optionCache,
                                                 "clamp_max_samples");
   if (clamp_max_samples < 0)
      return;

   const int *supported_msaa_modes = brw_supported_msaa_modes(brw);
   int max_samples = 0;

   /* Select the largest supported MSAA mode that does not exceed
    * clamp_max_samples.
    */
   for (int i = 0; supported_msaa_modes[i] != 0; ++i) {
      if (supported_msaa_modes[i] <= clamp_max_samples) {
         max_samples = supported_msaa_modes[i];
         break;
      }
   }

   brw->ctx.Const.MaxSamples = max_samples;
   brw->ctx.Const.MaxColorTextureSamples = max_samples;
   brw->ctx.Const.MaxDepthTextureSamples = max_samples;
   brw->ctx.Const.MaxIntegerSamples = max_samples;
}

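/**
 * Initialize ctx->Const: the implementation limits and shader compiler
 * options advertised to applications, based on hardware generation.
 */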
static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.FragmentProgram.MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.FragmentProgram.MaxTextureImageUnits);
   ctx->Const.VertexProgram.MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   if (brw->gen >= 7)
      ctx->Const.GeometryProgram.MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   else
      ctx->Const.GeometryProgram.MaxTextureImageUnits = 0;
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.VertexProgram.MaxTextureImageUnits +
      ctx->Const.FragmentProgram.MaxTextureImageUnits +
      ctx->Const.GeometryProgram.MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 9;
   ctx->Const.MaxCubeTextureLevels = 12;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   const int max_samples = brw_supported_msaa_modes(brw)[0];
   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.VertexProgram.MaxNativeInstructions = 16 * 1024;
   ctx->Const.VertexProgram.MaxAluInstructions = 0;
   ctx->Const.VertexProgram.MaxTexInstructions = 0;
   ctx->Const.VertexProgram.MaxTexIndirections = 0;
   ctx->Const.VertexProgram.MaxNativeAluInstructions = 0;
   ctx->Const.VertexProgram.MaxNativeTexInstructions = 0;
   ctx->Const.VertexProgram.MaxNativeTexIndirections = 0;
   ctx->Const.VertexProgram.MaxNativeAttribs = 16;
   ctx->Const.VertexProgram.MaxNativeTemps = 256;
   ctx->Const.VertexProgram.MaxNativeAddressRegs = 1;
   ctx->Const.VertexProgram.MaxNativeParameters = 1024;
   ctx->Const.VertexProgram.MaxEnvParams =
      MIN2(ctx->Const.VertexProgram.MaxNativeParameters,
           ctx->Const.VertexProgram.MaxEnvParams);

   ctx->Const.FragmentProgram.MaxNativeInstructions = 1024;
   ctx->Const.FragmentProgram.MaxNativeAluInstructions = 1024;
   ctx->Const.FragmentProgram.MaxNativeTexInstructions = 1024;
   ctx->Const.FragmentProgram.MaxNativeTexIndirections = 1024;
   ctx->Const.FragmentProgram.MaxNativeAttribs = 12;
   ctx->Const.FragmentProgram.MaxNativeTemps = 256;
   ctx->Const.FragmentProgram.MaxNativeAddressRegs = 0;
   ctx->Const.FragmentProgram.MaxNativeParameters = 1024;
   ctx->Const.FragmentProgram.MaxEnvParams =
      MIN2(ctx->Const.FragmentProgram.MaxNativeParameters,
           ctx->Const.FragmentProgram.MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
   ctx->Const.FragmentProgram.LowInt.RangeMin = 31;
   ctx->Const.FragmentProgram.LowInt.RangeMax = 30;
   ctx->Const.FragmentProgram.LowInt.Precision = 0;
   ctx->Const.FragmentProgram.HighInt = ctx->Const.FragmentProgram.LowInt;
   ctx->Const.FragmentProgram.MediumInt = ctx->Const.FragmentProgram.LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the conversion orders the vertices, which
    * affects the provoking-vertex decision. Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.UniformBooleanTrue = 1;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *     "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *      the base address of the first element of the surface, computed in
    *      software by adding the surface base address to the byte offset of
    *      the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.VertexProgram.MaxOutputComponents = 128;
      ctx->Const.GeometryProgram.MaxInputComponents = 64;
      ctx->Const.GeometryProgram.MaxOutputComponents = 128;
      ctx->Const.FragmentProgram.MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_TYPES; i++) {
      ctx->ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectOutput = true;

      ctx->ShaderCompilerOptions[i].EmitNoIndirectUniform =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].PreferDP4 = true;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
       brw->has_hiz = false;
       /* On gen6, you can only do separate stencil with HIZ. */
       if (brw->gen == 6)
          brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->disable_derivative_optimization =
      driQueryOptionb(&brw->optionCache, "disable_derivative_optimization");

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");
}

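/**
 * DRI entry point for creating a rendering context: allocates the
 * brw_context, copies device limits and workarounds from the screen,
 * initializes the Mesa context and supporting modules, and creates a
 * hardware context on Gen6+ so GPU state persists across context
 * switches.
 */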
GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;
   struct gl_config visual;

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      printf("%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      gen7_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      gen4_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   struct gl_context *ctx = &brw->ctx;

   if (mesaVis == NULL) {
      memset(&visual, 0, sizeof visual);
      mesaVis = &visual;
   }

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);
   brw_initialize_context_constants(brw);

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_batchbuffer_init(brw);

   brw_init_state(brw);

   intelInitExtensions(ctx);

   intel_fbo_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_surface_formats(brw);

   if (brw->is_g4x || brw->gen >= 5) {
      brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
   } else {
      brw->CMD_VF_STATISTICS = GEN4_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_965;
   }

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;

   if (brw->gen == 6)
      brw->urb.gen6_gs_previously_active = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;

   if (brw->gen < 6) {
      brw->curbe.last_buf = calloc(1, 4096);
      brw->curbe.next_buf = calloc(1, 4096);
   }

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   brw_fs_alloc_reg_sets(brw);
   brw_vec4_alloc_reg_set(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   /* Here we override context constants. We apply the overrides after
    * calculation of the context version because we do not want the overridden
    * constants to change the version.
    */
   brw_override_max_samples(brw);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   return true;
}

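/**
 * DRI entry point for destroying a context: tears down driver state in
 * roughly the reverse order of brwCreateContext.
 */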
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   drm_intel_bo_unreference(brw->vs.base.const_bo);
   drm_intel_bo_unreference(brw->wm.base.const_bo);

   free(brw->curbe.last_buf);
   free(brw->curbe.next_buf);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

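/**
 * DRI entry point called when a context loses its drawable bindings.
 */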
GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_SARGB8) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_ARGB8888;
      }
   }
}

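/**
 * DRI entry point for making a context current: flushes the previous
 * context if we're switching, then binds the draw/read framebuffers.
 */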
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      intel_prepare_render(brw);
      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

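/**
 * Resolve any outstanding MSAA or fast-clear data in the drawable's
 * front and back buffers before handing them to the window system.
 */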
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
         BUFFER_BACK_LEFT,
         BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_miptree_downsample(brw, rb->mt);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

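/**
 * Query DRI2 for the drawable's current buffers and attach the resulting
 * regions to the framebuffer's renderbuffers, then update the
 * framebuffer's size to match the drawable.
 */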
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   struct brw_context *brw = context->driverPrivate;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
       switch (buffers[i].attachment) {
       case __DRI_BUFFER_FRONT_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
           region_name = "dri2 front buffer";
           break;

       case __DRI_BUFFER_FAKE_FRONT_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
           region_name = "dri2 fake front buffer";
           break;

       case __DRI_BUFFER_BACK_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
           region_name = "dri2 back buffer";
           break;

       case __DRI_BUFFER_DEPTH:
       case __DRI_BUFFER_HIZ:
       case __DRI_BUFFER_DEPTH_STENCIL:
       case __DRI_BUFFER_STENCIL:
       case __DRI_BUFFER_ACCUM:
       default:
           fprintf(stderr,
                   "unhandled buffer attach event, attachment type %d\n",
                   buffers[i].attachment);
           return;
       }

       intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/draw
 * buffer state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (brw->is_front_buffer_rendering)
      brw->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done, and then throttle when we get
    * here as we prepare to render the next frame.  At this point, the
    * round trips for swap/copy and getting new buffers are done, so
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_throttle && brw->first_post_swapbuffers_batch) {
      if (!brw->disable_throttling)
         drm_intel_bo_wait_rendering(brw->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;
      brw->need_throttle = false;
   }
}

1092
1093/**
1094 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
1095 *
1096 * To determine which DRI buffers to request, examine the renderbuffers
1097 * attached to the drawable's framebuffer. Then request the buffers with
1098 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
1099 *
1100 * This is called from intel_update_renderbuffers().
1101 *
1102 * \param drawable      Drawable whose buffers are queried.
1103 * \param buffers       [out] List of buffers returned by DRI2 query.
1104 * \param buffer_count  [out] Number of buffers returned.
1105 *
1106 * \see intel_update_renderbuffers()
1107 * \see DRI2GetBuffers()
1108 * \see DRI2GetBuffersWithFormat()
1109 */
1110static void
1111intel_query_dri2_buffers(struct brw_context *brw,
1112                         __DRIdrawable *drawable,
1113                         __DRIbuffer **buffers,
1114                         int *buffer_count)
1115{
1116   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
1117   struct gl_framebuffer *fb = drawable->driverPrivate;
1118   int i = 0;
1119   unsigned attachments[8];
1120
1121   struct intel_renderbuffer *front_rb;
1122   struct intel_renderbuffer *back_rb;
1123
1124   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1125   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1126
1127   memset(attachments, 0, sizeof(attachments));
1128   if ((brw->is_front_buffer_rendering ||
1129        brw->is_front_buffer_reading ||
1130        !back_rb) && front_rb) {
1131      /* If a fake front buffer is in use, then querying for
1132       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1133       * the real front buffer to the fake front buffer.  So before doing the
1134       * query, we need to make sure all the pending drawing has landed in the
1135       * real front buffer.
1136       */
1137      intel_batchbuffer_flush(brw);
1138      intel_flush_front(&brw->ctx);
1139
1140      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1141      attachments[i++] = intel_bits_per_pixel(front_rb);
1142   } else if (front_rb && brw->front_buffer_dirty) {
1143      /* We have pending front buffer rendering, but we aren't querying for a
1144       * front buffer.  If the front buffer we have is a fake front buffer,
1145       * the X server is going to throw it away when it processes the query.
1146       * So before doing the query, make sure all the pending drawing has
1147       * landed in the real front buffer.
1148       */
1149      intel_batchbuffer_flush(brw);
1150      intel_flush_front(&brw->ctx);
1151   }
1152
1153   if (back_rb) {
1154      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1155      attachments[i++] = intel_bits_per_pixel(back_rb);
1156   }
1157
1158   assert(i <= ARRAY_SIZE(attachments));
1159
1160   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
1161                                                        &drawable->w,
1162                                                        &drawable->h,
1163                                                        attachments, i / 2,
1164                                                        buffer_count,
1165                                                        drawable->loaderPrivate);
1166}
1167
1168/**
1169 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1170 *
1171 * This is called from intel_update_renderbuffers().
1172 *
1173 * \par Note:
1174 *    DRI buffers whose attachment point is DRI2BufferStencil or
1175 *    DRI2BufferDepthStencil are handled as special cases.
1176 *
1177 * \param buffer_name is a human readable name, such as "dri2 front buffer",
1178 *        that is passed to intel_region_alloc_for_handle().
1179 *
1180 * \see intel_update_renderbuffers()
1181 * \see intel_region_alloc_for_handle()
1182 */
1183static void
1184intel_process_dri2_buffer(struct brw_context *brw,
1185                          __DRIdrawable *drawable,
1186                          __DRIbuffer *buffer,
1187                          struct intel_renderbuffer *rb,
1188                          const char *buffer_name)
1189{
1190   struct intel_region *region = NULL;
1191
1192   if (!rb)
1193      return;
1194
1195   unsigned num_samples = rb->Base.Base.NumSamples;
1196
1197   /* We try to avoid closing and reopening the same BO name, because the first
1198    * use of a mapping of the buffer involves a bunch of page faulting which is
1199    * moderately expensive.
1200    */
1201   if (num_samples == 0) {
1202       if (rb->mt &&
1203           rb->mt->region &&
1204           rb->mt->region->name == buffer->name)
1205          return;
1206   } else {
1207       if (rb->mt &&
1208           rb->mt->singlesample_mt &&
1209           rb->mt->singlesample_mt->region &&
1210           rb->mt->singlesample_mt->region->name == buffer->name)
1211          return;
1212   }
1213
1214   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1215      fprintf(stderr,
1216              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1217              buffer->name, buffer->attachment,
1218              buffer->cpp, buffer->pitch);
1219   }
1220
1221   intel_miptree_release(&rb->mt);
1222   region = intel_region_alloc_for_handle(brw->intelScreen,
1223                                          buffer->cpp,
1224                                          drawable->w,
1225                                          drawable->h,
1226                                          buffer->pitch,
1227                                          buffer->name,
1228                                          buffer_name);
1229   if (!region)
1230      return;
1231
1232   rb->mt = intel_miptree_create_for_dri2_buffer(brw,
1233                                                 buffer->attachment,
1234                                                 intel_rb_format(rb),
1235                                                 num_samples,
1236                                                 region);
1237   intel_region_release(&region);
1238}
1239