brw_context.c revision 3f319eef76a31776085accb38c06851bc04f64b8
/*
 Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "glsl/ralloc.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

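/**
 * Report the MSAA sample counts the hardware supports for a format,
 * in decreasing order (the same lists brw_supported_msaa_modes()
 * returns below): 8x and 4x on Gen7, 4x on Gen6, single-sampled on
 * earlier generations.
 */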
static size_t
brw_query_samples_for_format(struct gl_context *ctx, GLenum target,
                             GLenum internalFormat, int samples[16])
{
   struct brw_context *brw = brw_context(ctx);

   (void) target;

   switch (brw->gen) {
   case 7:
      samples[0] = 8;
      samples[1] = 4;
      return 2;

   case 6:
      samples[0] = 4;
      return 1;

   default:
      samples[0] = 1;
      return 1;
   }
}

static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);
   const char *chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) "Intel Open Source Technology Center";
      break;

   case GL_RENDERER:
      switch (brw->intelScreen->deviceID) {
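      /* Including pci_ids/i965_pci_ids.h with CHIPSET() defined as below
       * expands to one "case id: chipset = str; break;" per known PCI ID,
       * so new hardware only needs an entry in that table.
       */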
#undef CHIPSET
#define CHIPSET(id, family, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString(buffer, chipset, 0);
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   (void) x;
   (void) y;
   (void) w;
   (void) h;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      dri2InvalidateDrawable(driContext->driDrawablePriv);
      dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intelInvalidateState(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;
}

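/**
 * Push any pending front-buffer rendering out to the window system:
 * resolve the drawable, flush the batch, and ask the loader to copy
 * the fake front buffer's contents to the real front buffer.
 */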
static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (screen->dri2.loader->flushFrontBuffer != NULL &&
          driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both front and back buffer. It
          * is unnecessary to resolve the back, but harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         screen->dri2.loader->flushFrontBuffer(driDrawable,
                                               driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

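/**
 * glFlush(): submit the current batch and flush any front-buffer
 * rendering.  Front-buffer rendering also arms the swap throttle
 * (see intel_prepare_render()).
 */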
static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);
   if (brw->is_front_buffer_rendering)
      brw->need_throttle = true;
}

void
intelFinish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);

   functions->QuerySamplesForFormat = brw_query_samples_for_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   functions->GetTransformFeedbackVertexCount =
      brw_get_transform_feedback_vertex_count;
   if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

/**
 * Return array of MSAA modes supported by the hardware. The array is
 * zero-terminated and sorted in decreasing order.
 */
static const int*
brw_supported_msaa_modes(const struct brw_context *brw)
{
   static const int gen7_samples[] = {8, 4, 0};
   static const int gen6_samples[] = {4, 0};
   static const int gen4_samples[] = {0};
   if (brw->gen >= 7) {
      return gen7_samples;
   } else if (brw->gen == 6) {
      return gen6_samples;
   } else {
      return gen4_samples;
   }
}

/**
 * Override GL_MAX_SAMPLES and related constants according to value of driconf
 * option 'clamp_max_samples'.
 */
static void
brw_override_max_samples(struct brw_context *brw)
{
   const int clamp_max_samples = driQueryOptioni(&brw->optionCache,
                                                 "clamp_max_samples");
   if (clamp_max_samples < 0)
      return;

   const int *supported_msaa_modes = brw_supported_msaa_modes(brw);
   int max_samples = 0;

   /* Select the largest supported MSAA mode that does not exceed
    * clamp_max_samples.
    */
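   /* For example, clamp_max_samples=6 on Gen7 scans {8, 4, 0} and
    * settles on 4.
    */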
   for (int i = 0; supported_msaa_modes[i] != 0; ++i) {
      if (supported_msaa_modes[i] <= clamp_max_samples) {
         max_samples = supported_msaa_modes[i];
         break;
      }
   }

   brw->ctx.Const.MaxSamples = max_samples;
   brw->ctx.Const.MaxColorTextureSamples = max_samples;
   brw->ctx.Const.MaxDepthTextureSamples = max_samples;
   brw->ctx.Const.MaxIntegerSamples = max_samples;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

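   /* The GPU's timestamp counter is effectively 36 bits wide on these
    * generations, so that is all we can advertise for timestamp queries.
    */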
   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.StripTextureBorder = true;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.FragmentProgram.MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.FragmentProgram.MaxTextureImageUnits);
   ctx->Const.VertexProgram.MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   if (brw->gen >= 7)
      ctx->Const.GeometryProgram.MaxTextureImageUnits = BRW_MAX_TEX_UNIT;
   else
      ctx->Const.GeometryProgram.MaxTextureImageUnits = 0;
   ctx->Const.MaxCombinedTextureImageUnits =
      ctx->Const.VertexProgram.MaxTextureImageUnits +
      ctx->Const.FragmentProgram.MaxTextureImageUnits +
      ctx->Const.GeometryProgram.MaxTextureImageUnits;

   ctx->Const.MaxTextureLevels = 14; /* 8192 */
   if (ctx->Const.MaxTextureLevels > MAX_TEXTURE_LEVELS)
      ctx->Const.MaxTextureLevels = MAX_TEXTURE_LEVELS;
   ctx->Const.Max3DTextureLevels = 9;
   ctx->Const.MaxCubeTextureLevels = 12;

   if (brw->gen >= 7)
      ctx->Const.MaxArrayTextureLayers = 2048;
   else
      ctx->Const.MaxArrayTextureLayers = 512;

   ctx->Const.MaxTextureRectSize = 1 << 12;

   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   ctx->Const.MaxRenderbufferSize = 8192;

   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount = true;

   const int max_samples = brw_supported_msaa_modes(brw)[0];
   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;

   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.VertexProgram.MaxNativeInstructions = 16 * 1024;
   ctx->Const.VertexProgram.MaxAluInstructions = 0;
   ctx->Const.VertexProgram.MaxTexInstructions = 0;
   ctx->Const.VertexProgram.MaxTexIndirections = 0;
   ctx->Const.VertexProgram.MaxNativeAluInstructions = 0;
   ctx->Const.VertexProgram.MaxNativeTexInstructions = 0;
   ctx->Const.VertexProgram.MaxNativeTexIndirections = 0;
   ctx->Const.VertexProgram.MaxNativeAttribs = 16;
   ctx->Const.VertexProgram.MaxNativeTemps = 256;
   ctx->Const.VertexProgram.MaxNativeAddressRegs = 1;
   ctx->Const.VertexProgram.MaxNativeParameters = 1024;
   ctx->Const.VertexProgram.MaxEnvParams =
      MIN2(ctx->Const.VertexProgram.MaxNativeParameters,
           ctx->Const.VertexProgram.MaxEnvParams);

   ctx->Const.FragmentProgram.MaxNativeInstructions = 1024;
   ctx->Const.FragmentProgram.MaxNativeAluInstructions = 1024;
   ctx->Const.FragmentProgram.MaxNativeTexInstructions = 1024;
   ctx->Const.FragmentProgram.MaxNativeTexIndirections = 1024;
   ctx->Const.FragmentProgram.MaxNativeAttribs = 12;
   ctx->Const.FragmentProgram.MaxNativeTemps = 256;
   ctx->Const.FragmentProgram.MaxNativeAddressRegs = 0;
   ctx->Const.FragmentProgram.MaxNativeParameters = 1024;
   ctx->Const.FragmentProgram.MaxEnvParams =
      MIN2(ctx->Const.FragmentProgram.MaxNativeParameters,
           ctx->Const.FragmentProgram.MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
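   /* These follow the glGetShaderPrecisionFormat() convention: the range
    * values are log2-style bounds, so RangeMin = 31 covers -2^31 and
    * RangeMax = 30 covers 2^31 - 1, with Precision = 0 for an exact type.
    */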
   ctx->Const.FragmentProgram.LowInt.RangeMin = 31;
   ctx->Const.FragmentProgram.LowInt.RangeMax = 30;
   ctx->Const.FragmentProgram.LowInt.Precision = 0;
   ctx->Const.FragmentProgram.HighInt = ctx->Const.FragmentProgram.LowInt;
   ctx->Const.FragmentProgram.MediumInt = ctx->Const.FragmentProgram.LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure how the vertex order is handled there, which
    * affects the provoking vertex decision. Always use the last-vertex
    * convention for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.UniformBooleanTrue = 1;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *     "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *      the base address of the first element of the surface, computed in
    *      software by adding the surface base address to the byte offset of
    *      the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;
   ctx->Const.TextureBufferOffsetAlignment = 16;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.VertexProgram.MaxOutputComponents = 128;
      ctx->Const.GeometryProgram.MaxInputComponents = 64;
      ctx->Const.GeometryProgram.MaxOutputComponents = 128;
      ctx->Const.FragmentProgram.MaxInputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_TYPES; i++) {
      ctx->ShaderCompilerOptions[i].MaxIfDepth = brw->gen < 6 ? 16 : UINT_MAX;
      ctx->ShaderCompilerOptions[i].EmitCondCodes = true;
      ctx->ShaderCompilerOptions[i].EmitNoNoise = true;
      ctx->ShaderCompilerOptions[i].EmitNoMainReturn = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectInput = true;
      ctx->ShaderCompilerOptions[i].EmitNoIndirectOutput = true;

      ctx->ShaderCompilerOptions[i].EmitNoIndirectUniform =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].EmitNoIndirectTemp =
         (i == MESA_SHADER_FRAGMENT);
      ctx->ShaderCompilerOptions[i].LowerClipDistance = true;
   }

   ctx->ShaderCompilerOptions[MESA_SHADER_VERTEX].PreferDP4 = true;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
       brw->has_hiz = false;
       /* On gen6, you can only do separate stencil with HIZ. */
       if (brw->gen == 6)
          brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->disable_derivative_optimization =
      driQueryOptionb(&brw->optionCache, "disable_derivative_optimization");

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");
}

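/**
 * DRI2 context-creation entry point: allocate the brw_context, wire up
 * device info and driver vtables, initialize Mesa core plus the helper
 * modules, and create a hardware (GEM) context on Gen6+.
 */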
bool
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;
   struct gl_config visual;

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      printf("%s: failed to alloc context\n", __FUNCTION__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = screen->hw_must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      gen7_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      gen4_init_vtable_sampler_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   struct gl_context *ctx = &brw->ctx;

   if (mesaVis == NULL) {
      memset(&visual, 0, sizeof visual);
      mesaVis = &visual;
   }

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);
   brw_process_intel_debug_variable(brw);
   brw_initialize_context_constants(brw);

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_batchbuffer_init(brw);

   brw_init_state(brw);

   intelInitExtensions(ctx);

   intel_fbo_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   brw_init_surface_formats(brw);

   if (brw->is_g4x || brw->gen >= 5) {
      brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
   } else {
      brw->CMD_VF_STATISTICS = GEN4_3DSTATE_VF_STATISTICS;
      brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_965;
   }

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
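   /* With the 256MB estimate above, this works out to a 64MB cap on any
    * single mapped object.
    */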
   brw->max_gtt_map_object_size = gtt_size / 4;

   if (brw->gen == 6)
      brw->urb.gen6_gs_previously_active = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;

   if (brw->gen < 6) {
      brw->curbe.last_buf = calloc(1, 4096);
      brw->curbe.next_buf = calloc(1, 4096);
   }

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   brw_fs_alloc_reg_sets(brw);
   brw_vec4_alloc_reg_set(brw);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   /* Here we override context constants. We apply the overrides after
    * calculation of the context version because we do not want the overridden
    * constants to change the version.
    */
   brw_override_max_samples(brw);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   drm_intel_bo_unreference(brw->vs.base.const_bo);
   drm_intel_bo_unreference(brw->wm.base.const_bo);

   free(brw->curbe.last_buf);
   free(brw->curbe.next_buf);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      if (fb->Attachment[i].Renderbuffer &&
          fb->Attachment[i].Renderbuffer->Format == MESA_FORMAT_SARGB8) {
         fb->Attachment[i].Renderbuffer->Format = MESA_FORMAT_ARGB8888;
      }
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      intel_prepare_render(brw);
      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

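/**
 * Downsample and/or color-resolve the window-system color buffers so
 * their contents are valid before a DRI2 flush hands them to the window
 * system.
 */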
void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
         BUFFER_BACK_LEFT,
         BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt);
      else
         intel_miptree_downsample(brw, rb->mt);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   struct brw_context *brw = context->driverPrivate;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
       switch (buffers[i].attachment) {
       case __DRI_BUFFER_FRONT_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
           region_name = "dri2 front buffer";
           break;

       case __DRI_BUFFER_FAKE_FRONT_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
           region_name = "dri2 fake front buffer";
           break;

       case __DRI_BUFFER_BACK_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
           region_name = "dri2 back buffer";
           break;

       case __DRI_BUFFER_DEPTH:
       case __DRI_BUFFER_HIZ:
       case __DRI_BUFFER_DEPTH_STENCIL:
       case __DRI_BUFFER_STENCIL:
       case __DRI_BUFFER_ACCUM:
       default:
           fprintf(stderr,
                   "unhandled buffer attach event, attachment type %d\n",
                   buffers[i].attachment);
           return;
       }

       intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (brw->is_front_buffer_rendering)
      brw->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame.  At this point, the
    * round trips for swap/copy and getting new buffers are done, so
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_throttle && brw->first_post_swapbuffers_batch) {
      if (!brw->disable_throttling)
         drm_intel_bo_wait_rendering(brw->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
      brw->first_post_swapbuffers_batch = NULL;
      brw->need_throttle = false;
   }
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

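   /* attachments[] is filled with interleaved (attachment token,
    * bits-per-pixel) pairs for getBuffersWithFormat(), which is why the
    * count passed below is i / 2.
    */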
   memset(attachments, 0, sizeof(attachments));
   if ((brw->is_front_buffer_rendering ||
        brw->is_front_buffer_reading ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && brw->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_batchbuffer_flush(brw);
      intel_flush_front(&brw->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to intel_region_alloc_for_handle().
 *
 * \see intel_update_renderbuffers()
 * \see intel_region_alloc_for_handle()
 */
static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct intel_region *region = NULL;

   if (!rb)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
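   /* For a multisampled renderbuffer, the shared DRI2 buffer backs the
    * single-sample miptree (singlesample_mt), so that is the region whose
    * name we compare.
    */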
   if (num_samples == 0) {
       if (rb->mt &&
           rb->mt->region &&
           rb->mt->region->name == buffer->name)
          return;
   } else {
       if (rb->mt &&
           rb->mt->singlesample_mt &&
           rb->mt->singlesample_mt->region &&
           rb->mt->singlesample_mt->region->name == buffer->name)
          return;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   region = intel_region_alloc_for_handle(brw->intelScreen,
                                          buffer->cpp,
                                          drawable->w,
                                          drawable->h,
                                          buffer->pitch,
                                          buffer->name,
                                          buffer_name);
   if (!region)
      return;

   rb->mt = intel_miptree_create_for_dri2_buffer(brw,
                                                 buffer->attachment,
                                                 intel_rb_format(rb),
                                                 num_samples,
                                                 region);
   intel_region_release(&region);
}
1243