brw_context.c revision 1fb8c6df884c2a17cf980c4ea32db4c214903b55
/*
 Copyright 2003 VMware, Inc.
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */


#include "main/api_exec.h"
#include "main/context.h"
#include "main/fbobject.h"
#include "main/extensions.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/points.h"
#include "main/version.h"
#include "main/vtxfmt.h"
#include "main/texobj.h"
#include "main/framebuffer.h"

#include "vbo/vbo_context.h"

#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"
#include "utils.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_compiler.h"
#include "brw_draw.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_pixel.h"
#include "intel_image.h"
#include "intel_tex.h"
#include "intel_tex_obj.h"

#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"
#include "util/ralloc.h"
#include "util/debug.h"
#include "isl/isl.h"

/***************************************
 * Mesa's Driver Functions
 ***************************************/

const char *const brw_vendor_string = "Intel Open Source Technology Center";

static const char *
get_bsw_model(const struct intel_screen *intelScreen)
{
   switch (intelScreen->eu_total) {
   case 16:
      return "405";
   case 12:
      return "400";
   default:
      return "   ";
   }
}

const char *
brw_get_renderer_string(const struct intel_screen *intelScreen)
{
   const char *chipset;
   static char buffer[128];
   char *bsw = NULL;

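   /* Including pci_ids/i965_pci_ids.h below expands the CHIPSET() macro once
    * per known device, generating one "case id:" label per PCI ID that maps
    * the device to its marketing name.
    */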
   switch (intelScreen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i965_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   /* Braswell branding is funny, so we have to fix it up here */
   if (intelScreen->deviceID == 0x22B1) {
      bsw = strdup(chipset);
      char *needle = strstr(bsw, "XXX");
      if (needle) {
         memcpy(needle, get_bsw_model(intelScreen), 3);
         chipset = bsw;
      }
   }

   (void) driGetRendererString(buffer, chipset, 0);
   free(bsw);
   return buffer;
}

static const GLubyte *
intel_get_string(struct gl_context * ctx, GLenum name)
{
   const struct brw_context *const brw = brw_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) brw_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) brw_get_renderer_string(brw->intelScreen);

   default:
      return NULL;
   }
}

static void
intel_viewport(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;

   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (driContext->driDrawablePriv)
         dri2InvalidateDrawable(driContext->driDrawablePriv);
      if (driContext->driReadablePriv)
         dri2InvalidateDrawable(driContext->driReadablePriv);
   }
}

static void
intel_update_framebuffer(struct gl_context *ctx,
                         struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);

   /* Quantize the derived default number of samples
    */
   fb->DefaultGeometry._NumSamples =
      intel_quantize_num_samples(brw->intelScreen,
                                 fb->DefaultGeometry.NumSamples);
}

/* On Gen9, color buffers may be compressed by the hardware (lossless
 * compression). There are, however, format restrictions, and care needs to
 * be taken that the sampler engine is capable of re-interpreting a buffer
 * with a format different from the one it was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * set).
 */
static bool
intel_texture_view_requires_resolve(struct brw_context *brw,
                                    struct intel_texture_object *intel_tex)
{
   if (brw->gen < 9 ||
       !intel_miptree_is_lossless_compressed(brw, intel_tex->mt))
     return false;

   const uint32_t brw_format = brw_format_for_mesa_format(intel_tex->_Format);

   if (isl_format_supports_lossless_compression(brw->intelScreen->devinfo,
                                                brw_format))
      return false;

   perf_debug("Incompatible sampling format (%s) for rbc (%s)\n",
              _mesa_get_format_name(intel_tex->_Format),
              _mesa_get_format_name(intel_tex->mt->format));

   return true;
}

static void
intel_update_state(struct gl_context * ctx, GLuint new_state)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *tex_obj;
   struct intel_renderbuffer *depth_irb;

   if (ctx->swrast_context)
      _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   brw->NewGLState |= new_state;

   _mesa_unlock_context_textures(ctx);

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      /* The sampling engine understands lossless compression, so resolving
       * such surfaces can be skipped for performance reasons: passing
       * INTEL_MIPTREE_IGNORE_CCS_E leaves them alone, while a flag of 0
       * forces the resolve when the texture view is incompatible with the
       * compressed format.
       */
      const int flags = intel_texture_view_requires_resolve(brw, tex_obj) ?
                           0 : INTEL_MIPTREE_IGNORE_CCS_E;
      intel_miptree_resolve_color(brw, tex_obj->mt, flags);
      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
   }

   /* Resolve color for each active shader image. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct gl_linked_shader *shader =
         ctx->_Shader->CurrentProgram[i] ?
            ctx->_Shader->CurrentProgram[i]->_LinkedShaders[i] : NULL;

      if (unlikely(shader && shader->NumImages)) {
         for (unsigned j = 0; j < shader->NumImages; j++) {
            struct gl_image_unit *u = &ctx->ImageUnits[shader->ImageUnits[j]];
            tex_obj = intel_texture_object(u->TexObj);

            if (tex_obj && tex_obj->mt) {
               /* Access to images is implemented using indirect messages
                * against the data port. Normal render target writes understand
                * lossless compression, but unfortunately the typed/untyped
                * read/write interface doesn't. Therefore compressed
                * surfaces need to be resolved prior to accessing them.
                */
               intel_miptree_resolve_color(brw, tex_obj->mt, 0);
               brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
            }
         }
      }
   }

   /* If FRAMEBUFFER_SRGB is used on Gen9+ then we need to resolve any of the
    * single-sampled color renderbuffers because the CCS buffer isn't
    * supported for SRGB formats. This only matters if FRAMEBUFFER_SRGB is
    * enabled because otherwise the surface state will be programmed with the
    * linear equivalent format anyway.
    */
   if (brw->gen >= 9 && ctx->Color.sRGBEnabled) {
      struct gl_framebuffer *fb = ctx->DrawBuffer;
      for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *rb = fb->_ColorDrawBuffers[i];

         if (rb == NULL)
            continue;

         struct intel_renderbuffer *irb = intel_renderbuffer(rb);
         struct intel_mipmap_tree *mt = irb->mt;

         if (mt == NULL ||
             mt->num_samples > 1 ||
             _mesa_get_srgb_format_linear(mt->format) == mt->format)
            continue;

         /* Lossless compression is not supported for SRGB formats, so it
          * should be impossible to get here with such surfaces.
          */
         assert(!intel_miptree_is_lossless_compressed(brw, mt));
         intel_miptree_resolve_color(brw, mt, 0);
         brw_render_cache_set_check_flush(brw, mt->bo);
      }
   }

   _mesa_lock_context_textures(ctx);

   if (new_state & _NEW_BUFFERS) {
      intel_update_framebuffer(ctx, ctx->DrawBuffer);
      if (ctx->DrawBuffer != ctx->ReadBuffer)
         intel_update_framebuffer(ctx, ctx->ReadBuffer);
   }
}

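/* Pick whichever loader interface is present: prefer the image loader's
 * flushFrontBuffer hook and fall back to the DRI2 loader's otherwise.
 */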
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *driDrawable = driContext->driDrawablePriv;
   __DRIscreen *const screen = brw->intelScreen->driScrnPriv;

   if (brw->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) && driDrawable &&
          driDrawable->loaderPrivate) {

         /* Resolve before flushing FAKE_FRONT_LEFT to FRONT_LEFT.
          *
          * This potentially resolves both the front and back buffers. It
          * is unnecessary to resolve the back, but that harms nothing except
          * performance. And no one cares about front-buffer render
          * performance.
          */
         intel_resolve_for_dri2_flush(brw, driDrawable);
         intel_batchbuffer_flush(brw);

         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         brw->front_buffer_dirty = false;
      }
   }
}

static void
intel_glFlush(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_batchbuffer_flush(brw);
   intel_flush_front(ctx);

   brw->need_flush_throttle = true;
}

static void
intel_finish(struct gl_context * ctx)
{
   struct brw_context *brw = brw_context(ctx);

   intel_glFlush(ctx);

   if (brw->batch.last_bo)
      drm_intel_bo_wait_rendering(brw->batch.last_bo);
}

static void
brw_init_driver_functions(struct brw_context *brw,
                          struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   /* GLX uses DRI2 invalidate events to handle window resizing.
    * Unfortunately, EGL does not - libEGL is written in XCB (not Xlib),
    * which doesn't provide a mechanism for snooping the event queues.
    *
    * So EGL still relies on viewport hacks to handle window resizing.
    * This should go away with DRI3000.
    */
   if (!brw->driContext->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_viewport;

   functions->Flush = intel_glFlush;
   functions->Finish = intel_finish;
   functions->GetString = intel_get_string;
   functions->UpdateState = intel_update_state;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
   brw_init_object_purgeable_functions(functions);

   brwInitFragProgFuncs( functions );
   brw_init_common_queryobj_functions(functions);
   if (brw->gen >= 8 || brw->is_haswell)
      hsw_init_queryobj_functions(functions);
   else if (brw->gen >= 6)
      gen6_init_queryobj_functions(functions);
   else
      gen4_init_queryobj_functions(functions);
   brw_init_compute_functions(functions);
   if (brw->gen >= 7)
      brw_init_conditional_render_functions(functions);

   functions->QueryInternalFormat = brw_query_internal_format;

   functions->NewTransformFeedback = brw_new_transform_feedback;
   functions->DeleteTransformFeedback = brw_delete_transform_feedback;
   if (brw->intelScreen->has_mi_math_and_lrr) {
      functions->BeginTransformFeedback = hsw_begin_transform_feedback;
      functions->EndTransformFeedback = hsw_end_transform_feedback;
      functions->PauseTransformFeedback = hsw_pause_transform_feedback;
      functions->ResumeTransformFeedback = hsw_resume_transform_feedback;
   } else if (brw->gen >= 7) {
      functions->BeginTransformFeedback = gen7_begin_transform_feedback;
      functions->EndTransformFeedback = gen7_end_transform_feedback;
      functions->PauseTransformFeedback = gen7_pause_transform_feedback;
      functions->ResumeTransformFeedback = gen7_resume_transform_feedback;
      functions->GetTransformFeedbackVertexCount =
         brw_get_transform_feedback_vertex_count;
   } else {
      functions->BeginTransformFeedback = brw_begin_transform_feedback;
      functions->EndTransformFeedback = brw_end_transform_feedback;
   }

   if (brw->gen >= 6)
      functions->GetSamplePosition = gen6_get_sample_position;
}

static void
brw_initialize_context_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->intelScreen->compiler;

   const bool stage_exists[MESA_SHADER_STAGES] = {
      [MESA_SHADER_VERTEX] = true,
      [MESA_SHADER_TESS_CTRL] = brw->gen >= 7,
      [MESA_SHADER_TESS_EVAL] = brw->gen >= 7,
      [MESA_SHADER_GEOMETRY] = brw->gen >= 6,
      [MESA_SHADER_FRAGMENT] = true,
      [MESA_SHADER_COMPUTE] =
         (ctx->API == API_OPENGL_CORE &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 1024) ||
         (ctx->API == API_OPENGLES2 &&
          ctx->Const.MaxComputeWorkGroupSize[0] >= 128) ||
         _mesa_extension_override_enables.ARB_compute_shader,
   };

   unsigned num_stages = 0;
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      if (stage_exists[i])
         num_stages++;
   }

   unsigned max_samplers =
      brw->gen >= 8 || brw->is_haswell ? BRW_MAX_TEX_UNIT : 16;

   ctx->Const.MaxDualSourceDrawBuffers = 1;
   ctx->Const.MaxDrawBuffers = BRW_MAX_DRAW_BUFFERS;
   ctx->Const.MaxCombinedShaderOutputResources =
      MAX_IMAGE_UNITS + BRW_MAX_DRAW_BUFFERS;

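   /* The hardware TIMESTAMP register is 36 bits wide, so that is the number
    * of valid bits to advertise for GL_TIMESTAMP queries.
    */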
   ctx->Const.QueryCounterBits.Timestamp = 36;

   ctx->Const.MaxTextureCoordUnits = 8; /* Mesa limit */
   ctx->Const.MaxImageUnits = MAX_IMAGE_UNITS;
   ctx->Const.MaxRenderbufferSize = 8192;
   ctx->Const.MaxTextureLevels = MIN2(14 /* 8192 */, MAX_TEXTURE_LEVELS);

   /* On Sandy Bridge and prior, the "Render Target View Extent" field of
    * RENDER_SURFACE_STATE is only 9 bits so the largest 3-D texture we can do
    * a layered render into has a depth of 512.  On Iron Lake and earlier, we
    * don't support layered rendering and we use manual offsetting to render
    * into the different layers so this doesn't matter.  On Sandy Bridge,
    * however, we do support layered rendering so this is a problem.
    */
   ctx->Const.Max3DTextureLevels = brw->gen == 6 ? 10 /* 512 */ : 12; /* 2048 */

   ctx->Const.MaxCubeTextureLevels = 14; /* 8192 */
   ctx->Const.MaxArrayTextureLayers = brw->gen >= 7 ? 2048 : 512;
   ctx->Const.MaxTextureMbytes = 1536;
   ctx->Const.MaxTextureRectSize = 1 << 12;
   ctx->Const.MaxTextureMaxAnisotropy = 16.0;
   ctx->Const.StripTextureBorder = true;
   if (brw->gen >= 7)
      ctx->Const.MaxProgramTextureGatherComponents = 4;
   else if (brw->gen == 6)
      ctx->Const.MaxProgramTextureGatherComponents = 1;

   ctx->Const.MaxUniformBlockSize = 65536;

   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_program_constants *prog = &ctx->Const.Program[i];

      if (!stage_exists[i])
         continue;

      prog->MaxTextureImageUnits = max_samplers;

      prog->MaxUniformBlocks = BRW_MAX_UBO;
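      /* A uniform component is 4 bytes, so dividing the block size in bytes
       * by 4 converts it to components; e.g. a 65536-byte block contributes
       * 16384 components.
       */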
      prog->MaxCombinedUniformComponents =
         prog->MaxUniformComponents +
         ctx->Const.MaxUniformBlockSize / 4 * prog->MaxUniformBlocks;

      prog->MaxAtomicCounters = MAX_ATOMIC_COUNTERS;
      prog->MaxAtomicBuffers = BRW_MAX_ABO;
      prog->MaxImageUniforms = compiler->scalar_stage[i] ? BRW_MAX_IMAGES : 0;
      prog->MaxShaderStorageBlocks = BRW_MAX_SSBO;
   }

   ctx->Const.MaxTextureUnits =
      MIN2(ctx->Const.MaxTextureCoordUnits,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);

   ctx->Const.MaxUniformBufferBindings = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedUniformBlocks = num_stages * BRW_MAX_UBO;
   ctx->Const.MaxCombinedAtomicBuffers = num_stages * BRW_MAX_ABO;
   ctx->Const.MaxCombinedShaderStorageBlocks = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxShaderStorageBufferBindings = num_stages * BRW_MAX_SSBO;
   ctx->Const.MaxCombinedTextureImageUnits = num_stages * max_samplers;
   ctx->Const.MaxCombinedImageUniforms = num_stages * BRW_MAX_IMAGES;


   /* Hardware only supports a limited number of transform feedback buffers.
    * So we need to override the Mesa default (which is based only on software
    * limits).
    */
   ctx->Const.MaxTransformFeedbackBuffers = BRW_MAX_SOL_BUFFERS;

   /* On Gen6, in the worst case, we use up one binding table entry per
    * transform feedback component (see comments above the definition of
    * BRW_MAX_SOL_BINDINGS, in brw_context.h), so we need to advertise a value
    * for MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS equal to
    * BRW_MAX_SOL_BINDINGS.
    *
    * In "separate components" mode, we need to divide this value by
    * BRW_MAX_SOL_BUFFERS, so that the total number of binding table entries
    * used up by all buffers will not exceed BRW_MAX_SOL_BINDINGS.
    */
   ctx->Const.MaxTransformFeedbackInterleavedComponents = BRW_MAX_SOL_BINDINGS;
   ctx->Const.MaxTransformFeedbackSeparateComponents =
      BRW_MAX_SOL_BINDINGS / BRW_MAX_SOL_BUFFERS;
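   /* Assuming BRW_MAX_SOL_BINDINGS is 64 and BRW_MAX_SOL_BUFFERS is 4 (the
    * values in brw_context.h), this works out to 64 interleaved components
    * but only 16 separate components per buffer.
    */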

   ctx->Const.AlwaysUseGetTransformFeedbackVertexCount =
      !brw->intelScreen->has_mi_math_and_lrr;

   int max_samples;
   const int *msaa_modes = intel_supported_msaa_modes(brw->intelScreen);
   const int clamp_max_samples =
      driQueryOptioni(&brw->optionCache, "clamp_max_samples");

   if (clamp_max_samples < 0) {
      max_samples = msaa_modes[0];
   } else {
      /* Select the largest supported MSAA mode that does not exceed
       * clamp_max_samples.
       */
      max_samples = 0;
      for (int i = 0; msaa_modes[i] != 0; ++i) {
         if (msaa_modes[i] <= clamp_max_samples) {
            max_samples = msaa_modes[i];
            break;
         }
      }
   }

   ctx->Const.MaxSamples = max_samples;
   ctx->Const.MaxColorTextureSamples = max_samples;
   ctx->Const.MaxDepthTextureSamples = max_samples;
   ctx->Const.MaxIntegerSamples = max_samples;
   ctx->Const.MaxImageSamples = 0;

   /* gen6_set_sample_maps() sets the SampleMap{2,4,8}x variables, which are
    * used to map indices of a rectangular grid to sample numbers within a
    * pixel. These variables are used by the
    * GL_EXT_framebuffer_multisample_blit_scaled extension implementation.
    * For more details, see the comment above the gen6_set_sample_maps()
    * definition.
    */
   gen6_set_sample_maps(ctx);

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   if (brw->gen >= 6) {
      ctx->Const.MaxLineWidth = 7.375;
      ctx->Const.MaxLineWidthAA = 7.375;
      ctx->Const.LineWidthGranularity = 0.125;
   } else {
      ctx->Const.MaxLineWidth = 7.0;
      ctx->Const.MaxLineWidthAA = 7.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   /* For non-antialiased lines, we have to round the line width to the
    * nearest whole number. Make sure that we don't advertise a line
    * width that, when rounded, will be beyond the actual hardware
    * maximum.
    */
   assert(roundf(ctx->Const.MaxLineWidth) <= ctx->Const.MaxLineWidth);

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 255.0;
   ctx->Const.PointSizeGranularity = 1.0;

   if (brw->gen >= 5 || brw->is_g4x)
      ctx->Const.MaxClipPlanes = 8;

   ctx->Const.LowerTessLevel = true;
   ctx->Const.LowerTCSPatchVerticesIn = brw->gen >= 8;
   ctx->Const.LowerTESPatchVerticesIn = true;
   ctx->Const.PrimitiveRestartForPatches = true;

   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeInstructions = 16 * 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAluInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexInstructions = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTexIndirections = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAttribs = 16;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeAddressRegs = 1;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_VERTEX].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_VERTEX].MaxEnvParams);

   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAluInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexInstructions = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTexIndirections = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAttribs = 12;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeTemps = 256;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeAddressRegs = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters = 1024;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams =
      MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxNativeParameters,
           ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxEnvParams);

   /* Fragment shaders use real, 32-bit twos-complement integers for all
    * integer types.
    */
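   /* These follow the log2-style encoding of glGetShaderPrecisionFormat():
    * RangeMin = 31 means the most negative representable value is -2^31,
    * RangeMax = 30 means the largest is 2^31 - 1 (floor(log2) is 30), and
    * Precision = 0 marks an exact integer format.
    */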
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].HighInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;
   ctx->Const.Program[MESA_SHADER_FRAGMENT].MediumInt = ctx->Const.Program[MESA_SHADER_FRAGMENT].LowInt;

   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMin = 31;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.RangeMax = 30;
   ctx->Const.Program[MESA_SHADER_VERTEX].LowInt.Precision = 0;
   ctx->Const.Program[MESA_SHADER_VERTEX].HighInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;
   ctx->Const.Program[MESA_SHADER_VERTEX].MediumInt = ctx->Const.Program[MESA_SHADER_VERTEX].LowInt;

   /* Gen6 converts quads to polygons at the beginning of the 3D pipeline,
    * but we're not sure exactly how it orders the vertices, which affects
    * the provoking vertex decision. Always use the last-vertex convention
    * for quad primitives, which works as expected for now.
    */
   if (brw->gen >= 6)
      ctx->Const.QuadsFollowProvokingVertexConvention = false;

   ctx->Const.NativeIntegers = true;
   ctx->Const.VertexID_is_zero_based = true;

   /* Regarding the CMP instruction, the Ivybridge PRM says:
    *
    *   "For each enabled channel 0b or 1b is assigned to the appropriate flag
    *    bit and 0/all zeros or all ones (e.g, byte 0xFF, word 0xFFFF, DWord
    *    0xFFFFFFFF) is assigned to dst."
    *
    * but PRMs for earlier generations say
    *
    *   "In dword format, one GRF may store up to 8 results. When the register
    *    is used later as a vector of Booleans, as only LSB at each channel
    *    contains meaning [sic] data, software should make sure all higher bits
    *    are masked out (e.g. by 'and-ing' an [sic] 0x01 constant)."
    *
    * We select the representation of a true boolean uniform to be ~0, and fix
    * the results of Gen <= 5 CMP instructions with -(result & 1).
    */
   ctx->Const.UniformBooleanTrue = ~0;

   /* From the gen4 PRM, volume 4 page 127:
    *
    *     "For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
    *      the base address of the first element of the surface, computed in
    *      software by adding the surface base address to the byte offset of
    *      the element in the buffer."
    *
    * However, unaligned accesses are slower, so enforce buffer alignment.
    */
   ctx->Const.UniformBufferOffsetAlignment = 16;

   /* ShaderStorageBufferOffsetAlignment should be a cacheline (64 bytes) so
    * that we can safely have the CPU and GPU writing the same SSBO on
    * non-cache-coherent systems (our Atom CPUs). With UBOs, the GPU never
    * writes, so there's no problem. For an SSBO, the GPU and the CPU can
    * be updating disjoint regions of the buffer simultaneously, and that will
    * break if the regions overlap the same cacheline.
    */
   ctx->Const.ShaderStorageBufferOffsetAlignment = 64;
   ctx->Const.TextureBufferOffsetAlignment = 16;
   ctx->Const.MaxTextureBufferSize = 128 * 1024 * 1024;

   if (brw->gen >= 6) {
      ctx->Const.MaxVarying = 32;
      ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents = 64;
      ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents = 128;
      ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents = 128;
   }

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      ctx->Const.ShaderCompilerOptions[i] =
         brw->intelScreen->compiler->glsl_compiler_options[i];
   }

   if (brw->gen >= 7) {
      ctx->Const.MaxViewportWidth = 32768;
      ctx->Const.MaxViewportHeight = 32768;
   }

   /* ARB_viewport_array */
   if (brw->gen >= 6 && ctx->API == API_OPENGL_CORE) {
      ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
      ctx->Const.ViewportSubpixelBits = 0;

      /* Cast to float before negating because MaxViewportWidth is unsigned.
       */
      ctx->Const.ViewportBounds.Min = -(float)ctx->Const.MaxViewportWidth;
      ctx->Const.ViewportBounds.Max = ctx->Const.MaxViewportWidth;
   }

   /* ARB_gpu_shader5 */
   if (brw->gen >= 7)
      ctx->Const.MaxVertexStreams = MIN2(4, MAX_VERTEX_STREAMS);

   /* ARB_framebuffer_no_attachments */
   ctx->Const.MaxFramebufferWidth = 16384;
   ctx->Const.MaxFramebufferHeight = 16384;
   ctx->Const.MaxFramebufferLayers = ctx->Const.MaxArrayTextureLayers;
   ctx->Const.MaxFramebufferSamples = max_samples;
}

static void
brw_initialize_cs_context_constants(struct brw_context *brw, unsigned max_threads)
{
   struct gl_context *ctx = &brw->ctx;
   /* Maximum number of scalar compute shader invocations that can be run in
    * parallel in the same subslice assuming SIMD32 dispatch.
    */
   const uint32_t max_invocations = 32 * max_threads;
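   /* For example, a hypothetical part with 64 threads per subslice would
    * advertise work groups of up to 32 * 64 = 2048 invocations.
    */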
   ctx->Const.MaxComputeWorkGroupSize[0] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[1] = max_invocations;
   ctx->Const.MaxComputeWorkGroupSize[2] = max_invocations;
   ctx->Const.MaxComputeWorkGroupInvocations = max_invocations;
   ctx->Const.MaxComputeSharedMemorySize = 64 * 1024;
}

/**
 * Process driconf (drirc) options, setting appropriate context flags.
 *
 * intelInitExtensions still pokes at optionCache directly, in order to
 * avoid advertising various extensions.  No flags are set, so it makes
 * sense to continue doing that there.
 */
static void
brw_process_driconf_options(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   driOptionCache *options = &brw->optionCache;
   driParseConfigFiles(options, &brw->intelScreen->optionCache,
                       brw->driContext->driScreenPriv->myNum, "i965");

   int bo_reuse_mode = driQueryOptioni(options, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(brw->bufmgr);
      break;
   }

   if (!driQueryOptionb(options, "hiz")) {
       brw->has_hiz = false;
       /* On gen6, you can only do separate stencil with HIZ. */
       if (brw->gen == 6)
          brw->has_separate_stencil = false;
   }

   if (driQueryOptionb(options, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      brw->always_flush_batch = true;
   }

   if (driQueryOptionb(options, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      brw->always_flush_cache = true;
   }

   if (driQueryOptionb(options, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      brw->disable_throttling = true;
   }

   brw->precompile = driQueryOptionb(&brw->optionCache, "shader_precompile");

   if (driQueryOptionb(&brw->optionCache, "precise_trig"))
      brw->intelScreen->compiler->precise_trig = true;

   ctx->Const.ForceGLSLExtensionsWarn =
      driQueryOptionb(options, "force_glsl_extensions_warn");

   ctx->Const.DisableGLSLLineContinuations =
      driQueryOptionb(options, "disable_glsl_line_continuations");

   ctx->Const.AllowGLSLExtensionDirectiveMidShader =
      driQueryOptionb(options, "allow_glsl_extension_directive_midshader");

   brw->dual_color_blend_by_location =
      driQueryOptionb(options, "dual_color_blend_by_location");
}

GLboolean
brwCreateContext(gl_api api,
                 const struct gl_config *mesaVis,
                 __DRIcontext *driContextPriv,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 bool notify_reset,
                 unsigned *dri_ctx_error,
                 void *sharedContextPrivate)
{
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   struct intel_screen *screen = sPriv->driverPrivate;
   const struct brw_device_info *devinfo = screen->devinfo;
   struct dd_function_table functions;

   /* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
    * provides us with context reset notifications.
    */
   uint32_t allowed_flags = __DRI_CTX_FLAG_DEBUG
      | __DRI_CTX_FLAG_FORWARD_COMPATIBLE;

   if (screen->has_context_reset_notification)
      allowed_flags |= __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS;

   if (flags & ~allowed_flags) {
      *dri_ctx_error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
      return false;
   }

   struct brw_context *brw = rzalloc(NULL, struct brw_context);
   if (!brw) {
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   driContextPriv->driverPrivate = brw;
   brw->driContext = driContextPriv;
   brw->intelScreen = screen;
   brw->bufmgr = screen->bufmgr;

   brw->gen = devinfo->gen;
   brw->gt = devinfo->gt;
   brw->is_g4x = devinfo->is_g4x;
   brw->is_baytrail = devinfo->is_baytrail;
   brw->is_haswell = devinfo->is_haswell;
   brw->is_cherryview = devinfo->is_cherryview;
   brw->is_broxton = devinfo->is_broxton;
   brw->has_llc = devinfo->has_llc;
   brw->has_hiz = devinfo->has_hiz_and_separate_stencil;
   brw->has_separate_stencil = devinfo->has_hiz_and_separate_stencil;
   brw->has_pln = devinfo->has_pln;
   brw->has_compr4 = devinfo->has_compr4;
   brw->has_surface_tile_offset = devinfo->has_surface_tile_offset;
   brw->has_negative_rhw_bug = devinfo->has_negative_rhw_bug;
   brw->needs_unlit_centroid_workaround =
      devinfo->needs_unlit_centroid_workaround;

   brw->must_use_separate_stencil = devinfo->must_use_separate_stencil;
   brw->has_swizzling = screen->hw_has_swizzling;

   brw->vs.base.stage = MESA_SHADER_VERTEX;
   brw->tcs.base.stage = MESA_SHADER_TESS_CTRL;
   brw->tes.base.stage = MESA_SHADER_TESS_EVAL;
   brw->gs.base.stage = MESA_SHADER_GEOMETRY;
   brw->wm.base.stage = MESA_SHADER_FRAGMENT;
   if (brw->gen >= 8) {
      gen8_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen8_emit_depth_stencil_hiz;
   } else if (brw->gen >= 7) {
      gen7_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen7_emit_depth_stencil_hiz;
   } else if (brw->gen >= 6) {
      gen6_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = gen6_emit_depth_stencil_hiz;
   } else {
      gen4_init_vtable_surface_functions(brw);
      brw->vtbl.emit_depth_stencil_hiz = brw_emit_depth_stencil_hiz;
   }

   brw_init_driver_functions(brw, &functions);

   if (notify_reset)
      functions.GetGraphicsResetStatus = brw_get_graphics_reset_status;

   struct gl_context *ctx = &brw->ctx;

   if (!_mesa_initialize_context(ctx, api, mesaVis, shareCtx, &functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      fprintf(stderr, "%s: failed to init mesa context\n", __func__);
      intelDestroyContext(driContextPriv);
      return false;
   }

   driContextSetFlags(ctx, flags);

   /* Initialize the software rasterizer and helper modules.
    *
    * As of GL 3.1 core, the gen4+ driver doesn't need the swrast context for
    * software fallbacks (which we have to support on legacy GL to do weird
    * glDrawPixels(), glBitmap(), and other functions).
    */
   if (api != API_OPENGL_CORE && api != API_OPENGLES2) {
      _swrast_CreateContext(ctx);
   }

   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   brw_process_driconf_options(brw);

   if (INTEL_DEBUG & DEBUG_PERF)
      brw->perf_debug = true;

   brw_initialize_cs_context_constants(brw, devinfo->max_cs_threads);
   brw_initialize_context_constants(brw);

   ctx->Const.ResetStrategy = notify_reset
      ? GL_LOSE_CONTEXT_ON_RESET_ARB : GL_NO_RESET_NOTIFICATION_ARB;

   /* Reinitialize the context point state.  It depends on ctx->Const values. */
   _mesa_init_point(ctx);

   intel_fbo_init(brw);

   intel_batchbuffer_init(brw);

   if (brw->gen >= 6) {
      /* Create a new hardware context.  Using a hardware context means that
       * our GPU state will be saved/restored on context switch, allowing us
       * to assume that the GPU is in the same state we left it in.
       *
       * This is required for transform feedback buffer offsets, query objects,
       * and also allows us to reduce how much state we have to emit.
       */
      brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);

      if (!brw->hw_ctx) {
         fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
         intelDestroyContext(driContextPriv);
         return false;
      }
   }

   if (brw_init_pipe_control(brw, devinfo)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      intelDestroyContext(driContextPriv);
      return false;
   }

   brw_init_state(brw);

   intelInitExtensions(ctx);

   brw_init_surface_formats(brw);

   brw->max_vs_threads = devinfo->max_vs_threads;
   brw->max_hs_threads = devinfo->max_hs_threads;
   brw->max_ds_threads = devinfo->max_ds_threads;
   brw->max_gs_threads = devinfo->max_gs_threads;
   brw->max_wm_threads = devinfo->max_wm_threads;
   /* FINISHME: Do this for all platforms that the kernel supports */
   if (brw->is_cherryview &&
       screen->subslice_total > 0 && screen->eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      brw->max_cs_threads = screen->eu_total / screen->subslice_total * 7;
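      /* Purely as an illustration: a part with 16 EUs over 2 subslices would
       * compute 16 / 2 * 7 = 56 logical threads here.
       */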

      /* Fuse configurations may give more threads than expected, never less. */
      if (brw->max_cs_threads < devinfo->max_cs_threads)
         brw->max_cs_threads = devinfo->max_cs_threads;
   } else {
      brw->max_cs_threads = devinfo->max_cs_threads;
   }
   brw->urb.size = devinfo->urb.size;
   brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
   brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
   brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   brw->max_gtt_map_object_size = gtt_size / 4;
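   /* With the 256MB estimate above, this caps mappable objects at 64MB. */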

   if (brw->gen == 6)
      brw->urb.gs_present = false;

   brw->prim_restart.in_progress = false;
   brw->prim_restart.enable_cut_index = false;
   brw->gs.enabled = false;
   brw->sf.viewport_transform_enable = true;

   brw->predicate.state = BRW_PREDICATE_STATE_RENDER;

   brw->use_resource_streamer = screen->has_resource_streamer &&
      (env_var_as_boolean("INTEL_USE_HW_BT", false) ||
       env_var_as_boolean("INTEL_USE_GATHER", false));

   ctx->VertexProgram._MaintainTnlProgram = true;
   ctx->FragmentProgram._MaintainTexEnvProgram = true;

   brw_draw_init( brw );

   if ((flags & __DRI_CTX_FLAG_DEBUG) != 0) {
      /* Turn on some extra GL_ARB_debug_output generation. */
      brw->perf_debug = true;
   }

   if ((flags & __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS) != 0)
      ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_init_shader_time(brw);

   _mesa_compute_version(ctx);

   _mesa_initialize_dispatch_tables(ctx);
   _mesa_initialize_vbo_vtxfmt(ctx);

   if (ctx->Extensions.AMD_performance_monitor) {
      brw_init_performance_monitors(brw);
   }

   vbo_use_buffer_objects(ctx);
   vbo_always_unmap_buffers(ctx);

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);
   if (brw->vs.base.scratch_bo)
      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
   if (brw->tcs.base.scratch_bo)
      drm_intel_bo_unreference(brw->tcs.base.scratch_bo);
   if (brw->tes.base.scratch_bo)
      drm_intel_bo_unreference(brw->tes.base.scratch_bo);
   if (brw->gs.base.scratch_bo)
      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
   if (brw->wm.base.scratch_bo)
      drm_intel_bo_unreference(brw->wm.base.scratch_bo);

   gen7_reset_hw_bt_pool_offsets(brw);
   drm_intel_bo_unreference(brw->hw_bt_pool.bo);
   brw->hw_bt_pool.bo = NULL;

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   brw_fini_pipe_control(brw);
   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->throttle_batch[1]);
   drm_intel_bo_unreference(brw->throttle_batch[0]);
   brw->throttle_batch[1] = NULL;
   brw->throttle_batch[0] = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

/**
 * Fixes up the context for GLES2/3 with our default-to-sRGB-capable behavior
 * on window system framebuffers.
 *
 * Desktop GL is fairly reasonable in its handling of sRGB: You can ask if
 * your renderbuffer can do sRGB encode, and you can flip a switch that does
 * sRGB encode if the renderbuffer can handle it.  You can ask specifically
 * for a visual where you're guaranteed to be capable, but it turns out that
 * everyone just makes all their ARGB8888 visuals capable and doesn't offer
 * incapable ones, because there's no difference between the two in resources
 * used.  Applications thus get built that accidentally rely on the default
 * visual choice being sRGB, so we make ours sRGB capable.  Everything sounds
 * great...
 *
 * But for GLES2/3, they decided that it was silly to not turn on sRGB encode
 * for sRGB renderbuffers you made with the GL_EXT_texture_sRGB equivalent.
 * So they removed the enable knob and made it "if the renderbuffer is sRGB
 * capable, do sRGB encode".  Then, for your window system renderbuffers, you
 * can ask for sRGB visuals and get sRGB encode, or not ask for sRGB visuals
 * and get no sRGB encode (assuming that both kinds of visual are available).
 * Thus our choice to support sRGB by default on our visuals for desktop would
 * result in broken rendering of GLES apps that aren't expecting sRGB encode.
 *
 * Unfortunately, renderbuffer setup happens before a context is created.  So
 * in intel_screen.c we always set up sRGB, and here, if you're a GLES2/3
 * context (without an sRGB visual, though we don't have sRGB visuals exposed
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
   struct gl_context *ctx = &brw->ctx;

   if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)
      return;

   /* Some day when we support the sRGB capable bit on visuals available for
    * GLES, we'll need to respect that and not disable things here.
    */
   fb->Visual.sRGBCapable = false;
   for (int i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
      if (rb)
         rb->Format = _mesa_get_srgb_format_linear(rb->Format);
   }
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct brw_context *brw;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      brw = (struct brw_context *) driContextPriv->driverPrivate;
   else
      brw = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (brw_context(curCtx) && brw_context(curCtx) != brw) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &brw->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
      }

      if (driReadPriv == NULL) {
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      /* The sRGB workaround changes the renderbuffer's format. We must change
       * the format before the renderbuffer's miptree gets allocated, otherwise
       * the formats of the renderbuffer and its miptree will differ.
       */
      intel_gles3_srgb_workaround(brw, fb);
      intel_gles3_srgb_workaround(brw, readFb);

      /* If the context viewport hasn't been initialized, force a call out to
       * the loader to get buffers so we have a drawable size for the initial
       * viewport. */
      if (!brw->ctx.ViewportInitialized)
         intel_prepare_render(brw);

      _mesa_make_current(ctx, fb, readFb);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

void
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
   if (brw->gen < 6) {
      /* MSAA and fast color clear are not supported, so don't waste time
       * checking whether a resolve is needed.
       */
      return;
   }

   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;

   /* Usually, only the back buffer will need to be downsampled. However,
    * the front buffer will also need it if the user has rendered into it.
    */
   static const gl_buffer_index buffers[2] = {
      BUFFER_BACK_LEFT,
      BUFFER_FRONT_LEFT,
   };

   for (int i = 0; i < 2; ++i) {
      rb = intel_get_renderbuffer(fb, buffers[i]);
      if (rb == NULL || rb->mt == NULL)
         continue;
      if (rb->mt->num_samples <= 1)
         intel_miptree_resolve_color(brw, rb->mt, 0);
      else
         intel_renderbuffer_downsample(brw, rb);
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

static void
intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable);

static void
intel_update_dri2_buffers(struct brw_context *brw, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(brw, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
       switch (buffers[i].attachment) {
       case __DRI_BUFFER_FRONT_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
           region_name = "dri2 front buffer";
           break;

       case __DRI_BUFFER_FAKE_FRONT_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
           region_name = "dri2 fake front buffer";
           break;

       case __DRI_BUFFER_BACK_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
           region_name = "dri2 back buffer";
           break;

       case __DRI_BUFFER_DEPTH:
       case __DRI_BUFFER_HIZ:
       case __DRI_BUFFER_DEPTH_STENCIL:
       case __DRI_BUFFER_STENCIL:
       case __DRI_BUFFER_ACCUM:
       default:
           fprintf(stderr,
                   "unhandled buffer attach event, attachment type %d\n",
                   buffers[i].attachment);
           return;
       }

       intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct brw_context *brw = context->driverPrivate;
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(brw, drawable);
   else
      intel_update_dri2_buffers(brw, drawable);

   driUpdateFramebufferSize(&brw->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   __DRIcontext *driContext = brw->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      brw->front_buffer_dirty = true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];
1466
1467   struct intel_renderbuffer *front_rb;
1468   struct intel_renderbuffer *back_rb;
1469
1470   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1471   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1472
1473   memset(attachments, 0, sizeof(attachments));
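   /* attachments[] is filled with (attachment, bits-per-pixel) pairs, e.g.
    * { __DRI_BUFFER_BACK_LEFT, 32 }, which is the layout that
    * getBuffersWithFormat() expects; hence the count passed below is i / 2.
    */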
1474   if ((_mesa_is_front_buffer_drawing(fb) ||
1475        _mesa_is_front_buffer_reading(fb) ||
1476        !back_rb) && front_rb) {
1477      /* If a fake front buffer is in use, then querying for
1478       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
1479       * the real front buffer to the fake front buffer.  So before doing the
1480       * query, we need to make sure all the pending drawing has landed in the
1481       * real front buffer.
1482       */
1483      intel_batchbuffer_flush(brw);
1484      intel_flush_front(&brw->ctx);
1485
1486      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
1487      attachments[i++] = intel_bits_per_pixel(front_rb);
1488   } else if (front_rb && brw->front_buffer_dirty) {
1489      /* We have pending front buffer rendering, but we aren't querying for a
1490       * front buffer.  If the front buffer we have is a fake front buffer,
1491       * the X server is going to throw it away when it processes the query.
1492       * So before doing the query, make sure all the pending drawing has
1493       * landed in the real front buffer.
1494       */
1495      intel_batchbuffer_flush(brw);
1496      intel_flush_front(&brw->ctx);
1497   }
1498
1499   if (back_rb) {
1500      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
1501      attachments[i++] = intel_bits_per_pixel(back_rb);
1502   }
1503
1504   assert(i <= ARRAY_SIZE(attachments));
1505
1506   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
1507                                                        &drawable->w,
1508                                                        &drawable->h,
1509                                                        attachments, i / 2,
1510                                                        buffer_count,
1511                                                        drawable->loaderPrivate);
1512}
1513
1514/**
1515 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
1516 *
1517 * This is called from intel_update_renderbuffers().
1518 *
1519 * \par Note:
1520 *    DRI buffers whose attachment point is DRI2BufferStencil or
1521 *    DRI2BufferDepthStencil are handled as special cases.
1522 *
1523 * \param buffer_name is a human-readable name, such as "dri2 front buffer",
1524 *        that is passed to drm_intel_bo_gem_create_from_name().
1525 *
1526 * \see intel_update_renderbuffers()
1527 */
1528static void
1529intel_process_dri2_buffer(struct brw_context *brw,
1530                          __DRIdrawable *drawable,
1531                          __DRIbuffer *buffer,
1532                          struct intel_renderbuffer *rb,
1533                          const char *buffer_name)
1534{
1535   struct gl_framebuffer *fb = drawable->driverPrivate;
1536   drm_intel_bo *bo;
1537
1538   if (!rb)
1539      return;
1540
1541   unsigned num_samples = rb->Base.Base.NumSamples;
1542
1543   /* We try to avoid closing and reopening the same BO name, because the
1544    * first use of a mapping of the buffer causes a burst of page faults,
1545    * which is moderately expensive.
1546    */
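   /* For a multisampled renderbuffer, the buffer shared with the X server is
    * the single-sampled miptree, so that is where any previously flinked name
    * lives.
    */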
1547   struct intel_mipmap_tree *last_mt;
1548   if (num_samples == 0)
1549      last_mt = rb->mt;
1550   else
1551      last_mt = rb->singlesample_mt;
1552
1553   uint32_t old_name = 0;
1554   if (last_mt) {
1555      /* The bo already has a name because the miptree was created by a
1556       * previous call to intel_process_dri2_buffer(). If a bo already has a
1557       * name, then drm_intel_bo_flink() is a low-cost getter.  It does not
1558       * create a new name.
1559       */
1560      drm_intel_bo_flink(last_mt->bo, &old_name);
1561   }
1562
1563   if (old_name == buffer->name)
1564      return;
1565
1566   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
1567      fprintf(stderr,
1568              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
1569              buffer->name, buffer->attachment,
1570              buffer->cpp, buffer->pitch);
1571   }
1572
1573   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
1574                                          buffer->name);
1575   if (!bo) {
1576      fprintf(stderr,
1577              "Failed to open BO for returned DRI2 buffer "
1578              "(%dx%d, %s, named %d).\n"
1579              "This is likely a bug in the X Server that will lead to a "
1580              "crash soon.\n",
1581              drawable->w, drawable->h, buffer_name, buffer->name);
1582      return;
1583   }
1584
1585   intel_update_winsys_renderbuffer_miptree(brw, rb, bo,
1586                                            drawable->w, drawable->h,
1587                                            buffer->pitch);
1588
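   /* When doing front buffer drawing into a multisampled renderbuffer, the
    * freshly attached single-sampled winsys buffer holds the window's current
    * contents, so upsample it into the multisample miptree to preserve them.
    */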
1589   if (_mesa_is_front_buffer_drawing(fb) &&
1590       (buffer->attachment == __DRI_BUFFER_FRONT_LEFT ||
1591        buffer->attachment == __DRI_BUFFER_FAKE_FRONT_LEFT) &&
1592       rb->Base.Base.NumSamples > 1) {
1593      intel_renderbuffer_upsample(brw, rb);
1594   }
1595
1596   assert(rb->mt);
1597
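   /* The miptree now holds its own reference to the BO, so drop ours. */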
1598   drm_intel_bo_unreference(bo);
1599}
1600
1601/**
1602 * \brief Attach a __DRIimage buffer to a renderbuffer.
1603 *
1604 * If the renderbuffer's miptree is already backed by the image's BO, this
1605 * is a no-op.  Otherwise, wrap the BO in a fresh winsys miptree and, when
1606 * front buffer drawing into a multisampled renderbuffer, upsample the new
1607 * single-sampled contents into the multisample miptree.
1608 *
1609 * This is called from intel_update_image_buffers().
1610 *
1611 * \param drawable     Drawable whose buffer is updated.
1612 * \param rb           Renderbuffer the image is attached to.
1613 * \param buffer       Image obtained from the DRI image loader.
1614 * \param buffer_type  Attachment point (front or back buffer).
1615 * \see intel_update_image_buffers()
1616 */
1617static void
1618intel_update_image_buffer(struct brw_context *intel,
1619                          __DRIdrawable *drawable,
1620                          struct intel_renderbuffer *rb,
1621                          __DRIimage *buffer,
1622                          enum __DRIimageBufferMask buffer_type)
1623{
1624   struct gl_framebuffer *fb = drawable->driverPrivate;
1625
1626   if (!rb || !buffer->bo)
1627      return;
1628
1629   unsigned num_samples = rb->Base.Base.NumSamples;
1630
1631   /* Check whether we're already bound to the right
1632    * buffer object.
1633    */
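   /* As in the DRI2 path, the buffer shared with the compositor for a
    * multisampled renderbuffer is the single-sampled miptree.
    */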
1634   struct intel_mipmap_tree *last_mt;
1635   if (num_samples == 0)
1636      last_mt = rb->mt;
1637   else
1638      last_mt = rb->singlesample_mt;
1639
1640   if (last_mt && last_mt->bo == buffer->bo)
1641      return;
1642
1643   intel_update_winsys_renderbuffer_miptree(intel, rb, buffer->bo,
1644                                            buffer->width, buffer->height,
1645                                            buffer->pitch);
1646
1647   if (_mesa_is_front_buffer_drawing(fb) &&
1648       buffer_type == __DRI_IMAGE_BUFFER_FRONT &&
1649       rb->Base.Base.NumSamples > 1) {
1650      intel_renderbuffer_upsample(intel, rb);
1651   }
1652}
1653
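/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which buffers to request, examine the renderbuffers attached
 * to the drawable's framebuffer, then request the corresponding images from
 * the image loader and attach them with intel_update_image_buffer().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \see intel_update_renderbuffers()
 */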
1654static void
1655intel_update_image_buffers(struct brw_context *brw, __DRIdrawable *drawable)
1656{
1657   struct gl_framebuffer *fb = drawable->driverPrivate;
1658   __DRIscreen *screen = brw->intelScreen->driScrnPriv;
1659   struct intel_renderbuffer *front_rb;
1660   struct intel_renderbuffer *back_rb;
1661   struct __DRIimageList images;
1662   unsigned int format;
1663   uint32_t buffer_mask = 0;
1664   int ret;
1665
1666   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
1667   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
1668
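   /* Derive the request format from whichever color buffer exists; with
    * neither a front nor a back buffer there is nothing to update.
    */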
1669   if (back_rb)
1670      format = intel_rb_format(back_rb);
1671   else if (front_rb)
1672      format = intel_rb_format(front_rb);
1673   else
1674      return;
1675
1676   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
1677                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
1678      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
1679   }
1680
1681   if (back_rb)
1682      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;
1683
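   /* Request only the buffers we need; the loader reports which ones it
    * actually supplied in images.image_mask.
    */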
1684   ret = screen->image.loader->getBuffers(drawable,
1685                                          driGLFormatToImageFormat(format),
1686                                          &drawable->dri2.stamp,
1687                                          drawable->loaderPrivate,
1688                                          buffer_mask,
1689                                          &images);
1690   if (!ret)
1691      return;
1692
1693   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
1694      drawable->w = images.front->width;
1695      drawable->h = images.front->height;
1696      intel_update_image_buffer(brw,
1697                                drawable,
1698                                front_rb,
1699                                images.front,
1700                                __DRI_IMAGE_BUFFER_FRONT);
1701   }
1702   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
1703      drawable->w = images.back->width;
1704      drawable->h = images.back->height;
1705      intel_update_image_buffer(brw,
1706                                drawable,
1707                                back_rb,
1708                                images.back,
1709                                __DRI_IMAGE_BUFFER_BACK);
1710   }
1711}
1712