intel_context.c revision 15653b5d88c0f88f49c2d5497b4fb9e045f53560
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "glheader.h"
#include "context.h"
#include "matrix.h"
#include "simple_list.h"
#include "extensions.h"
#include "framebuffer.h"
#include "imports.h"
#include "points.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"

#include "tnl/t_pipeline.h"
#include "tnl/t_vertex.h"

#include "drivers/common/driverfuncs.h"

#include "intel_screen.h"

#include "i830_dri.h"

#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_span.h"
#include "intel_tris.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_decode.h"
#include "intel_bufmgr_ttm.h"

#include "drirenderbuffer.h"
#include "vblank.h"
#include "utils.h"
#include "xmlpool.h"            /* for symbolic values of enum-type options */

#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif

#define need_GL_ARB_multisample
#define need_GL_ARB_point_parameters
#define need_GL_ARB_texture_compression
#define need_GL_ARB_vertex_buffer_object
#define need_GL_ARB_vertex_program
#define need_GL_ARB_window_pos
#define need_GL_EXT_blend_color
#define need_GL_EXT_blend_equation_separate
#define need_GL_EXT_blend_func_separate
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_cull_vertex
#define need_GL_EXT_fog_coord
#define need_GL_EXT_framebuffer_object
#define need_GL_EXT_multi_draw_arrays
#define need_GL_EXT_secondary_color
#define need_GL_NV_vertex_program
#include "extension_helper.h"


#define DRIVER_DATE                     "20061102"

_glthread_Mutex lockMutex;
static GLboolean lockMutexInit = GL_FALSE;


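/**
 * Return driver-specific GL strings: the vendor string for GL_VENDOR and,
 * for GL_RENDERER, a string built from the chipset name (looked up by PCI
 * device ID) plus DRIVER_DATE.  Other queries return NULL.
 */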
static const GLubyte *
intelGetString(GLcontext * ctx, GLenum name)
{
   const char *chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) "Tungsten Graphics, Inc";
      break;

   case GL_RENDERER:
      switch (intel_context(ctx)->intelScreen->deviceID) {
      case PCI_CHIP_845_G:
         chipset = "Intel(R) 845G";
         break;
      case PCI_CHIP_I830_M:
         chipset = "Intel(R) 830M";
         break;
      case PCI_CHIP_I855_GM:
         chipset = "Intel(R) 852GM/855GM";
         break;
      case PCI_CHIP_I865_G:
         chipset = "Intel(R) 865G";
         break;
      case PCI_CHIP_I915_G:
         chipset = "Intel(R) 915G";
         break;
      case PCI_CHIP_I915_GM:
         chipset = "Intel(R) 915GM";
         break;
      case PCI_CHIP_I945_G:
         chipset = "Intel(R) 945G";
         break;
      case PCI_CHIP_I945_GM:
         chipset = "Intel(R) 945GM";
         break;
      case PCI_CHIP_I945_GME:
         chipset = "Intel(R) 945GME";
         break;
      case PCI_CHIP_G33_G:
         chipset = "Intel(R) G33";
         break;
      case PCI_CHIP_Q35_G:
         chipset = "Intel(R) Q35";
         break;
      case PCI_CHIP_Q33_G:
         chipset = "Intel(R) Q33";
         break;
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString(buffer, chipset, DRIVER_DATE, 0);
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}


/**
 * Extension strings exported by the intel driver.
 */
const struct dri_extension card_extensions[] = {
   {"GL_ARB_multisample", GL_ARB_multisample_functions},
   {"GL_ARB_multitexture", NULL},
   {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
   {"GL_ARB_texture_border_clamp", NULL},
   {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
   {"GL_ARB_texture_cube_map", NULL},
   {"GL_ARB_texture_env_add", NULL},
   {"GL_ARB_texture_env_combine", NULL},
   {"GL_ARB_texture_env_dot3", NULL},
   {"GL_ARB_texture_mirrored_repeat", NULL},
   {"GL_ARB_texture_rectangle", NULL},
   {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
   {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
   {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
   {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
   {"GL_EXT_blend_equation_separate",
    GL_EXT_blend_equation_separate_functions},
   {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
   {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
   {"GL_EXT_blend_subtract", NULL},
   {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
   {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
   {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
#if 1                           /* XXX FBO temporary? */
   {"GL_EXT_packed_depth_stencil", NULL},
#endif
   {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
   {"GL_EXT_stencil_wrap", NULL},
   {"GL_EXT_texture_edge_clamp", NULL},
   {"GL_EXT_texture_env_combine", NULL},
   {"GL_EXT_texture_env_dot3", NULL},
   {"GL_EXT_texture_filter_anisotropic", NULL},
   {"GL_EXT_texture_lod_bias", NULL},
   {"GL_3DFX_texture_compression_FXT1", NULL},
   {"GL_APPLE_client_storage", NULL},
   {"GL_MESA_pack_invert", NULL},
   {"GL_MESA_ycbcr_texture", NULL},
   {"GL_NV_blend_square", NULL},
   {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
   {"GL_NV_vertex_program1_1", NULL},
   {"GL_SGIS_generate_mipmap", NULL},
   {NULL, NULL}
};

const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
   {NULL, NULL}
};

extern const struct tnl_pipeline_stage _intel_render_stage;

static const struct tnl_pipeline_stage *intel_pipeline[] = {
   &_tnl_vertex_transform_stage,
   &_tnl_vertex_cull_stage,
   &_tnl_normal_transform_stage,
   &_tnl_lighting_stage,
   &_tnl_fog_coordinate_stage,
   &_tnl_texgen_stage,
   &_tnl_texture_transform_stage,
   &_tnl_point_attenuation_stage,
   &_tnl_vertex_program_stage,
#if 1
   &_intel_render_stage,        /* ADD: unclipped rastersetup-to-dma */
#endif
   &_tnl_render_stage,
   0,
};


static const struct dri_debug_control debug_control[] = {
   {"tex", DEBUG_TEXTURE},
   {"state", DEBUG_STATE},
   {"ioctl", DEBUG_IOCTL},
   {"blit", DEBUG_BLIT},
   {"mip", DEBUG_MIPTREE},
   {"fall", DEBUG_FALLBACKS},
   {"verb", DEBUG_VERBOSE},
   {"bat", DEBUG_BATCH},
   {"pix", DEBUG_PIXEL},
   {"buf", DEBUG_BUFMGR},
   {"reg", DEBUG_REGION},
   {"fbo", DEBUG_FBO},
   {"lock", DEBUG_LOCK},
   {"sync", DEBUG_SYNC},
   {NULL, 0}
};


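/**
 * UpdateState driver hook: pass the new-state flags on to the swrast,
 * swsetup, vbo and tnl modules and accumulate them in NewGLState for
 * later hardware state validation.
 */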
static void
intelInvalidateState(GLcontext * ctx, GLuint new_state)
{
   _swrast_InvalidateState(ctx, new_state);
   _swsetup_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);
   intel_context(ctx)->NewGLState |= new_state;
}


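/**
 * Flush rendering: complete any software fallback rendering, emit queued
 * vertices and flush the batchbuffer if it contains commands.
 */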
void
intelFlush(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   if (intel->Fallback)
      _swrast_flush(ctx);

   INTEL_FIREVERTICES(intel);

   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);

   /* XXX: Need to do an MI_FLUSH here.
    */
}

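/**
 * Finish rendering: flush as above, then wait on (and release) the last
 * fence emitted for the batchbuffer.
 */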
void
intelFinish(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);
   intelFlush(ctx);
   if (intel->batch->last_fence) {
      dri_fence_wait(intel->batch->last_fence);
      dri_fence_unreference(intel->batch->last_fence);
      intel->batch->last_fence = NULL;
   }
}

/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *)private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}

/** Driver-specific fence wait implementation for the fake memory manager. */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *)private;

   intelWaitIrq(intel, cookie);

   return 0;
}

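/**
 * Choose and initialize the buffer manager: use the TTM-based manager when
 * the DDX and DRM are new enough (and INTEL_NO_TTM is not set), otherwise
 * fall back to the classic (fake) manager running out of the texture pool.
 */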
static GLboolean
intel_init_bufmgr(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;

   /* If we've got a new enough DDX that's initializing TTM and giving us
    * object handles for the shared buffers, use that.
    */
   intel->ttm = GL_FALSE;
   if (!ttm_disable &&
       intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
       intel->intelScreen->drmMinor >= 11 &&
       intel->intelScreen->front.bo_handle != -1)
   {
      intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
                                            DRM_FENCE_TYPE_EXE,
                                            DRM_FENCE_TYPE_EXE |
                                            DRM_I915_FENCE_TYPE_RW,
                                            BATCH_SZ);
      if (intel->bufmgr != NULL)
         intel->ttm = GL_TRUE;
   }
   /* Otherwise, use the classic buffer manager. */
   if (intel->bufmgr == NULL) {
      if (ttm_disable) {
         fprintf(stderr, "TTM buffer manager disabled.  Using classic.\n");
      } else {
         fprintf(stderr, "Failed to initialize TTM buffer manager.  "
                 "Falling back to classic.\n");
      }

      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
                 __func__, __LINE__);
         return GL_FALSE;
      }

      intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
                                           intelScreen->tex.map,
                                           intelScreen->tex.size,
                                           intel_fence_emit,
                                           intel_fence_wait,
                                           intel);
   }

   return GL_TRUE;
}

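/**
 * Fill in the dd_function_table with the core driver hooks; the swrast
 * imaging functions are used directly, and the texture, pixel, state and
 * buffer modules install their own entries.
 */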
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;
   functions->CopyColorTable = _swrast_CopyColorTable;
   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;

   intelInitTextureFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitBufferFuncs(functions);
}


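/**
 * Initialize the device-independent parts of the intel context: create the
 * Mesa context, set up the buffer manager, driconf options and context
 * limits, install the custom TNL pipeline and extensions, and allocate the
 * batchbuffer and static regions.
 */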
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   drmI830Sarea *saPriv = (drmI830Sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
   int fthrottle_mode;

   if (!_mesa_initialize_context(&intel->ctx,
                                 mesaVis, shareCtx,
                                 functions, (void *) intel))
      return GL_FALSE;

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   /* DRI setup */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = (drmLock *) &sPriv->pSAREA->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   if (!lockMutexInit) {
      lockMutexInit = GL_TRUE;
      _glthread_INIT_MUTEX(lockMutex);
   }

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum, "i915");

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 3.0;
   ctx->Const.MaxLineWidthAA = 3.0;
   ctx->Const.LineWidthGranularity = 1.0;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* Reinitialize the context point state.  It depends on the constants
    * in __GLcontextRec::Const.
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Install the customized pipeline: */
   _tnl_destroy_pipeline(ctx);
   _tnl_install_pipeline(ctx, intel_pipeline);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   /* Initialize swrast, tnl driver tables: */
   intelInitSpanFuncs(ctx);
   intelInitTriFuncs(ctx);

   intel->RenderIndex = ~0;

   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   /* Disable the imaging extension until convolution is working in the
    * teximage paths:
    */
   driInitExtensions(ctx, card_extensions,
                     /* GL_TRUE, */
                     GL_FALSE);

   if (intel->ttm)
      driInitExtensions(ctx, ttm_extensions, GL_FALSE);

   intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);
   intel->last_swap_fence = NULL;
   intel->first_swap_fence = NULL;

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }

   intel->prim.primitive = ~0;

#if DO_DEBUG
   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);
#endif

   if (getenv("INTEL_NO_RAST")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      FALLBACK(intel, INTEL_FALLBACK_USER, 1);
   }

   return GL_TRUE;
}

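/**
 * Tear down a context: fire pending vertices, run the chipset-specific
 * destroy hook, shut down the helper modules, free the batchbuffer and
 * swap fences, then release the Mesa context data and the buffer manager.
 */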
void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      INTEL_FIREVERTICES(intel);

      intel->vtbl.destroy(intel);

      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);

      if (intel->last_swap_fence) {
         dri_fence_wait(intel->last_swap_fence);
         dri_fence_unreference(intel->last_swap_fence);
         intel->last_swap_fence = NULL;
      }
      if (intel->first_swap_fence) {
         dri_fence_wait(intel->first_swap_fence);
         dri_fence_unreference(intel->first_swap_fence);
         intel->first_swap_fence = NULL;
      }

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
      }

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      dri_bufmgr_destroy(intel->bufmgr);
   }
}

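/** Unbinding a context requires no driver-side work; just report success. */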
GLboolean
intelUnbindContext(__DRIcontextPrivate * driContextPriv)
{
   return GL_TRUE;
}

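/**
 * MakeCurrent: attach the drawable's renderbuffers to the static screen
 * regions if they have none yet, resize the framebuffers to match the
 * windows, make the Mesa context current and, when binding a new drawable,
 * set up its vblank state before pointing the context at the draw buffer.
 */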
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
{

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;


      /* XXX FBO temporary fix-ups! */
      /* If the renderbuffers don't have regions, init them from the context. */
      {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0] && !intel_fb->color_rb[0]->region) {
            intel_region_reference(&intel_fb->color_rb[0]->region,
                                   intel->front_region);
         }
         if (intel_fb->color_rb[1] && !intel_fb->color_rb[1]->region) {
            intel_region_reference(&intel_fb->color_rb[1]->region,
                                   intel->back_region);
         }
         if (intel_fb->color_rb[2] && !intel_fb->color_rb[2]->region) {
            intel_region_reference(&intel_fb->color_rb[2]->region,
                                   intel->third_region);
         }
         if (irbDepth && !irbDepth->region) {
            intel_region_reference(&irbDepth->region, intel->depth_region);
         }
         if (irbStencil && !irbStencil->region) {
            intel_region_reference(&irbStencil->region, intel->depth_region);
         }
      }

      /* Set the GLframebuffer size to match the window, if needed. */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
         driUpdateFramebufferSize(&intel->ctx, driReadPriv);
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {

         if (intel->driDrawable != driDrawPriv) {
            if (driDrawPriv->swap_interval == (unsigned) -1) {
               int i;

               driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
                  ? driGetDefaultVBlankFlags(&intel->optionCache)
                  : VBLANK_FLAG_NO_IRQ;

               (*dri_interface->getUST) (&intel_fb->swap_ust);
               driDrawableInitVBlank(driDrawPriv);
               intel_fb->vbl_waited = driDrawPriv->vblSeq;

               for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
                  if (intel_fb->color_rb[i])
                     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
               }
            }
            intel->driDrawable = driDrawPriv;
            intelWindowMoved(intel);
         }

         intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      }
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}

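/**
 * Called when the DRM lock was contended: reacquire it, revalidate the
 * drawable, notify the fake bufmgr if another context touched texture
 * memory, and pick up any screen-size or drawable changes.
 */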
static void
intelContendedLock(struct intel_context *intel, GLuint flags)
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   drmI830Sarea *sarea = intel->sarea;

   drmGetLock(intel->driFd, intel->hHWContext, flags);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - got contended lock\n", __progname);

   /* If the window moved, we may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
   }

   if (sarea->width != intel->width ||
       sarea->height != intel->height) {
      int numClipRects = intel->numClipRects;

      /*
       * FIXME: Really only need to do this when drawing to a
       * common back- or front buffer.
       */

      /*
       * This will essentially drop the outstanding batchbuffer on the floor.
       */
      intel->numClipRects = 0;

      if (intel->Fallback)
         _swrast_flush(&intel->ctx);

      INTEL_FIREVERTICES(intel);

      if (intel->batch->map != intel->batch->ptr)
         intel_batchbuffer_flush(intel->batch);

      intel->numClipRects = numClipRects;

      /* force window update */
      intel->lastStamp = 0;

      intel->width = sarea->width;
      intel->height = sarea->height;
   }

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved(intel);
      intel->lastStamp = dPriv->lastStamp;
   }
}


/* Lock the hardware and validate our state.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
    __DRIdrawablePrivate *dPriv = intel->driDrawable;
    char __ret = 0;
    struct intel_framebuffer *intel_fb = NULL;
    struct intel_renderbuffer *intel_rb = NULL;
    _glthread_LOCK_MUTEX(lockMutex);
    assert(!intel->locked);

    if (intel->driDrawable) {
       intel_fb = intel->driDrawable->driverPrivate;

       if (intel_fb)
          intel_rb =
             intel_get_renderbuffer(&intel_fb->Base,
                                    intel_fb->Base._ColorDrawBufferMask[0] ==
                                    BUFFER_BIT_FRONT_LEFT ? BUFFER_FRONT_LEFT :
                                    BUFFER_BACK_LEFT);
    }

    if (intel_rb && dPriv->vblFlags &&
        !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
        (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1 << 23)) {
        drmVBlank vbl;

        vbl.request.type = DRM_VBLANK_ABSOLUTE;

        if (dPriv->vblFlags & VBLANK_FLAG_SECONDARY) {
            vbl.request.type |= DRM_VBLANK_SECONDARY;
        }

        vbl.request.sequence = intel_rb->vbl_pending;
        drmWaitVBlank(intel->driFd, &vbl);
        intel_fb->vbl_waited = vbl.reply.sequence;
    }

    DRM_CAS(intel->driHwLock, intel->hHWContext,
            (DRM_LOCK_HELD | intel->hHWContext), __ret);

    if (__ret)
        intelContendedLock(intel, 0);

    if (INTEL_DEBUG & DEBUG_LOCK)
        _mesa_printf("%s - locked\n", __progname);

    intel->locked = 1;
}


/* Unlock the hardware using the global current context.
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);
}