intel_context.c revision 1e645b365900cf1c71ca5594bd6b549a1f203040
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "glheader.h"
#include "context.h"
#include "matrix.h"
#include "simple_list.h"
#include "extensions.h"
#include "framebuffer.h"
#include "imports.h"
#include "points.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"

#include "tnl/t_pipeline.h"
#include "tnl/t_vertex.h"

#include "drivers/common/driverfuncs.h"

#include "intel_screen.h"

#include "i830_dri.h"

#include "intel_chipset.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_decode.h"
#include "intel_bufmgr.h"

#include "drirenderbuffer.h"
#include "vblank.h"
#include "utils.h"
#include "xmlpool.h"            /* for symbolic values of enum-type options */

#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif

#define need_GL_NV_point_sprite
#define need_GL_ARB_multisample
#define need_GL_ARB_point_parameters
#define need_GL_ARB_texture_compression
#define need_GL_ARB_vertex_buffer_object
#define need_GL_ARB_vertex_program
#define need_GL_ARB_window_pos
#define need_GL_ARB_occlusion_query
#define need_GL_EXT_blend_color
#define need_GL_EXT_blend_equation_separate
#define need_GL_EXT_blend_func_separate
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_cull_vertex
#define need_GL_EXT_fog_coord
#define need_GL_EXT_framebuffer_object
#define need_GL_EXT_multi_draw_arrays
#define need_GL_EXT_secondary_color
#define need_GL_NV_vertex_program
#define need_GL_ATI_separate_stencil
#define need_GL_EXT_point_parameters
#define need_GL_VERSION_2_0
#define need_GL_VERSION_2_1
#define need_GL_ARB_shader_objects
#define need_GL_ARB_vertex_shader

#include "extension_helper.h"

#define DRIVER_DATE                     "20080716"
#define DRIVER_DATE_GEM                 "GEM " DRIVER_DATE

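/* glGetString() hook.  GL_VENDOR is a constant string; GL_RENDERER is
 * assembled from the marketing name for the PCI device ID plus the driver
 * date (prefixed with "GEM" when the kernel memory manager is in use).
 */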
static const GLubyte *
intelGetString(GLcontext * ctx, GLenum name)
{
   const struct intel_context *const intel = intel_context(ctx);
   const char *chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) "Tungsten Graphics, Inc";
      break;

   case GL_RENDERER:
      switch (intel->intelScreen->deviceID) {
      case PCI_CHIP_845_G:
         chipset = "Intel(R) 845G";
         break;
      case PCI_CHIP_I830_M:
         chipset = "Intel(R) 830M";
         break;
      case PCI_CHIP_I855_GM:
         chipset = "Intel(R) 852GM/855GM";
         break;
      case PCI_CHIP_I865_G:
         chipset = "Intel(R) 865G";
         break;
      case PCI_CHIP_I915_G:
         chipset = "Intel(R) 915G";
         break;
      case PCI_CHIP_E7221_G:
         chipset = "Intel (R) E7221G (i915)";
         break;
      case PCI_CHIP_I915_GM:
         chipset = "Intel(R) 915GM";
         break;
      case PCI_CHIP_I945_G:
         chipset = "Intel(R) 945G";
         break;
      case PCI_CHIP_I945_GM:
         chipset = "Intel(R) 945GM";
         break;
      case PCI_CHIP_I945_GME:
         chipset = "Intel(R) 945GME";
         break;
      case PCI_CHIP_G33_G:
         chipset = "Intel(R) G33";
         break;
      case PCI_CHIP_Q35_G:
         chipset = "Intel(R) Q35";
         break;
      case PCI_CHIP_Q33_G:
         chipset = "Intel(R) Q33";
         break;
      case PCI_CHIP_I965_Q:
         chipset = "Intel(R) 965Q";
         break;
      case PCI_CHIP_I965_G:
      case PCI_CHIP_I965_G_1:
         chipset = "Intel(R) 965G";
         break;
      case PCI_CHIP_I946_GZ:
         chipset = "Intel(R) 946GZ";
         break;
      case PCI_CHIP_I965_GM:
         chipset = "Intel(R) 965GM";
         break;
      case PCI_CHIP_I965_GME:
         chipset = "Intel(R) 965GME/GLE";
         break;
      case PCI_CHIP_GM45_GM:
         chipset = "Mobile Intel® GM45 Express Chipset";
         break;
      case PCI_CHIP_IGD_E_G:
         chipset = "Intel(R) Integrated Graphics Device";
         break;
      case PCI_CHIP_G45_G:
         chipset = "Intel(R) G45/G43";
         break;
      case PCI_CHIP_Q45_G:
         chipset = "Intel(R) Q45/Q43";
         break;
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString(buffer, chipset,
                                  (intel->ttm) ? DRIVER_DATE_GEM : DRIVER_DATE,
                                  0);
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}

/**
 * Extension strings exported by the intel driver.
 *
 * Extensions supported by all chips supported by i830_dri, i915_dri, or
 * i965_dri.
 */
static const struct dri_extension card_extensions[] = {
   {"GL_ARB_multisample", GL_ARB_multisample_functions},
   {"GL_ARB_multitexture", NULL},
   {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
   {"GL_NV_point_sprite", GL_NV_point_sprite_functions},
   {"GL_ARB_texture_border_clamp", NULL},
   {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
   {"GL_ARB_texture_cube_map", NULL},
   {"GL_ARB_texture_env_add", NULL},
   {"GL_ARB_texture_env_combine", NULL},
   {"GL_ARB_texture_env_crossbar", NULL},
   {"GL_ARB_texture_env_dot3", NULL},
   {"GL_ARB_texture_mirrored_repeat", NULL},
   {"GL_ARB_texture_non_power_of_two", NULL},
   {"GL_ARB_texture_rectangle", NULL},
   {"GL_NV_texture_rectangle", NULL},
   {"GL_EXT_texture_rectangle", NULL},
   {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
   {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
   {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
   {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
   {"GL_EXT_blend_equation_separate",
    GL_EXT_blend_equation_separate_functions},
   {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
   {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
   {"GL_EXT_blend_logic_op", NULL},
   {"GL_EXT_blend_subtract", NULL},
   {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
   {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
   {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
   {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions},
#if 1                           /* XXX FBO temporary? */
   {"GL_EXT_packed_depth_stencil", NULL},
#endif
   {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
   {"GL_EXT_stencil_wrap", NULL},
   {"GL_EXT_texture_edge_clamp", NULL},
   {"GL_EXT_texture_env_combine", NULL},
   {"GL_EXT_texture_env_dot3", NULL},
   {"GL_EXT_texture_filter_anisotropic", NULL},
   {"GL_EXT_texture_lod_bias", NULL},
   {"GL_3DFX_texture_compression_FXT1", NULL},
   {"GL_APPLE_client_storage", NULL},
   {"GL_MESA_pack_invert", NULL},
   {"GL_MESA_ycbcr_texture", NULL},
   {"GL_NV_blend_square", NULL},
   {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
   {"GL_NV_vertex_program1_1", NULL},
   {"GL_SGIS_generate_mipmap", NULL},
   {NULL, NULL}
};

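/* Extensions enabled only on i965-class hardware (see intelInitExtensions):
 * GLSL 1.20, ARB vertex/fragment shaders and programs, depth textures and
 * shadow mapping.
 */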
static const struct dri_extension brw_extensions[] = {
   {"GL_ARB_shading_language_100",       GL_VERSION_2_0_functions},
   {"GL_ARB_shading_language_120",       GL_VERSION_2_1_functions},
   {"GL_ARB_shader_objects",             GL_ARB_shader_objects_functions},
   {"GL_ARB_vertex_shader",              GL_ARB_vertex_shader_functions},
   {"GL_ARB_point_sprite",               NULL},
   {"GL_ARB_fragment_shader",            NULL},
   {"GL_ARB_draw_buffers",               NULL},
   {"GL_ARB_depth_texture",              NULL},
   {"GL_ARB_fragment_program",           NULL},
   {"GL_ARB_shadow",                     NULL},
   {"GL_EXT_shadow_funcs",               NULL},
   /* ARB extn won't work if not enabled */
   {"GL_SGIX_depth_texture",             NULL},
   {"GL_EXT_texture_sRGB",               NULL},
   {NULL,                                NULL}
};

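/* ARB_occlusion_query is only enabled on 965 with a new enough DRM
 * (drmMinor >= 8); the BeginQuery/EndQuery implementation below relies on
 * the DRM_I915_MMIO ioctl.
 */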
static const struct dri_extension arb_oc_extensions[] = {
   {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions},
   {NULL, NULL}
};

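/* Extensions only exposed when the kernel memory manager (GEM/TTM) is
 * active; see intelInitExtensions().
 */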
static const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
   {NULL, NULL}
};

/**
 * Initializes the potential list of extensions if ctx == NULL, or actually
 * enables extensions for a context.
 */
void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
{
   struct intel_context *intel = ctx ? intel_context(ctx) : NULL;

   /* Disable the imaging extension until convolution is working in the
    * teximage paths.
    */
   enable_imaging = GL_FALSE;

   driInitExtensions(ctx, card_extensions, enable_imaging);

   if (intel == NULL || intel->ttm)
      driInitExtensions(ctx, ttm_extensions, GL_FALSE);

   if (intel == NULL ||
       (IS_965(intel->intelScreen->deviceID) &&
        intel->intelScreen->drmMinor >= 8))
      driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);

   if (intel == NULL || IS_965(intel->intelScreen->deviceID))
      driInitExtensions(ctx, brw_extensions, GL_FALSE);
}

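/* Token table for the INTEL_DEBUG environment variable, parsed with
 * driParseDebugString() in intelInitContext().  Tokens may be combined,
 * e.g. INTEL_DEBUG=bat,fall.
 */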
static const struct dri_debug_control debug_control[] = {
   { "tex",   DEBUG_TEXTURE },
   { "state", DEBUG_STATE },
   { "ioctl", DEBUG_IOCTL },
   { "blit",  DEBUG_BLIT },
   { "mip",   DEBUG_MIPTREE },
   { "fall",  DEBUG_FALLBACKS },
   { "verb",  DEBUG_VERBOSE },
   { "bat",   DEBUG_BATCH },
   { "pix",   DEBUG_PIXEL },
   { "buf",   DEBUG_BUFMGR },
   { "reg",   DEBUG_REGION },
   { "fbo",   DEBUG_FBO },
   { "lock",  DEBUG_LOCK },
   { "sync",  DEBUG_SYNC },
   { "prim",  DEBUG_PRIMS },
   { "vert",  DEBUG_VERTS },
   { "dri",   DEBUG_DRI },
   { "dma",   DEBUG_DMA },
   { "san",   DEBUG_SANITY },
   { "sleep", DEBUG_SLEEP },
   { "stats", DEBUG_STATS },
   { "tile",  DEBUG_TILE },
   { "sing",  DEBUG_SINGLE_THREAD },
   { "thre",  DEBUG_SINGLE_THREAD },
   { "wm",    DEBUG_WM },
   { "urb",   DEBUG_URB },
   { "vs",    DEBUG_VS },
   { NULL,    0 }
};


static void
intelInvalidateState(GLcontext * ctx, GLuint new_state)
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState(ctx, new_state);
   _swsetup_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state(intel, new_state);
}


void
intelFlush(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   if (intel->Fallback)
      _swrast_flush(ctx);

   if (!IS_965(intel->intelScreen->deviceID))
      INTEL_FIREVERTICES(intel);

   /* Emit a flush so that any frontbuffer rendering that might have occurred
    * lands onscreen in a timely manner, even if the X Server doesn't trigger
    * a flush for us.
    */
   intel_batchbuffer_emit_mi_flush(intel->batch);

   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
}

void
intelFinish(GLcontext * ctx)
{
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   int i;

   intelFlush(ctx);

   for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb;

      irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->region)
         dri_bo_wait_rendering(irb->region->buffer);
   }
   if (fb->_DepthBuffer) {
      /* XXX: Wait on buffer idle */
   }
}

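/* ARB_occlusion_query support: BeginQuery snapshots the hardware
 * PS_DEPTH_COUNT statistics register through the DRM_I915_MMIO ioctl into
 * q->Result; EndQuery reads it again and reports the difference.
 * intelFinish() is called first so that all outstanding rendering is
 * counted, and stats_wm stays raised while a query is active so the state
 * emission code can keep statistics gathering enabled.
 */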
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context(ctx);
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };

   intel->stats_wm++;
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
}

static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint64EXT tmp;
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };

   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
   intel->stats_wm--;
}

/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *) private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}

/** Driver-specific fence wait implementation for the fake memory manager. */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *) private;

   intelWaitIrq(intel, cookie);

   return 0;
}

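/* Choose a buffer manager: the kernel GEM allocator when DRI2 is enabled or
 * the DDX/DRM are new enough to hand us a GEM handle for the front buffer,
 * otherwise the user-space "fake" bufmgr backed by the static texture
 * aperture.  Setting INTEL_NO_GEM in the environment forces the classic
 * path.
 */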
static GLboolean
intel_init_bufmgr(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;
   GLboolean gem_supported;

   /* If we've got a new enough DDX that's initializing GEM and giving us
    * object handles for the shared buffers, use that.
    */
   intel->ttm = GL_FALSE;
   if (intel->intelScreen->driScrnPriv->dri2.enabled)
      gem_supported = GL_TRUE;
   else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
            intel->intelScreen->drmMinor >= 11 &&
            intel->intelScreen->front.bo_handle != -1)
      gem_supported = GL_TRUE;
   else
      gem_supported = GL_FALSE;

   if (!gem_disable && gem_supported) {
      int bo_reuse_mode;

      intel->bufmgr = intel_bufmgr_gem_init(intel->driFd, BATCH_SZ);
      if (intel->bufmgr != NULL) {
         intel->ttm = GL_TRUE;

         bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
         switch (bo_reuse_mode) {
         case DRI_CONF_BO_REUSE_DISABLED:
            break;
         case DRI_CONF_BO_REUSE_ALL:
            intel_bufmgr_gem_enable_reuse(intel->bufmgr);
            break;
         }
      }
   }
   /* Otherwise, use the classic buffer manager. */
   if (intel->bufmgr == NULL) {
      if (gem_disable) {
         fprintf(stderr, "GEM disabled.  Using classic.\n");
      } else {
         fprintf(stderr, "Failed to initialize GEM.  "
                 "Falling back to classic.\n");
      }

      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
                 __func__, __LINE__);
         return GL_FALSE;
      }

      intel->bufmgr = intel_bufmgr_fake_init(intelScreen->tex.offset,
                                             intelScreen->tex.map,
                                             intelScreen->tex.size,
                                             intel_fence_emit,
                                             intel_fence_wait,
                                             intel);
   }

   /* XXX bufmgr should be per-screen, not per-context */
   intelScreen->ttm = intel->ttm;

   return GL_TRUE;
}

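/* Fill in Mesa's device-driver function table with the hooks implemented in
 * this file, then let the intel texture, state, buffer and pixel modules add
 * theirs.
 */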
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   functions->CopyColorTable = _swrast_CopyColorTable;
   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;

   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;

   intelInitTextureFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
}


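/* Common context-creation path for the i830/i915 and i965 drivers: create
 * the core Mesa context, hook up the DRI screen and SAREA state, pick a
 * buffer manager, set implementation limits, initialize the swrast/tnl
 * fallback modules and enable extensions.
 */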
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
   int fthrottle_mode;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
                       IS_965(intelScreen->deviceID) ? "i965" : "i915");
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* Reinitialize the context point state.  It depends on constants in
    * __GLcontextRec::Const.
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (IS_965(intelScreen->deviceID))
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active.  Exiting\n");
      exit(1);
   }

   intelInitExtensions(ctx, GL_FALSE);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}

void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      INTEL_FIREVERTICES(intel);

      intel->vtbl.destroy(intel);

      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      free(intel->prim.vb);

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
      }

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      dri_bufmgr_destroy(intel->bufmgr);
   }
}

GLboolean
intelUnbindContext(__DRIcontextPrivate * driContextPriv)
{
   return GL_TRUE;
}

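/* Bind the context to a draw and a read drawable.  Without DRI2 the
 * window-system renderbuffers are pointed at the static front/back/depth
 * regions here; vblank bookkeeping is initialized the first time a drawable
 * is bound, and the drawable size and draw-buffer state are refreshed.
 */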
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
{
   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context */
      if (!driContextPriv->driScreenPriv->dri2.enabled) {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         }
         if (intel_fb->color_rb[1]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
                                          intel->back_region);
         }
#if 0
         if (intel_fb->color_rb[2]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                          intel->third_region);
         }
#endif
         if (irbDepth) {
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         }
         if (irbStencil) {
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);
         }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
         driUpdateFramebufferSize(&intel->ctx, driReadPriv);
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {

         if (intel->driReadDrawable != driReadPriv)
            intel->driReadDrawable = driReadPriv;

         if (intel->driDrawable != driDrawPriv) {
            if (driDrawPriv->swap_interval == (unsigned)-1) {
               int i;

               driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
                  ? driGetDefaultVBlankFlags(&intel->optionCache)
                  : VBLANK_FLAG_NO_IRQ;

               (*psp->systemTime->getUST) (&intel_fb->swap_ust);
               driDrawableInitVBlank(driDrawPriv);
               intel_fb->vbl_waited = driDrawPriv->vblSeq;

               for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
                  if (intel_fb->color_rb[i])
                     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
               }
            }
            intel->driDrawable = driDrawPriv;
            intelWindowMoved(intel);
         }

         intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      }
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}

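/* Called after taking a contended hardware lock: re-validate the drawable's
 * cliprects, reclaim the hardware context if another client took it, tell
 * the fake bufmgr that texture memory may have been clobbered, and pick up
 * screen size changes (which drops any outstanding cliprect-relative batch).
 */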
static void
intelContendedLock(struct intel_context *intel, GLuint flags)
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile struct drm_i915_sarea *sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);
   intel->locked = 1;

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - got contended lock\n", __progname);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   if (sarea && sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
                 sarea->ctxOwner, me);
      }
      sarea->ctxOwner = me;
   }

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      intel_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR)
         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
                 sarea->ctxOwner, intel->hHWContext);
   }

   if (sarea->width != intel->width || sarea->height != intel->height) {
      int numClipRects = intel->numClipRects;

      /*
       * FIXME: Really only need to do this when drawing to a
       * common back- or front buffer.
       */

      /*
       * This will essentially drop the outstanding batchbuffer on
       * the floor.
       */
      intel->numClipRects = 0;

      if (intel->Fallback)
         _swrast_flush(&intel->ctx);

      if (!IS_965(intel->intelScreen->deviceID))
         INTEL_FIREVERTICES(intel);

      if (intel->batch->map != intel->batch->ptr)
         intel_batchbuffer_flush(intel->batch);

      intel->numClipRects = numClipRects;

      /* force window update */
      intel->lastStamp = 0;

      intel->width = sarea->width;
      intel->height = sarea->height;
   }

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved(intel);
      intel->lastStamp = dPriv->lastStamp;
   }
}


_glthread_DECLARE_STATIC_MUTEX(lockMutex);

/* Lock the hardware and validate our state.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
    __DRIdrawable *dPriv = intel->driDrawable;
    __DRIscreen *sPriv = intel->driScreen;
    char __ret = 0;
    struct intel_framebuffer *intel_fb = NULL;
    struct intel_renderbuffer *intel_rb = NULL;

    _glthread_LOCK_MUTEX(lockMutex);
    assert(!intel->locked);
    intel->locked = 1;

    if (intel->driDrawable) {
       intel_fb = intel->driDrawable->driverPrivate;

       if (intel_fb)
          intel_rb =
             intel_get_renderbuffer(&intel_fb->Base,
                                    intel_fb->Base._ColorDrawBufferIndexes[0]);
    }

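    /* Throttle on vblank: if a swap scheduled for this renderbuffer has not
     * reached its target vblank yet, wait for it before rendering again so
     * we don't run ahead of the swap.  Skipped when vblank IRQs are
     * unavailable.
     */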
    if (intel_rb && dPriv->vblFlags &&
        !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
        (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
        drmVBlank vbl;

        vbl.request.type = DRM_VBLANK_ABSOLUTE;

        if (dPriv->vblFlags & VBLANK_FLAG_SECONDARY) {
            vbl.request.type |= DRM_VBLANK_SECONDARY;
        }

        vbl.request.sequence = intel_rb->vbl_pending;
        drmWaitVBlank(intel->driFd, &vbl);
        intel_fb->vbl_waited = vbl.reply.sequence;
    }

    DRM_CAS(intel->driHwLock, intel->hHWContext,
            (DRM_LOCK_HELD | intel->hHWContext), __ret);

    if (sPriv->dri2.enabled) {
        if (__ret)
            drmGetLock(intel->driFd, intel->hHWContext, 0);
        if (__driParseEvents(dPriv->driContextPriv, dPriv)) {
            intelWindowMoved(intel);
            intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
        }
    } else if (__ret) {
        intelContendedLock(intel, 0);
    }

    if (INTEL_DEBUG & DEBUG_LOCK)
       _mesa_printf("%s - locked\n", __progname);
}


/* Unlock the hardware using the global current context.
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   if (intel->batch->cliprect_mode == REFERENCES_CLIPRECTS)
      intel_batchbuffer_flush(intel->batch);
}