intel_context.c revision 6cefae5354fb3015c5a14677071871613faa9c3a
1/**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "glheader.h"
30#include "context.h"
31#include "matrix.h"
32#include "simple_list.h"
33#include "extensions.h"
34#include "framebuffer.h"
35#include "imports.h"
36#include "points.h"
37
38#include "swrast/swrast.h"
39#include "swrast_setup/swrast_setup.h"
40#include "tnl/tnl.h"
41
42#include "tnl/t_pipeline.h"
43#include "tnl/t_vertex.h"
44
45#include "drivers/common/driverfuncs.h"
46
47#include "intel_screen.h"
48
49#include "i830_dri.h"
50
51#include "intel_chipset.h"
52#include "intel_buffers.h"
53#include "intel_tex.h"
54#include "intel_ioctl.h"
55#include "intel_batchbuffer.h"
56#include "intel_blit.h"
57#include "intel_pixel.h"
58#include "intel_regions.h"
59#include "intel_buffer_objects.h"
60#include "intel_fbo.h"
61#include "intel_decode.h"
62#include "intel_bufmgr_fake.h"
63#include "intel_bufmgr_gem.h"
64
65#include "drirenderbuffer.h"
66#include "vblank.h"
67#include "utils.h"
68#include "xmlpool.h"            /* for symbolic values of enum-type options */
69#ifndef INTEL_DEBUG
70int INTEL_DEBUG = (0);
71#endif
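/* INTEL_DEBUG is a bitmask of the DEBUG_* flags named in the debug_control
 * table below; it is filled in from the INTEL_DEBUG environment variable in
 * intelInitContext() via driParseDebugString().
 */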
72
73#define need_GL_NV_point_sprite
74#define need_GL_ARB_multisample
75#define need_GL_ARB_point_parameters
76#define need_GL_ARB_texture_compression
77#define need_GL_ARB_vertex_buffer_object
78#define need_GL_ARB_vertex_program
79#define need_GL_ARB_window_pos
80#define need_GL_ARB_occlusion_query
81#define need_GL_EXT_blend_color
82#define need_GL_EXT_blend_equation_separate
83#define need_GL_EXT_blend_func_separate
84#define need_GL_EXT_blend_minmax
85#define need_GL_EXT_cull_vertex
86#define need_GL_EXT_fog_coord
87#define need_GL_EXT_framebuffer_object
88#define need_GL_EXT_multi_draw_arrays
89#define need_GL_EXT_secondary_color
90#define need_GL_NV_vertex_program
91#define need_GL_ATI_separate_stencil
92#define need_GL_EXT_point_parameters
93#define need_GL_VERSION_2_0
94#define need_GL_VERSION_2_1
95#define need_GL_ARB_shader_objects
96#define need_GL_ARB_vertex_shader
97
98#include "extension_helper.h"
99
100#define DRIVER_DATE                     "20061102"
101
102static const GLubyte *
103intelGetString(GLcontext * ctx, GLenum name)
104{
105   const char *chipset;
106   static char buffer[128];
107
108   switch (name) {
109   case GL_VENDOR:
110      return (GLubyte *) "Tungsten Graphics, Inc";
111      break;
112
113   case GL_RENDERER:
114      switch (intel_context(ctx)->intelScreen->deviceID) {
115      case PCI_CHIP_845_G:
116         chipset = "Intel(R) 845G";
117         break;
118      case PCI_CHIP_I830_M:
119         chipset = "Intel(R) 830M";
120         break;
121      case PCI_CHIP_I855_GM:
122         chipset = "Intel(R) 852GM/855GM";
123         break;
124      case PCI_CHIP_I865_G:
125         chipset = "Intel(R) 865G";
126         break;
127      case PCI_CHIP_I915_G:
128         chipset = "Intel(R) 915G";
129         break;
130      case PCI_CHIP_E7221_G:
131	 chipset = "Intel (R) E7221G (i915)";
132	 break;
133      case PCI_CHIP_I915_GM:
134         chipset = "Intel(R) 915GM";
135         break;
136      case PCI_CHIP_I945_G:
137         chipset = "Intel(R) 945G";
138         break;
139      case PCI_CHIP_I945_GM:
140         chipset = "Intel(R) 945GM";
141         break;
142      case PCI_CHIP_I945_GME:
143         chipset = "Intel(R) 945GME";
144         break;
145      case PCI_CHIP_G33_G:
146	 chipset = "Intel(R) G33";
147	 break;
148      case PCI_CHIP_Q35_G:
149	 chipset = "Intel(R) Q35";
150	 break;
151      case PCI_CHIP_Q33_G:
152	 chipset = "Intel(R) Q33";
153	 break;
154      case PCI_CHIP_I965_Q:
155	 chipset = "Intel(R) 965Q";
156	 break;
157      case PCI_CHIP_I965_G:
158      case PCI_CHIP_I965_G_1:
159	 chipset = "Intel(R) 965G";
160	 break;
161      case PCI_CHIP_I946_GZ:
162	 chipset = "Intel(R) 946GZ";
163	 break;
164      case PCI_CHIP_I965_GM:
165	 chipset = "Intel(R) 965GM";
166	 break;
167      case PCI_CHIP_I965_GME:
168	 chipset = "Intel(R) 965GME/GLE";
169	 break;
170      case PCI_CHIP_IGD_GM:
171	 chipset = "Intel(R) Integrated Graphics Device";
172	 break;
173      default:
174         chipset = "Unknown Intel Chipset";
175         break;
176      }
177
178      (void) driGetRendererString(buffer, chipset, DRIVER_DATE, 0);
179      return (GLubyte *) buffer;
180
181   default:
182      return NULL;
183   }
184}
185
186/**
187 * Extension strings exported by the intel driver.
188 *
189 * \note
190 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
191 * old i830-specific driver.
192 */
193static const struct dri_extension card_extensions[] = {
194   {"GL_ARB_multisample", GL_ARB_multisample_functions},
195   {"GL_ARB_multitexture", NULL},
196   {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
197   {"GL_NV_point_sprite", GL_NV_point_sprite_functions},
198   {"GL_ARB_texture_border_clamp", NULL},
199   {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
200   {"GL_ARB_texture_cube_map", NULL},
201   {"GL_ARB_texture_env_add", NULL},
202   {"GL_ARB_texture_env_combine", NULL},
203   {"GL_ARB_texture_env_dot3", NULL},
204   {"GL_ARB_texture_mirrored_repeat", NULL},
205   {"GL_ARB_texture_non_power_of_two",   NULL },
206   {"GL_ARB_texture_rectangle", NULL},
207   {"GL_NV_texture_rectangle", NULL},
208   {"GL_EXT_texture_rectangle", NULL},
209   {"GL_ARB_point_parameters", NULL},
210   {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
211   {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
212   {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
213   {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
214   {"GL_EXT_blend_equation_separate",
215    GL_EXT_blend_equation_separate_functions},
216   {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
217   {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
218   {"GL_EXT_blend_logic_op", NULL},
219   {"GL_EXT_blend_subtract", NULL},
220   {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
221   {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
222   {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
223   {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions},
224#if 1                           /* XXX FBO temporary? */
225   {"GL_EXT_packed_depth_stencil", NULL},
226#endif
227   {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
228   {"GL_EXT_stencil_wrap", NULL},
229   {"GL_EXT_texture_edge_clamp", NULL},
230   {"GL_EXT_texture_env_combine", NULL},
231   {"GL_EXT_texture_env_dot3", NULL},
232   {"GL_EXT_texture_filter_anisotropic", NULL},
233   {"GL_EXT_texture_lod_bias", NULL},
234   {"GL_3DFX_texture_compression_FXT1", NULL},
235   {"GL_APPLE_client_storage", NULL},
236   {"GL_MESA_pack_invert", NULL},
237   {"GL_MESA_ycbcr_texture", NULL},
238   {"GL_NV_blend_square", NULL},
239   {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
240   {"GL_NV_vertex_program1_1", NULL},
241   { "GL_SGIS_generate_mipmap", NULL },
242   {NULL, NULL}
243};
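/* In these extension tables a NULL second field means the extension adds no
 * new GL entry points that need registering; otherwise driInitExtensions()
 * uses the generated function list (from extension_helper.h) to set up the
 * dispatch entries.
 */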
244
245static const struct dri_extension brw_extensions[] = {
246   { "GL_ARB_shading_language_100",       GL_VERSION_2_0_functions},
247   { "GL_ARB_shading_language_120",       GL_VERSION_2_1_functions},
248   { "GL_ARB_shader_objects",             GL_ARB_shader_objects_functions},
249   { "GL_ARB_vertex_shader",              GL_ARB_vertex_shader_functions},
250   { "GL_ARB_point_sprite", 		  NULL},
251   { "GL_ARB_fragment_shader",            NULL },
252   { "GL_ARB_draw_buffers",               NULL },
253   { "GL_ARB_depth_texture",              NULL },
254   { "GL_ARB_fragment_program",           NULL },
255   { "GL_ARB_shadow",                     NULL },
256   { "GL_EXT_shadow_funcs",               NULL },
257   /* The corresponding ARB extension won't work if this one isn't enabled. */
258   { "GL_SGIX_depth_texture",             NULL },
259   { "GL_ARB_texture_env_crossbar",       NULL },
260   { "GL_EXT_texture_sRGB",		  NULL},
261   { NULL,                                NULL }
262};
263
264static const struct dri_extension arb_oc_extensions[] = {
265   {"GL_ARB_occlusion_query",            GL_ARB_occlusion_query_functions},
266   {NULL, NULL}
267};
268
269static const struct dri_extension ttm_extensions[] = {
270   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
271   {"GL_ARB_pixel_buffer_object", NULL},
272   {NULL, NULL}
273};
274
275/**
276 * If ctx is NULL, initializes the table of potentially supported extensions;
277 * otherwise enables the appropriate extensions on the given context.
278 */
279void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
280{
281   struct intel_context *intel = ctx ? intel_context(ctx) : NULL;
282
283   /* Disable imaging extension until convolution is working in teximage paths.
284    */
285   enable_imaging = GL_FALSE;
286
287   driInitExtensions(ctx, card_extensions, enable_imaging);
288
289   if (intel == NULL || intel->ttm)
290      driInitExtensions(ctx, ttm_extensions, GL_FALSE);
291
292   if (intel == NULL ||
293       (IS_965(intel->intelScreen->deviceID) &&
294	intel->intelScreen->drmMinor >= 8))
295      driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);
296
297   if (intel == NULL || IS_965(intel->intelScreen->deviceID))
298      driInitExtensions(ctx, brw_extensions, GL_FALSE);
299}
300
301static const struct dri_debug_control debug_control[] = {
302   { "tex",   DEBUG_TEXTURE},
303   { "state", DEBUG_STATE},
304   { "ioctl", DEBUG_IOCTL},
305   { "blit",  DEBUG_BLIT},
306   { "mip",   DEBUG_MIPTREE},
307   { "fall",  DEBUG_FALLBACKS},
308   { "verb",  DEBUG_VERBOSE},
309   { "bat",   DEBUG_BATCH},
310   { "pix",   DEBUG_PIXEL},
311   { "buf",   DEBUG_BUFMGR},
312   { "reg",   DEBUG_REGION},
313   { "fbo",   DEBUG_FBO},
314   { "lock",  DEBUG_LOCK},
315   { "sync",  DEBUG_SYNC},
316   { "prim",  DEBUG_PRIMS },
317   { "vert",  DEBUG_VERTS },
318   { "dri",   DEBUG_DRI },
319   { "dma",   DEBUG_DMA },
320   { "san",   DEBUG_SANITY },
321   { "sleep", DEBUG_SLEEP },
322   { "stats", DEBUG_STATS },
323   { "tile",  DEBUG_TILE },
324   { "sing",  DEBUG_SINGLE_THREAD },
325   { "thre",  DEBUG_SINGLE_THREAD },
326   { "wm",    DEBUG_WM },
327   { "urb",   DEBUG_URB },
328   { "vs",    DEBUG_VS },
329   { NULL,    0 }
330};
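/* Usage sketch: setting INTEL_DEBUG="bat fall" in the environment should
 * enable DEBUG_BATCH and DEBUG_FALLBACKS output, since intelInitContext()
 * hands the variable to driParseDebugString() together with this table; the
 * exact token matching is up to driParseDebugString().
 */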
331
332
333static void
334intelInvalidateState(GLcontext * ctx, GLuint new_state)
335{
336    struct intel_context *intel = intel_context(ctx);
337
338   _swrast_InvalidateState(ctx, new_state);
339   _swsetup_InvalidateState(ctx, new_state);
340   _vbo_InvalidateState(ctx, new_state);
341   _tnl_InvalidateState(ctx, new_state);
342   _tnl_invalidate_vertex_state(ctx, new_state);
343
344   intel->NewGLState |= new_state;
345
346   if (intel->vtbl.invalidate_state)
347      intel->vtbl.invalidate_state( intel, new_state );
348}
349
350
351void
352intelFlush(GLcontext * ctx)
353{
354   struct intel_context *intel = intel_context(ctx);
355
356   if (intel->Fallback)
357      _swrast_flush(ctx);
358
359   if (!IS_965(intel->intelScreen->deviceID))
360      INTEL_FIREVERTICES(intel);
361
362   if (intel->batch->map != intel->batch->ptr)
363      intel_batchbuffer_flush(intel->batch);
364
365   /* XXX: Need to do an MI_FLUSH here.
366    */
367}
368
369void
370intelFinish(GLcontext * ctx)
371{
372   struct gl_framebuffer *fb = ctx->DrawBuffer;
373   int i;
374
375   intelFlush(ctx);
376
377   for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
378       struct intel_renderbuffer *irb;
379
380       irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
381
382       if (irb && irb->region)
383	  dri_bo_wait_rendering(irb->region->buffer);
384   }
385   if (fb->_DepthBuffer) {
386      /* XXX: Wait on buffer idle */
387   }
388}
389
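/* Occlusion query support: BeginQuery/EndQuery below snapshot the hardware
 * PS_DEPTH_COUNT statistics register through the DRM_I915_MMIO ioctl and
 * report the difference as the query result (the count of samples that passed
 * the depth test).  intelFinish() is called first so the counter reflects all
 * previously submitted rendering; stats_wm appears to tell the state emission
 * code to keep the statistics counters enabled while a query is outstanding.
 */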
390static void
391intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
392{
393	struct intel_context *intel = intel_context( ctx );
394	struct drm_i915_mmio io = {
395		.read_write = I915_MMIO_READ,
396		.reg = MMIO_REGS_PS_DEPTH_COUNT,
397		.data = &q->Result
398	};
399	intel->stats_wm++;
400	intelFinish(&intel->ctx);
401	drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
402}
403
404static void
405intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
406{
407	struct intel_context *intel = intel_context( ctx );
408	GLuint64EXT tmp;
409	struct drm_i915_mmio io = {
410		.read_write = I915_MMIO_READ,
411		.reg = MMIO_REGS_PS_DEPTH_COUNT,
412		.data = &tmp
413	};
414	intelFinish(&intel->ctx);
415	drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
416	q->Result = tmp - q->Result;
417	q->Ready = GL_TRUE;
418	intel->stats_wm--;
419}
420
421/** Driver-specific fence emit implementation for the fake memory manager. */
422static unsigned int
423intel_fence_emit(void *private)
424{
425   struct intel_context *intel = (struct intel_context *)private;
426   unsigned int fence;
427
428   /* XXX: We would need to emit a flush here if one has not been emitted
429    * already (with the current batchbuffer implementation, it always has been).
430    */
431
432   fence = intelEmitIrqLocked(intel);
433
434   return fence;
435}
436
437/** Driver-specific fence wait implementation for the fake memory manager. */
438static int
439intel_fence_wait(void *private, unsigned int cookie)
440{
441   struct intel_context *intel = (struct intel_context *)private;
442
443   intelWaitIrq(intel, cookie);
444
445   return 0;
446}
447
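/* Buffer manager selection: intel_init_bufmgr() below prefers the kernel GEM
 * allocator when DRI2 is enabled or the DDX/DRM versions advertise GEM object
 * handles for the shared buffers, and otherwise falls back to the user-space
 * "fake" buffer manager, which suballocates the static texture pool and
 * throttles with the IRQ-based fence helpers above.
 */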
448static GLboolean
449intel_init_bufmgr(struct intel_context *intel)
450{
451   intelScreenPrivate *intelScreen = intel->intelScreen;
452   GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;
453   GLboolean gem_supported;
454
455   /* If we've got a new enough DDX that's initializing GEM and giving us
456    * object handles for the shared buffers, use that.
457    */
458   intel->ttm = GL_FALSE;
459   if (intel->intelScreen->driScrnPriv->dri2.enabled)
460       gem_supported = GL_TRUE;
461   else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
462	    intel->intelScreen->drmMinor >= 11 &&
463	    intel->intelScreen->front.bo_handle != -1)
464       gem_supported = GL_TRUE;
465   else
466       gem_supported = GL_FALSE;
467
468   if (!gem_disable && gem_supported) {
469      int bo_reuse_mode;
470      intel->bufmgr = intel_bufmgr_gem_init(intel->driFd,
471					    BATCH_SZ);
472      if (intel->bufmgr != NULL) {
473         intel->ttm = GL_TRUE;
474         bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
475         switch (bo_reuse_mode) {
476         case DRI_CONF_BO_REUSE_DISABLED:
477            break;
478         case DRI_CONF_BO_REUSE_ALL:
479            intel_gem_enable_bo_reuse(intel->bufmgr);
480            break;
481         }
482      }
483   }
484   /* Otherwise, use the classic buffer manager. */
485   if (intel->bufmgr == NULL) {
486      if (gem_disable) {
487	 fprintf(stderr, "GEM disabled.  Using classic.\n");
488      } else {
489	 fprintf(stderr, "Failed to initialize GEM.  "
490		 "Falling back to classic.\n");
491      }
492
493      if (intelScreen->tex.size == 0) {
494	 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
495		 __func__, __LINE__);
496	 return GL_FALSE;
497      }
498
499      intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
500					   intelScreen->tex.map,
501					   intelScreen->tex.size,
502					   intel_fence_emit,
503					   intel_fence_wait,
504					   intel);
505   }
506
507   /* XXX bufmgr should be per-screen, not per-context */
508   intelScreen->ttm = intel->ttm;
509
510   return GL_TRUE;
511}
512
513void
514intelInitDriverFunctions(struct dd_function_table *functions)
515{
516   _mesa_init_driver_functions(functions);
517
518   functions->Flush = intelFlush;
519   functions->Finish = intelFinish;
520   functions->GetString = intelGetString;
521   functions->UpdateState = intelInvalidateState;
522
523   functions->CopyColorTable = _swrast_CopyColorTable;
524   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
525   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
526   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;
527
528   functions->BeginQuery = intelBeginQuery;
529   functions->EndQuery = intelEndQuery;
530
531   intelInitTextureFuncs(functions);
532   intelInitStateFuncs(functions);
533   intelInitBufferFuncs(functions);
534}
535
536
537GLboolean
538intelInitContext(struct intel_context *intel,
539                 const __GLcontextModes * mesaVis,
540                 __DRIcontextPrivate * driContextPriv,
541                 void *sharedContextPrivate,
542                 struct dd_function_table *functions)
543{
544   GLcontext *ctx = &intel->ctx;
545   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
546   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
547   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
548   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
549      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
550   int fthrottle_mode;
551
552   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
553                                 functions, (void *) intel)) {
554      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
555      return GL_FALSE;
556   }
557
558   driContextPriv->driverPrivate = intel;
559   intel->intelScreen = intelScreen;
560   intel->driScreen = sPriv;
561   intel->sarea = saPriv;
562
563   /* Dri stuff */
564   intel->hHWContext = driContextPriv->hHWContext;
565   intel->driFd = sPriv->fd;
566   intel->driHwLock = sPriv->lock;
567
568   intel->width = intelScreen->width;
569   intel->height = intelScreen->height;
570
571   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
572                       intel->driScreen->myNum,
573		       IS_965(intelScreen->deviceID) ? "i965" : "i915");
574   if (intelScreen->deviceID == PCI_CHIP_I865_G)
575      intel->maxBatchSize = 4096;
576   else
577      intel->maxBatchSize = BATCH_SZ;
578
579   if (!intel_init_bufmgr(intel))
580      return GL_FALSE;
581
582   ctx->Const.MaxTextureMaxAnisotropy = 2.0;
583
584   /* This doesn't yet catch all non-conformant rendering, but it's a
585    * start.
586    */
587   if (getenv("INTEL_STRICT_CONFORMANCE")) {
588      intel->strict_conformance = 1;
589   }
590
591   if (intel->strict_conformance) {
592      ctx->Const.MinLineWidth = 1.0;
593      ctx->Const.MinLineWidthAA = 1.0;
594      ctx->Const.MaxLineWidth = 1.0;
595      ctx->Const.MaxLineWidthAA = 1.0;
596      ctx->Const.LineWidthGranularity = 1.0;
597   }
598   else {
599      ctx->Const.MinLineWidth = 1.0;
600      ctx->Const.MinLineWidthAA = 1.0;
601      ctx->Const.MaxLineWidth = 5.0;
602      ctx->Const.MaxLineWidthAA = 5.0;
603      ctx->Const.LineWidthGranularity = 0.5;
604   }
605
606   ctx->Const.MinPointSize = 1.0;
607   ctx->Const.MinPointSizeAA = 1.0;
608   ctx->Const.MaxPointSize = 255.0;
609   ctx->Const.MaxPointSizeAA = 3.0;
610   ctx->Const.PointSizeGranularity = 1.0;
611
612   /* reinitialize the context point state.
613    * It depends on constants in __GLcontextRec::Const
614    */
615   _mesa_init_point(ctx);
616
617   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */
618
619   /* Initialize the software rasterizer and helper modules. */
620   _swrast_CreateContext(ctx);
621   _vbo_CreateContext(ctx);
622   _tnl_CreateContext(ctx);
623   _swsetup_CreateContext(ctx);
624
625   /* Configure swrast to match hardware characteristics: */
626   _swrast_allow_pixel_fog(ctx, GL_FALSE);
627   _swrast_allow_vertex_fog(ctx, GL_TRUE);
628
629   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
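   /* Presumably hardware stencil is only usable with the combined 24-bit
    * depth / 8-bit stencil buffer layout, hence the depthBits == 24
    * requirement above.
    */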
630   intel->hw_stipple = 1;
631
632   /* XXX FBO: this doesn't seem to be used anywhere */
633   switch (mesaVis->depthBits) {
634   case 0:                     /* what to do in this case? */
635   case 16:
636      intel->polygon_offset_scale = 1.0;
637      break;
638   case 24:
639      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
640      break;
641   default:
642      assert(0);
643      break;
644   }
645
646   if (IS_965(intelScreen->deviceID))
647      intel->polygon_offset_scale /= 0xffff;
648
649   intel->RenderIndex = ~0;
650
651   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
652   intel->irqsEmitted = 0;
653
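   /* Frame throttling: DRI_CONF_FTHROTTLE_IRQS blocks on hardware interrupts
    * (only usable when the screen's IRQ handler is active), while
    * DRI_CONF_FTHROTTLE_USLEEPS polls with short sleeps instead.
    */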
654   intel->do_irqs = (intel->intelScreen->irq_active &&
655                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);
656
657   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);
658
659   _math_matrix_ctr(&intel->ViewportMatrix);
660
661   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
662      _mesa_printf("IRQs not active.  Exiting\n");
663      exit(1);
664   }
665
666   intelInitExtensions(ctx, GL_FALSE);
667
668   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
669   if (INTEL_DEBUG & DEBUG_BUFMGR)
670      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);
671
672   if (!sPriv->dri2.enabled)
673      intel_recreate_static_regions(intel);
674
675   intel->batch = intel_batchbuffer_alloc(intel);
676
677   intel_bufferobj_init(intel);
678   intel_fbo_init(intel);
679
680   if (intel->ctx.Mesa_DXTn) {
681      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
682      _mesa_enable_extension(ctx, "GL_S3_s3tc");
683   }
684   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
685      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
686   }
687
688   intel->prim.primitive = ~0;
689
690   /* Force all software fallbacks */
691   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
692      fprintf(stderr, "disabling 3D rasterization\n");
693      FALLBACK(intel, INTEL_FALLBACK_USER, 1);
694      intel->no_rast = 1;
695   }
696
697   /* Disable all hardware rendering (skip emitting batches and fences/waits
698    * to the kernel)
699    */
700   intel->no_hw = getenv("INTEL_NO_HW") != NULL;
701
702   return GL_TRUE;
703}
704
705void
706intelDestroyContext(__DRIcontextPrivate * driContextPriv)
707{
708   struct intel_context *intel =
709      (struct intel_context *) driContextPriv->driverPrivate;
710
711   assert(intel);               /* should never be null */
712   if (intel) {
713      GLboolean release_texture_heaps;
714
715      INTEL_FIREVERTICES(intel);
716
717      intel->vtbl.destroy(intel);
718
719      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
720      _swsetup_DestroyContext(&intel->ctx);
721      _tnl_DestroyContext(&intel->ctx);
722      _vbo_DestroyContext(&intel->ctx);
723
724      _swrast_DestroyContext(&intel->ctx);
725      intel->Fallback = 0;      /* don't call _swrast_Flush later */
726
727      intel_batchbuffer_free(intel->batch);
728
729      if (release_texture_heaps) {
730         /* This share group is about to go away; free our private
731          * texture object data.
732          */
733         if (INTEL_DEBUG & DEBUG_TEXTURE)
734            fprintf(stderr, "do something to free texture heaps\n");
735      }
736
737      /* free the Mesa context */
738      _mesa_free_context_data(&intel->ctx);
739
740      dri_bufmgr_destroy(intel->bufmgr);
741   }
742}
743
744GLboolean
745intelUnbindContext(__DRIcontextPrivate * driContextPriv)
746{
747   return GL_TRUE;
748}
749
750GLboolean
751intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
752                 __DRIdrawablePrivate * driDrawPriv,
753                 __DRIdrawablePrivate * driReadPriv)
754{
755   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;
756
757   if (driContextPriv) {
758      struct intel_context *intel =
759         (struct intel_context *) driContextPriv->driverPrivate;
760      struct intel_framebuffer *intel_fb =
761	 (struct intel_framebuffer *) driDrawPriv->driverPrivate;
762      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;
763
764
765      /* XXX FBO temporary fix-ups! */
766      /* if the renderbuffers don't have regions, init them from the context */
767      if (!driContextPriv->driScreenPriv->dri2.enabled) {
768         struct intel_renderbuffer *irbDepth
769            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
770         struct intel_renderbuffer *irbStencil
771            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);
772
773         if (intel_fb->color_rb[0]) {
774	    intel_renderbuffer_set_region(intel_fb->color_rb[0],
775					  intel->front_region);
776         }
777         if (intel_fb->color_rb[1]) {
778	    intel_renderbuffer_set_region(intel_fb->color_rb[1],
779					  intel->back_region);
780         }
781#if 0
782         if (intel_fb->color_rb[2]) {
783	    intel_renderbuffer_set_region(intel_fb->color_rb[2],
784					  intel->third_region);
785         }
786#endif
787         if (irbDepth) {
788	    intel_renderbuffer_set_region(irbDepth, intel->depth_region);
789         }
790         if (irbStencil) {
791	    intel_renderbuffer_set_region(irbStencil, intel->depth_region);
792         }
793      }
794
795      /* set GLframebuffer size to match window, if needed */
796      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);
797
798      if (driReadPriv != driDrawPriv) {
799	 driUpdateFramebufferSize(&intel->ctx, driReadPriv);
800      }
801
802      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);
803
804      /* The drawbuffer won't always be updated by _mesa_make_current:
805       */
806      if (intel->ctx.DrawBuffer == &intel_fb->Base) {
807
808	 if (intel->driReadDrawable != driReadPriv)
809	    intel->driReadDrawable = driReadPriv;
810
811	 if (intel->driDrawable != driDrawPriv) {
812	    if (driDrawPriv->swap_interval == (unsigned)-1) {
813	       int i;
814
815	       driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
816		  ? driGetDefaultVBlankFlags(&intel->optionCache)
817		 : VBLANK_FLAG_NO_IRQ;
818
819	       (*psp->systemTime->getUST) (&intel_fb->swap_ust);
820	       driDrawableInitVBlank(driDrawPriv);
821	       intel_fb->vbl_waited = driDrawPriv->vblSeq;
822
823	       for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
824		  if (intel_fb->color_rb[i])
825		     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
826	       }
827	    }
828	    intel->driDrawable = driDrawPriv;
829	    intelWindowMoved(intel);
830	 }
831
832	 intel_draw_buffer(&intel->ctx, &intel_fb->Base);
833      }
834   }
835   else {
836      _mesa_make_current(NULL, NULL, NULL);
837   }
838
839   return GL_TRUE;
840}
841
842static void
843intelContendedLock(struct intel_context *intel, GLuint flags)
844{
845   __DRIdrawablePrivate *dPriv = intel->driDrawable;
846   __DRIscreenPrivate *sPriv = intel->driScreen;
847   volatile struct drm_i915_sarea *sarea = intel->sarea;
848   int me = intel->hHWContext;
849
850   drmGetLock(intel->driFd, intel->hHWContext, flags);
851   intel->locked = 1;
852
853   if (INTEL_DEBUG & DEBUG_LOCK)
854      _mesa_printf("%s - got contended lock\n", __progname);
855
856   /* If the window moved, we may need to set a new cliprect now.
857    *
858    * NOTE: This releases and regains the hw lock, so all state
859    * checking must be done *after* this call:
860    */
861   if (dPriv)
862       DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);
863
864   if (sarea && sarea->ctxOwner != me) {
865      if (INTEL_DEBUG & DEBUG_BUFMGR) {
866	 fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
867		 sarea->ctxOwner, me);
868      }
869      sarea->ctxOwner = me;
870   }
871
872   /* If the last consumer of the texture memory wasn't us, notify the fake
873    * bufmgr and record the new owner.  We should have the memory shared
874    * between contexts of a single fake bufmgr, but this will at least make
875    * things correct for now.
876    */
877   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
878      if (INTEL_DEBUG & DEBUG_BUFMGR)
879         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
880                 sarea->texAge, intel->hHWContext);
881      sarea->texAge = intel->hHWContext;
882      dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
883      if (INTEL_DEBUG & DEBUG_BATCH)
884         intel_decode_context_reset();
885   }
886
887   if (sarea->width != intel->width || sarea->height != intel->height) {
888       int numClipRects = intel->numClipRects;
889
890       /*
891	* FIXME: Really only need to do this when drawing to a
892	* common back- or front buffer.
893	*/
894
895       /*
896	* This will essentially drop the outstanding batchbuffer on
897	* the floor.
898	*/
899       intel->numClipRects = 0;
900
901       if (intel->Fallback)
902	   _swrast_flush(&intel->ctx);
903
904       if (!IS_965(intel->intelScreen->deviceID))
905	   INTEL_FIREVERTICES(intel);
906
907       if (intel->batch->map != intel->batch->ptr)
908	   intel_batchbuffer_flush(intel->batch);
909
910       intel->numClipRects = numClipRects;
911
912       /* force window update */
913       intel->lastStamp = 0;
914
915       intel->width = sarea->width;
916       intel->height = sarea->height;
917   }
918
919   /* Drawable changed?
920    */
921   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
922       intelWindowMoved(intel);
923       intel->lastStamp = dPriv->lastStamp;
924   }
925}
926
927
928_glthread_DECLARE_STATIC_MUTEX(lockMutex);
929
930/* Lock the hardware and validate our state.
931 */
932void LOCK_HARDWARE( struct intel_context *intel )
933{
934    __DRIdrawable *dPriv = intel->driDrawable;
935    __DRIscreen *sPriv = intel->driScreen;
936    char __ret = 0;
937    struct intel_framebuffer *intel_fb = NULL;
938    struct intel_renderbuffer *intel_rb = NULL;
939
940    _glthread_LOCK_MUTEX(lockMutex);
941    assert(!intel->locked);
942    intel->locked = 1;
943
944    if (intel->driDrawable) {
945       intel_fb = intel->driDrawable->driverPrivate;
946
947       if (intel_fb)
948	  intel_rb =
949	     intel_get_renderbuffer(&intel_fb->Base,
950				    intel_fb->Base._ColorDrawBufferIndexes[0]);
951    }
952
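    /* Wait for any outstanding vblank before touching the hardware.  The
     * (vbl_waited - vbl_pending) > (1 << 23) test reads as a wrap-safe
     * sequence comparison: the unsigned difference only exceeds 2^23 when
     * vbl_pending is still ahead of the last sequence we waited for.
     */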
953    if (intel_rb && dPriv->vblFlags &&
954	!(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
955	(intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
956	drmVBlank vbl;
957
958	vbl.request.type = DRM_VBLANK_ABSOLUTE;
959
960	if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
961	    vbl.request.type |= DRM_VBLANK_SECONDARY;
962	}
963
964	vbl.request.sequence = intel_rb->vbl_pending;
965	drmWaitVBlank(intel->driFd, &vbl);
966	intel_fb->vbl_waited = vbl.reply.sequence;
967    }
968
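    /* Fast path: DRM_CAS tries to take the hardware lock with a single
     * compare-and-swap and sets __ret non-zero on contention; the contended
     * cases are handled below.
     */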
969    DRM_CAS(intel->driHwLock, intel->hHWContext,
970        (DRM_LOCK_HELD|intel->hHWContext), __ret);
971
972    if (sPriv->dri2.enabled) {
973	if (__ret)
974	    drmGetLock(intel->driFd, intel->hHWContext, 0);
975	if (__driParseEvents(dPriv->driContextPriv, dPriv)) {
976	    intelWindowMoved(intel);
977	    intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
978	}
979    } else if (__ret) {
980        intelContendedLock( intel, 0 );
981    }
982
983
984    if (INTEL_DEBUG & DEBUG_LOCK)
985      _mesa_printf("%s - locked\n", __progname);
986}
987
988
989/* Unlock the hardware for the given context.
990 */
991void UNLOCK_HARDWARE( struct intel_context *intel )
992{
993   intel->vtbl.note_unlock( intel );
994   intel->locked = 0;
995
996   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
997
998   _glthread_UNLOCK_MUTEX(lockMutex);
999
1000   if (INTEL_DEBUG & DEBUG_LOCK)
1001      _mesa_printf("%s - unlocked\n", __progname);
1002
1003   /**
1004    * Nothing that references cliprects should be left in the batch
1005    * outside of a LOCK/UNLOCK pair.
1006    */
1007   assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
1008}
1009
1010