brw_misc_state.c revision f69d46150c308285a518391db02cc9cba0f2a12d
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */



#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"





/***********************************************************************
 * Blend color
 */

static void upload_blend_constant_color(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_blend_constant_color bcc;

   memset(&bcc, 0, sizeof(bcc));
   bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
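   /* Packet length fields are biased by two: they hold the total dword
    * count of the command minus two.
    */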
   bcc.header.length = sizeof(bcc)/4-2;
   bcc.blend_constant_color[0] = ctx->Color.BlendColor[0];
   bcc.blend_constant_color[1] = ctx->Color.BlendColor[1];
   bcc.blend_constant_color[2] = ctx->Color.BlendColor[2];
   bcc.blend_constant_color[3] = ctx->Color.BlendColor[3];

   BRW_CACHED_BATCH_STRUCT(brw, &bcc);
}


const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;

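   /* The drawing rectangle packet takes an inclusive xmax/ymax, hence
    * the "- 1" on the drawbuffer dimensions.  The final dword is the
    * drawing-rectangle origin, left at (0, 0) since there is no
    * window-system offset to apply here.
    */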
   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
	    ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

static void prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.bind_bo);
   brw_add_validated_bo(brw, brw->wm.bind_bo);
}

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

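   /* One pointer per fixed-function stage, in pipeline order: VS, GS,
    * CLIP, SF, WM.  Only the VS and WM stages use binding tables here,
    * so the middle stages are written as zero.
    */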
   BEGIN_BATCH(6);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   if (brw->vs.bind_bo != NULL)
      OUT_RELOC(brw->vs.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* vs */
   else
      OUT_BATCH(0);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* wm/ps */
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_binding_table_pointers,
};

/**
 * Upload the binding table pointers for Gen6, which point to each stage's
 * array of surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 |
	     GEN6_BINDING_TABLE_MODIFY_VS |
	     GEN6_BINDING_TABLE_MODIFY_GS |
	     GEN6_BINDING_TABLE_MODIFY_PS |
	     (4 - 2));
   if (brw->vs.bind_bo != NULL)
      OUT_RELOC(brw->vs.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* vs */
   else
      OUT_BATCH(0);
   OUT_BATCH(0); /* gs */
   OUT_RELOC(brw->wm.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* wm/ps */
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen6_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_gen6_binding_table_pointers,
};

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

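   /* The relocation delta of 1 on the GS and CLIP entries sets bit 0
    * of those dwords, the enable bit for the corresponding stage.
    */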
   BEGIN_BATCH(7);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

   brw->state.dirty.brw |= BRW_NEW_PSP;
}


static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
   brw_add_validated_bo(brw, brw->cc.state_bo);
}

static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
		CACHE_NEW_GS_UNIT |
		CACHE_NEW_GS_PROG |
		CACHE_NEW_CLIP_UNIT |
		CACHE_NEW_SF_UNIT |
		CACHE_NEW_WM_UNIT |
		CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};

static void prepare_depthbuffer(struct brw_context *brw)
{
   struct intel_region *region = brw->state.depth_region;

   if (region != NULL)
      brw_add_validated_bo(brw, region->buffer);
}

static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len;

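   /* The depth buffer packet grew over the generations: 5 dwords
    * originally, 6 on G4X/Ironlake, 7 on Gen6.
    */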
   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->is_ironlake)
      len = 6;
   else
      len = 5;

   if (region == NULL) {
      BEGIN_BATCH(len);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
		(BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->is_ironlake || intel->gen >= 6)
         OUT_BATCH(0);

      if (intel->gen >= 6)
	 OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      switch (region->cpp) {
      case 2:
	 format = BRW_DEPTHFORMAT_D16_UNORM;
	 break;
      case 4:
	 if (intel->depth_buffer_is_float)
	    format = BRW_DEPTHFORMAT_D32_FLOAT;
	 else
	    format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
	 break;
      default:
	 assert(0);
	 return;
      }

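      /* Hardware constraints on depth buffer layout: X-major tiling is
       * not allowed, and Gen6 additionally requires the buffer to be
       * tiled.
       */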
      assert(region->tiling != I915_TILING_X);
      if (IS_GEN6(intel->intelScreen->deviceID))
	 assert(region->tiling != I915_TILING_NONE);

      BEGIN_BATCH(len);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
		(format << 18) |
		(BRW_TILEWALK_YMAJOR << 26) |
		((region->tiling != I915_TILING_NONE) << 27) |
		(BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
		I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
		0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
		((region->pitch - 1) << 6) |
		((region->height - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->is_ironlake || intel->gen >= 6)
         OUT_BATCH(0);

      if (intel->gen >= 6)
	 OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   /* Emit CLEAR_PARAMS so the depth clear value is initialized, for safety. */
   if (intel->gen >= 6) {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_3D_CLEAR_PARAMS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};



/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple bps;
   GLuint i;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps)/4-2;

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first.  If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout.  But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[31 - i]; /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[i]; /* don't invert */
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4-2;

   /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
    * we have to invert the Y axis in order to match the OpenGL
    * pixel coordinate system, and our offset must be matched
    * to the window position.  If we're drawing to a FBO
    * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
    * system works just fine, and there's no window system to
    * worry about.
    */
   if (brw->intel.ctx.DrawBuffer->Name == 0) {
      bpso.bits0.x_offset = 0;
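      /* The 32x32 stipple pattern repeats from the top of the screen,
       * so shift it by the window height modulo 32 to keep the pattern
       * anchored to window coordinates.
       */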
      bpso.bits0.y_offset = (32 - (ctx->DrawBuffer->Height & 31)) & 31;
   }
   else {
      bpso.bits0.y_offset = 0;
      bpso.bits0.x_offset = 0;
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}

#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct brw_aa_line_parameters balp;

   if (!brw->has_aa_line_parameters)
      return;

   /* use legacy aa line coverage computation */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls)/4 - 2;

   bls.bits0.pattern = ctx->Line.StipplePattern;
   bls.bits1.repeat_count = ctx->Line.StippleFactor;

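   /* The inverse repeat count is stored as a fixed-point value with 13
    * fractional bits, hence the scale by 1 << 13.
    */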
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);

   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static void upload_invarient_state( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   {
      /* 0x61040000  Pipeline Select */
      /*     PipelineSelect            : 0 */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = brw->CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   if (intel->gen < 6) {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }

   intel_batchbuffer_emit_mi_flush(intel->batch);

   if (intel->gen >= 6) {
      int i;

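      /* Gen6 wants 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK
       * programmed even without multisampling: one sample per pixel,
       * sampled at the pixel center, with only sample 0 enabled in
       * the mask.
       */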
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_3D_MULTISAMPLE << 16 | (3 - 2));
      OUT_BATCH(MS_PIXEL_LOCATION_CENTER |
		MS_NUMSAMPLES_1);
      OUT_BATCH(0); /* positions for 4/8-sample */
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(CMD_3D_SAMPLE_MASK << 16 | (2 - 2));
      OUT_BATCH(1);
      ADVANCE_BATCH();

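      /* Initialize the four SVB (streamed vertex buffer) index
       * registers to index 0 with a maximum index of 0xffffffff, so
       * vertex writes are not clamped.
       */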
      for (i = 0; i < 4; i++) {
	 BEGIN_BATCH(4);
	 OUT_BATCH(CMD_GS_SVB_INDEX << 16 | (4 - 2));
	 OUT_BATCH(i << SVB_INDEX_SHIFT);
	 OUT_BATCH(0);
	 OUT_BATCH(0xffffffff);
	 ADVANCE_BATCH();
      }
   }

   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = brw->CMD_VF_STATISTICS;
      if (INTEL_DEBUG & DEBUG_STATS)
	 vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses from which some state is referenced.
 *
 * This allows us to avoid having to emit relocations in many places for
 * cached state, and instead emit pointers inside of large, mostly-static
 * state pools.  This comes at the expense of memory and more expensive
 * cache misses.
 */
static void upload_state_base_address( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
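   /* Each OUT_BATCH(1) below programs a base address or upper bound of
    * zero with bit 0, the dword's modify-enable bit, set.
    */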
   if (intel->gen >= 6) {
       BEGIN_BATCH(10);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
       OUT_BATCH(1); /* General state base address */
       OUT_BATCH(1); /* Surface state base address */
       OUT_BATCH(1); /* Dynamic state base address */
       OUT_BATCH(1); /* Indirect object base address */
       OUT_BATCH(1); /* Instruction base address */
       OUT_BATCH(1); /* General state upper bound */
       OUT_BATCH(1); /* Dynamic state upper bound */
       OUT_BATCH(1); /* Indirect object upper bound */
       OUT_BATCH(1); /* Instruction access upper bound */
       ADVANCE_BATCH();
   } else if (intel->is_ironlake) {
       BEGIN_BATCH(8);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
       OUT_BATCH(1); /* General state base address */
       OUT_BATCH(1); /* Surface state base address */
       OUT_BATCH(1); /* Indirect object base address */
       OUT_BATCH(1); /* Instruction base address */
       OUT_BATCH(1); /* General state upper bound */
       OUT_BATCH(1); /* Indirect object upper bound */
       OUT_BATCH(1); /* Instruction access upper bound */
       ADVANCE_BATCH();
   } else {
       BEGIN_BATCH(6);
       OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
       OUT_BATCH(1); /* General state base address */
       OUT_BATCH(1); /* Surface state base address */
       OUT_BATCH(1); /* Indirect object base address */
       OUT_BATCH(1); /* General state upper bound */
       OUT_BATCH(1); /* Indirect object upper bound */
       ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .emit = upload_state_base_address
};