lp_setup_tri.c revision 89498d01531cd515c769e570bf799c39fbafc8fb
1/**************************************************************************
2 *
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/*
29 * Recursive rasterization for triangles
30 */
31
32#include "lp_context.h"
33#include "lp_quad.h"
34#include "lp_quad_pipe.h"
35#include "lp_setup.h"
36#include "lp_state.h"
37#include "draw/draw_context.h"
38#include "draw/draw_private.h"
39#include "draw/draw_vertex.h"
40#include "pipe/p_shader_tokens.h"
41#include "pipe/p_thread.h"
42#include "util/u_math.h"
43#include "util/u_memory.h"
44
45#define BLOCKSIZE 4
46
struct triangle {
   /* One-pixel sized trivial accept offsets for each edge plane.
    * Added to an edge value at a block corner: a positive result
    * means the entire block is inside that edge (see do_triangle_ccw).
    */
   float ei1;
   float ei2;
   float ei3;

   /* One-pixel sized trivial reject offsets for each edge plane.
    * Added to an edge value at a block corner: a negative result
    * means the entire block is outside that edge.
    */
   float eo1;
   float eo2;
   float eo3;

   /* y deltas for vertex pairs (dyAB = yA - yB) */
   float dy12;
   float dy23;
   float dy31;

   /* x deltas for vertex pairs (dxAB = xA - xB) */
   float dx12;
   float dx23;
   float dx31;

   /* Attribute interpolation:
    */
   float oneoverarea;   /* reciprocal of the edge determinant computed in do_triangle_ccw */
   float x1;            /* NOTE(review): never written in this file — possibly vestigial; verify */
   float y1;            /* NOTE(review): never written in this file — possibly vestigial; verify */
   struct tgsi_interp_coef coef[PIPE_MAX_SHADER_INPUTS];  /* per-input plane equations */
   struct tgsi_interp_coef position_coef;                 /* plane equation for position (z, w) */

   /* A run of pre-initialized quads, handed to the quad pipeline:
    */
   struct llvmpipe_context *llvmpipe;
   struct quad_header quad[4];
};
81
82
83/**
84 * Compute a0 for a constant-valued coefficient (GL_FLAT shading).
85 */
86static void constant_coef( struct tgsi_interp_coef *coef,
87			   const float (*v3)[4],
88			   unsigned vert_attr,
89			   unsigned i )
90{
91   coef->a0[i] = v3[vert_attr][i];
92   coef->dadx[i] = 0;
93   coef->dady[i] = 0;
94}
95
96/**
97 * Compute a0, dadx and dady for a linearly interpolated coefficient,
98 * for a triangle.
99 */
100static void linear_coef( struct triangle *tri,
101			 struct tgsi_interp_coef *coef,
102			 const float (*v1)[4],
103			 const float (*v2)[4],
104			 const float (*v3)[4],
105			 unsigned vert_attr,
106			 unsigned i)
107{
108   float a1 = v1[vert_attr][i];
109   float a2 = v2[vert_attr][i];
110   float a3 = v3[vert_attr][i];
111
112   float da12 = a1 - a2;
113   float da31 = a3 - a1;
114   float dadx = (da12 * tri->dy31 - tri->dy12 * da31) * tri->oneoverarea;
115   float dady = (da31 * tri->dx12 - tri->dx31 * da12) * tri->oneoverarea;
116
117   coef->dadx[i] = dadx;
118   coef->dady[i] = dady;
119
120   /* calculate a0 as the value which would be sampled for the
121    * fragment at (0,0), taking into account that we want to sample at
122    * pixel centers, in other words (0.5, 0.5).
123    *
124    * this is neat but unfortunately not a good way to do things for
125    * triangles with very large values of dadx or dady as it will
126    * result in the subtraction and re-addition from a0 of a very
127    * large number, which means we'll end up loosing a lot of the
128    * fractional bits and precision from a0.  the way to fix this is
129    * to define a0 as the sample at a pixel center somewhere near vmin
130    * instead - i'll switch to this later.
131    */
132   coef->a0[i] = (v1[vert_attr][i] -
133                  (dadx * (v1[0][0] - 0.5f) +
134                   dady * (v1[0][1] - 0.5f)));
135}
136
137
138/**
139 * Compute a0, dadx and dady for a perspective-corrected interpolant,
140 * for a triangle.
141 * We basically multiply the vertex value by 1/w before computing
142 * the plane coefficients (a0, dadx, dady).
143 * Later, when we compute the value at a particular fragment position we'll
144 * divide the interpolated value by the interpolated W at that fragment.
145 */
146static void perspective_coef( struct triangle *tri,
147			      struct tgsi_interp_coef *coef,
148			      const float (*v1)[4],
149			      const float (*v2)[4],
150			      const float (*v3)[4],
151			      unsigned vert_attr,
152			      unsigned i)
153{
154   /* premultiply by 1/w  (v[0][3] is always 1/w):
155    */
156   float a1 = v1[vert_attr][i] * v1[0][3];
157   float a2 = v2[vert_attr][i] * v2[0][3];
158   float a3 = v3[vert_attr][i] * v3[0][3];
159   float da12 = a1 - a2;
160   float da31 = a3 - a1;
161   float dadx = (da12 * tri->dy31 - tri->dy12 * da31) * tri->oneoverarea;
162   float dady = (da31 * tri->dx12 - tri->dx31 * da12) * tri->oneoverarea;
163
164
165   coef->dadx[i] = dadx;
166   coef->dady[i] = dady;
167   coef->a0[i] = (a1 -
168                  (dadx * (v1[0][0] - 0.5f) +
169                   dady * (v1[0][1] - 0.5f)));
170}
171
172
173/**
174 * Special coefficient setup for gl_FragCoord.
175 * X and Y are trivial, though Y has to be inverted for OpenGL.
176 * Z and W are copied from position_coef which should have already been computed.
177 * We could do a bit less work if we'd examine gl_FragCoord's swizzle mask.
178 */
179static void
180setup_fragcoord_coef(struct triangle *tri, unsigned slot)
181{
182   /*X*/
183   tri->coef[slot].a0[0] = 0.0;
184   tri->coef[slot].dadx[0] = 1.0;
185   tri->coef[slot].dady[0] = 0.0;
186   /*Y*/
187   tri->coef[slot].a0[1] = 0.0;
188   tri->coef[slot].dadx[1] = 0.0;
189   tri->coef[slot].dady[1] = 1.0;
190   /*Z*/
191   tri->coef[slot].a0[2] = tri->position_coef.a0[2];
192   tri->coef[slot].dadx[2] = tri->position_coef.dadx[2];
193   tri->coef[slot].dady[2] = tri->position_coef.dady[2];
194   /*W*/
195   tri->coef[slot].a0[3] = tri->position_coef.a0[3];
196   tri->coef[slot].dadx[3] = tri->position_coef.dadx[3];
197   tri->coef[slot].dady[3] = tri->position_coef.dady[3];
198}
199
200
201
202/**
203 * Compute the tri->coef[] array dadx, dady, a0 values.
204 */
205static void setup_tri_coefficients( struct llvmpipe_context *llvmpipe,
206				    struct triangle *tri,
207				    const float (*v1)[4],
208				    const float (*v2)[4],
209				    const float (*v3)[4],
210				    boolean frontface )
211{
212   const struct lp_fragment_shader *fs = llvmpipe->fs;
213   const struct vertex_info *vinfo = llvmpipe_get_vertex_info(llvmpipe);
214   unsigned input;
215
216   /* z and w are done by linear interpolation:
217    */
218   linear_coef(tri, &tri->position_coef, v1, v2, v3, 0, 2);
219   linear_coef(tri, &tri->position_coef, v1, v2, v3, 0, 3);
220
221   /* setup interpolation for all the remaining attributes:
222    */
223   for (input = 0; input < fs->info.num_inputs; input++) {
224      unsigned vert_attr = vinfo->attrib[input].src_index;
225      unsigned i;
226
227      switch (vinfo->attrib[input].interp_mode) {
228      case INTERP_CONSTANT:
229         for (i = 0; i < NUM_CHANNELS; i++)
230            constant_coef(&tri->coef[input], v3, vert_attr, i);
231         break;
232
233      case INTERP_LINEAR:
234         for (i = 0; i < NUM_CHANNELS; i++)
235            linear_coef(tri, &tri->coef[input], v1, v2, v3, vert_attr, i);
236         break;
237
238      case INTERP_PERSPECTIVE:
239         for (i = 0; i < NUM_CHANNELS; i++)
240            perspective_coef(tri, &tri->coef[input], v1, v2, v3, vert_attr, i);
241         break;
242
243      case INTERP_POS:
244         setup_fragcoord_coef(tri, input);
245         break;
246
247      default:
248         assert(0);
249      }
250
251      if (fs->info.input_semantic_name[input] == TGSI_SEMANTIC_FACE) {
252         tri->coef[input].a0[0] = 1.0f - frontface;
253         tri->coef[input].dadx[0] = 0.0;
254         tri->coef[input].dady[0] = 0.0;
255      }
256   }
257}
258
259
260
/* Snap a coordinate onto the 1/16th-of-a-pixel subpixel grid by
 * truncating toward zero in 28.4-style fixed point.
 * XXX: do this by add/subtracting a large floating point number:
 */
static inline float subpixel_snap( float a )
{
   const int fixed = (int)(a * 16.0f);
   return fixed * (1.0f / 16.0f);
}
268
269
270/* Convert 8x8 block into four runs of quads and render each in turn.
271 */
272#if (BLOCKSIZE == 8)
273static void block_full( struct triangle *tri, int x, int y )
274{
275   struct quad_header *ptrs[4];
276   int i;
277
278   tri->quad[0].input.x0 = x + 0;
279   tri->quad[1].input.x0 = x + 2;
280   tri->quad[2].input.x0 = x + 4;
281   tri->quad[3].input.x0 = x + 6;
282
283   for (i = 0; i < 4; i++, y += 2) {
284      tri->quad[0].inout.mask = 0xf;
285      tri->quad[1].inout.mask = 0xf;
286      tri->quad[2].inout.mask = 0xf;
287      tri->quad[3].inout.mask = 0xf;
288
289      tri->quad[0].input.y0 = y;
290      tri->quad[1].input.y0 = y;
291      tri->quad[2].input.y0 = y;
292      tri->quad[3].input.y0 = y;
293
294      /* XXX: don't bother with this ptrs business */
295      ptrs[0] = &tri->quad[0];
296      ptrs[1] = &tri->quad[1];
297      ptrs[2] = &tri->quad[2];
298      ptrs[3] = &tri->quad[3];
299
300      tri->llvmpipe->quad.first->run( tri->llvmpipe->quad.first, ptrs, 4 );
301   }
302}
303#elif (BLOCKSIZE == 4)
304static void block_full( struct triangle *tri, int x, int y )
305{
306   struct quad_header *ptrs[4];
307   int iy;
308
309   tri->quad[0].input.x0 = x + 0;
310   tri->quad[1].input.x0 = x + 2;
311
312   for (iy = 0; iy < 4; iy += 2) {
313      tri->quad[0].inout.mask = 0xf;
314      tri->quad[1].inout.mask = 0xf;
315
316      tri->quad[0].input.y0 = y + iy;
317      tri->quad[1].input.y0 = y + iy;
318
319      /* XXX: don't bother with this ptrs business */
320      ptrs[0] = &tri->quad[0];
321      ptrs[1] = &tri->quad[1];
322
323      tri->llvmpipe->quad.first->run( tri->llvmpipe->quad.first, ptrs, 2 );
324   }
325}
326#else
327static void block_full( struct triangle *tri, int x, int y )
328{
329   struct quad_header *ptrs[4];
330   int iy;
331
332   tri->quad[0].input.x0 = x;
333   tri->quad[0].input.y0 = y;
334   tri->quad[0].inout.mask = 0xf;
335
336   ptrs[0] = &tri->quad[0];
337   tri->llvmpipe->quad.first->run( tri->llvmpipe->quad.first, ptrs, 1 );
338}
339#endif
340
341
342static void
343do_quad( struct triangle *tri,
344	 int x, int y,
345	 float c1, float c2, float c3 )
346{
347   struct quad_header *quad = &tri->quad[0];
348
349   float xstep1 = -tri->dy12;
350   float xstep2 = -tri->dy23;
351   float xstep3 = -tri->dy31;
352
353   float ystep1 = tri->dx12;
354   float ystep2 = tri->dx23;
355   float ystep3 = tri->dx31;
356
357   quad->input.x0 = x;
358   quad->input.y0 = y;
359   quad->inout.mask = 0;
360
361   if (c1 > 0 &&
362       c2 > 0 &&
363       c3 > 0)
364      quad->inout.mask |= 1;
365
366   if (c1 + xstep1 > 0 &&
367       c2 + xstep2 > 0 &&
368       c3 + xstep3 > 0)
369      quad->inout.mask |= 2;
370
371   if (c1 + ystep1 > 0 &&
372       c2 + ystep2 > 0 &&
373       c3 + ystep3 > 0)
374      quad->inout.mask |= 4;
375
376   if (c1 + ystep1 + xstep1 > 0 &&
377       c2 + ystep2 + xstep2 > 0 &&
378       c3 + ystep3 + xstep3 > 0)
379      quad->inout.mask |= 8;
380
381   if (quad->inout.mask)
382      tri->llvmpipe->quad.first->run( tri->llvmpipe->quad.first, &quad, 1 );
383}
384
385/* Evaluate each pixel in a block, generate a mask and possibly render
386 * the quad:
387 */
388static void
389do_block( struct triangle *tri,
390	 int x, int y,
391	 float c1,
392	 float c2,
393	 float c3 )
394{
395   const int step = 2;
396
397   float xstep1 = -step * tri->dy12;
398   float xstep2 = -step * tri->dy23;
399   float xstep3 = -step * tri->dy31;
400
401   float ystep1 = step * tri->dx12;
402   float ystep2 = step * tri->dx23;
403   float ystep3 = step * tri->dx31;
404
405   int ix, iy;
406
407   for (iy = 0; iy < BLOCKSIZE; iy += 2) {
408      float cx1 = c1;
409      float cx2 = c2;
410      float cx3 = c3;
411
412      for (ix = 0; ix < BLOCKSIZE; ix += 2) {
413
414	 do_quad(tri, x+ix, y+iy, cx1, cx2, cx3);
415
416	 cx1 += xstep1;
417	 cx2 += xstep2;
418	 cx3 += xstep3;
419      }
420
421      c1 += ystep1;
422      c2 += ystep2;
423      c3 += ystep3;
424   }
425}
426
427
428
429
430/* to avoid having to allocate power-of-four, square render targets,
431 * end up having a specialized version of the above that runs only at
432 * the topmost level.
433 *
434 * at the topmost level there may be an arbitary number of steps on
435 * either dimension, so this loop needs to be either separately
436 * code-generated and unrolled for each render target size, or kept as
437 * generic looping code:
438 */
439
440#define MIN3(a,b,c) MIN2(MIN2(a,b),c)
441#define MAX3(a,b,c) MAX2(MAX2(a,b),c)
442
/**
 * Set up and rasterize one counter-clockwise triangle.
 *
 * Snaps the vertices to the subpixel grid, builds the three half-edge
 * functions and attribute plane equations, then walks the triangle's
 * bounding box.  Small triangles (fitting in one 16x16 region) are
 * walked quad-by-quad; larger ones are walked in BLOCKSIZE blocks with
 * trivial accept/reject per block.
 *
 * frontfacing is forwarded to the coefficient setup for the
 * TGSI_SEMANTIC_FACE input.
 */
static void
do_triangle_ccw(struct llvmpipe_context *llvmpipe,
		const float (*v1)[4],
		const float (*v2)[4],
		const float (*v3)[4],
		boolean frontfacing )
{
   /* render-target extent, used to scissor the bounding box */
   const int rt_width = llvmpipe->framebuffer.cbufs[0]->width;
   const int rt_height = llvmpipe->framebuffer.cbufs[0]->height;

   /* vertex positions snapped to the 1/16 subpixel grid */
   const float y1 = subpixel_snap(v1[0][1]);
   const float y2 = subpixel_snap(v2[0][1]);
   const float y3 = subpixel_snap(v3[0][1]);

   const float x1 = subpixel_snap(v1[0][0]);
   const float x2 = subpixel_snap(v2[0][0]);
   const float x3 = subpixel_snap(v3[0][0]);

   struct triangle tri;
   float area;
   float c1, c2, c3;
   int i;
   int minx, maxx, miny, maxy;

   tri.llvmpipe = llvmpipe;


   /* coordinate deltas for each vertex pair (edge vectors) */
   tri.dx12 = x1 - x2;
   tri.dx23 = x2 - x3;
   tri.dx31 = x3 - x1;

   tri.dy12 = y1 - y2;
   tri.dy23 = y2 - y3;
   tri.dy31 = y3 - y1;

   /* signed edge determinant (positive for ccw winding) */
   area = (tri.dx12 * tri.dy31 -
	   tri.dx31 * tri.dy12);

   /* Cull non-ccw and zero-sized triangles.
    */
   if (area <= 0 || util_is_inf_or_nan(area))
      return;

   /* Bounding rectangle, expanded by half a pixel and rounded */
   minx = util_iround(MIN3(x1, x2, x3) - .5);
   maxx = util_iround(MAX3(x1, x2, x3) + .5);
   miny = util_iround(MIN3(y1, y2, y3) - .5);
   maxy = util_iround(MAX3(y1, y2, y3) + .5);

   /* Clamp to framebuffer (or tile) dimensions:
    */
   miny = MAX2(0, miny);
   minx = MAX2(0, minx);
   maxy = MIN2(rt_height, maxy);
   maxx = MIN2(rt_width, maxx);

   /* entirely scissored away? */
   if (miny == maxy || minx == maxx)
      return;

   /* The only divide in this code.  Is it really needed?
    */
   tri.oneoverarea = 1.0f / area;

   /* Setup parameter interpolants:
    */
   setup_tri_coefficients( llvmpipe, &tri, v1, v2, v3, frontfacing );

   /* point the pre-initialized quads at this triangle's coefficients */
   for (i = 0; i < Elements(tri.quad); i++) {
      tri.quad[i].coef = tri.coef;
      tri.quad[i].posCoef = &tri.position_coef;
   }

   /* half-edge constants, will be iterated over the whole
    * rendertarget.
    */
   c1 = tri.dy12 * x1 - tri.dx12 * y1;
   c2 = tri.dy23 * x2 - tri.dx23 * y2;
   c3 = tri.dy31 * x3 - tri.dx31 * y3;

   /* correct for top-left fill convention:
    */
   if (tri.dy12 < 0 || (tri.dy12 == 0 && tri.dx12 > 0)) c1++;
   if (tri.dy23 < 0 || (tri.dy23 == 0 && tri.dx23 > 0)) c2++;
   if (tri.dy31 < 0 || (tri.dy31 == 0 && tri.dx31 > 0)) c3++;

   /* find trivial reject offsets for each edge for a single-pixel
    * sized block.  These will be scaled up at each recursive level to
    * match the active blocksize.  Scaling in this way works best if
    * the blocks are square.
    */
   tri.eo1 = 0;
   if (tri.dy12 < 0) tri.eo1 -= tri.dy12;
   if (tri.dx12 > 0) tri.eo1 += tri.dx12;

   tri.eo2 = 0;
   if (tri.dy23 < 0) tri.eo2 -= tri.dy23;
   if (tri.dx23 > 0) tri.eo2 += tri.dx23;

   tri.eo3 = 0;
   if (tri.dy31 < 0) tri.eo3 -= tri.dy31;
   if (tri.dx31 > 0) tri.eo3 += tri.dx31;

   /* Calculate trivial accept offsets from the above.
    */
   tri.ei1 = tri.dx12 - tri.dy12 - tri.eo1;
   tri.ei2 = tri.dx23 - tri.dy23 - tri.eo2;
   tri.ei3 = tri.dx31 - tri.dy31 - tri.eo3;

   minx &= ~(BLOCKSIZE-1);		/* aligned blocks */
   miny &= ~(BLOCKSIZE-1);		/* aligned blocks */

   /* move the edge values to the aligned starting corner */
   c1 += tri.dx12 * miny - tri.dy12 * minx;
   c2 += tri.dx23 * miny - tri.dy23 * minx;
   c3 += tri.dx31 * miny - tri.dy31 * minx;

   /* Small triangle: bounding box fits inside a single 16x16-aligned
    * region, so skip the block-level trivial accept/reject and go
    * straight to per-quad evaluation.
    */
   if ((miny & ~15) == (maxy & ~15) &&
       (minx & ~15) == (maxx & ~15))
   {
      const int step = 2;   /* one quad per iteration */

      float xstep1 = -step * tri.dy12;
      float xstep2 = -step * tri.dy23;
      float xstep3 = -step * tri.dy31;

      float ystep1 = step * tri.dx12;
      float ystep2 = step * tri.dx23;
      float ystep3 = step * tri.dx31;

      /* trivial reject offsets scaled to quad size */
      float eo1 = tri.eo1 * step;
      float eo2 = tri.eo2 * step;
      float eo3 = tri.eo3 * step;

      int x, y;

      /* Subdivide space into NxM blocks, where each block is square and
       * power-of-four in dimension.
       *
       * Trivially accept or reject blocks, else jump to per-pixel
       * examination above.
       */
      for (y = miny; y < maxy; y += step)
      {
	 float cx1 = c1;
	 float cx2 = c2;
	 float cx3 = c3;

	 for (x = minx; x < maxx; x += step)
	 {
	    /* quad entirely outside some edge -> skip it */
	    if (cx1 + eo1 < 0 ||
		cx2 + eo2 < 0 ||
		cx3 + eo3 < 0)
	    {
	    }
	    else
	    {
	       do_quad(&tri, x, y, cx1, cx2, cx3);
	    }

	    /* Iterate cx values across the region:
	     */
	    cx1 += xstep1;
	    cx2 += xstep2;
	    cx3 += xstep3;
	 }

	 /* Iterate c values down the region:
	  */
	 c1 += ystep1;
	 c2 += ystep2;
	 c3 += ystep3;
      }
   }
   else
   {
      const int step = BLOCKSIZE;

      /* trivial accept/reject offsets scaled to block size */
      float ei1 = tri.ei1 * step;
      float ei2 = tri.ei2 * step;
      float ei3 = tri.ei3 * step;

      float eo1 = tri.eo1 * step;
      float eo2 = tri.eo2 * step;
      float eo3 = tri.eo3 * step;

      float xstep1 = -step * tri.dy12;
      float xstep2 = -step * tri.dy23;
      float xstep3 = -step * tri.dy31;

      float ystep1 = step * tri.dx12;
      float ystep2 = step * tri.dx23;
      float ystep3 = step * tri.dx31;
      int x, y;


      /* Subdivide space into NxM blocks, where each block is square and
       * power-of-four in dimension.
       *
       * Trivially accept or reject blocks, else jump to per-pixel
       * examination above.
       */
      for (y = miny; y < maxy; y += step)
      {
	 float cx1 = c1;
	 float cx2 = c2;
	 float cx3 = c3;
	 boolean in = false;   /* has this row entered the triangle yet? */

	 for (x = minx; x < maxx; x += step)
	 {
	    if (cx1 + eo1 < 0 ||
		cx2 + eo2 < 0 ||
		cx3 + eo3 < 0)
	    {
	       /* do nothing — block is fully outside; once a row has
		* been inside and exits again, the rest is outside too */
	       if (in)
		  break;
	    }
	    else if (cx1 + ei1 > 0 &&
		     cx2 + ei2 > 0 &&
		     cx3 + ei3 > 0)
	    {
	       in = TRUE;
	       block_full(&tri, x, y); /* trivial accept */
	    }
	    else
	    {
	       /* partially covered — examine per quad */
	       in = TRUE;
	       // block_full(&tri, x, y); /* trivial accept */
	       do_block(&tri, x, y, cx1, cx2, cx3);
	    }

	    /* Iterate cx values across the region:
	     */
	    cx1 += xstep1;
	    cx2 += xstep2;
	    cx3 += xstep3;
	 }

	 /* Iterate c values down the region:
	  */
	 c1 += ystep1;
	 c2 += ystep2;
	 c3 += ystep3;
      }
   }
}
689
690static void triangle_cw( struct llvmpipe_context *llvmpipe,
691			 const float (*v0)[4],
692			 const float (*v1)[4],
693			 const float (*v2)[4] )
694{
695   do_triangle_ccw( llvmpipe, v1, v0, v2, !llvmpipe->ccw_is_frontface );
696}
697
698static void triangle_ccw( struct llvmpipe_context *llvmpipe,
699			 const float (*v0)[4],
700			 const float (*v1)[4],
701			 const float (*v2)[4] )
702{
703   do_triangle_ccw( llvmpipe, v0, v1, v2, llvmpipe->ccw_is_frontface );
704}
705
/* Handle a triangle of either winding (no culling): determine the
 * winding from the sign of the edge cross product and dispatch to the
 * matching handler.
 */
static void triangle_both( struct llvmpipe_context *llvmpipe,
			   const float (*v0)[4],
			   const float (*v1)[4],
			   const float (*v2)[4] )
{
   /* edge vectors e = v0 - v2, f = v1 - v2 */
   const float ex = v0[0][0] - v2[0][0];
   const float ey = v0[0][1] - v2[0][1];
   const float fx = v1[0][0] - v2[0][0];
   const float fy = v1[0][1] - v2[0][1];

   /* det = cross(e,f).z */
   const float det = ex * fy - ey * fx;

   if (det < 0)
      triangle_ccw( llvmpipe, v0, v1, v2 );
   else
      triangle_cw( llvmpipe, v0, v1, v2 );
}
723
/* No-op triangle handler, installed when every winding is culled. */
static void triangle_nop( struct llvmpipe_context *llvmpipe,
			  const float (*v0)[4],
			  const float (*v1)[4],
			  const float (*v2)[4] )
{
   (void) llvmpipe;
   (void) v0;
   (void) v1;
   (void) v2;
}
730
731/**
732 * Do setup for triangle rasterization, then render the triangle.
733 */
734void setup_prepare_tri( struct llvmpipe_context *llvmpipe )
735{
736   llvmpipe->ccw_is_frontface = (llvmpipe->rasterizer->front_winding ==
737				 PIPE_WINDING_CW);
738
739   switch (llvmpipe->rasterizer->cull_mode) {
740   case PIPE_WINDING_NONE:
741      llvmpipe->triangle = triangle_both;
742      break;
743   case PIPE_WINDING_CCW:
744      llvmpipe->triangle = triangle_cw;
745      break;
746   case PIPE_WINDING_CW:
747      llvmpipe->triangle = triangle_ccw;
748      break;
749   default:
750      llvmpipe->triangle = triangle_nop;
751      break;
752   }
753}
754
755
756