t_vertex_sse.c revision 6e29a3c8e2dc920b6216a0df6357abd8234f1ec4
/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */

#include "main/glheader.h"
#include "main/context.h"
#include "main/colormac.h"
#include "main/simple_list.h"
#include "main/enums.h"
#include "t_context.h"
#include "t_vertex.h"

#if defined(USE_SSE_ASM)

#include "x86/rtasm/x86sse.h"
#include "x86/common_x86_asm.h"


/**
 * Number of bytes to allocate for generated SSE functions
 */
#define MAX_SSE_CODE_SIZE 1024
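
/* build_vertex_emit() asserts on completion that the code it emitted
 * actually fits within this budget.
 */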


#define X    0
#define Y    1
#define Z    2
#define W    3
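
/* X/Y/Z/W name the four components of an SSE register.  They are fed to
 * the SHUF() macro from x86sse.h to build shufps immediates; note that for
 * shufps the low two selectors index the destination register and the high
 * two index the source.
 */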


struct x86_program {
   struct x86_function func;

   GLcontext *ctx;
   GLboolean inputs_safe;     /* OK to over-read the source arrays? */
   GLboolean outputs_safe;    /* OK to over-write past the last output? */
   GLboolean have_sse2;

   struct x86_reg identity;   /* xmm reg holding the constant (0, 0, 0, 1) */
   struct x86_reg chan0;      /* xmm reg holding vtx->chan_scale */
};


static struct x86_reg get_identity( struct x86_program *p )
{
   return p->identity;
}

static void emit_load4f_4( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_load4f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Have to jump through some hoops:
    *
    *    c 0 0 0      movss of the third float, upper lanes zeroed
    *    c 0 0 1      shufps with the identity vector (0,0,0,1)
    *    0 0 c 1      rotate c into the Z lane
    *    a b c 1      movlps pulls in the low two floats
    */
   sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
   sse_shufps(&p->func, dest, dest, SHUF(Y,Z,X,W) );
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Initialize from identity, then pull in the low two words:
    */
   sse_movups(&p->func, dest, get_identity(p));
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load4f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Pull in the low word, then swizzle in identity: (a 0 0 0) -> (a 0 0 1) */
   sse_movss(&p->func, dest, arg0);
   sse_shufps(&p->func, dest, get_identity(p), SHUF(X,Y,Z,W) );
}



static void emit_load3f_3( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   /* Over-reads by 1 dword - potential SEGV if the input is a vertex
    * array.
    */
   if (p->inputs_safe) {
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* c 0 0 0
       * c c c c
       * a b c c
       */
      sse_movss(&p->func, dest, x86_make_disp(arg0, 8));
      sse_shufps(&p->func, dest, dest, SHUF(X,X,X,X));
      sse_movlps(&p->func, dest, arg0);
   }
}

static void emit_load3f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_2(p, dest, arg0);
}

static void emit_load3f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load2f_2( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_load2f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   emit_load4f_1(p, dest, arg0);
}

static void emit_load1f_1( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}

static void (*load[4][4])( struct x86_program *p,
                           struct x86_reg dest,
                           struct x86_reg arg0 ) = {
   { emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1,
     emit_load1f_1 },

   { emit_load2f_1,
     emit_load2f_2,
     emit_load2f_2,
     emit_load2f_2 },

   { emit_load3f_1,
     emit_load3f_2,
     emit_load3f_3,
     emit_load3f_3 },

   { emit_load4f_1,
     emit_load4f_2,
     emit_load4f_3,
     emit_load4f_4 }
};
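
/* Indexed as load[dst_size - 1][src_size - 1].  When the source supplies
 * fewer components than the destination wants, the missing components are
 * filled from the identity vector (0, 0, 0, 1).
 */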

static void emit_load( struct x86_program *p,
                       struct x86_reg dest,
                       GLuint sz,
                       struct x86_reg src,
                       GLuint src_sz)
{
   load[sz-1][src_sz-1](p, dest, src);
}

static void emit_store4f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movups(&p->func, dest, arg0);
}

static void emit_store3f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   if (p->outputs_safe) {
      /* Emit the extra dword anyway.  This may hurt writecombining,
       * and may cause other problems.
       */
      sse_movups(&p->func, dest, arg0);
   }
   else {
      /* Alternate strategy - emit two, shuffle, emit one.
       */
      sse_movlps(&p->func, dest, arg0);
      sse_shufps(&p->func, arg0, arg0, SHUF(Z,Z,Z,Z) ); /* NOTE! destructive */
      sse_movss(&p->func, x86_make_disp(dest,8), arg0);
   }
}

static void emit_store2f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movlps(&p->func, dest, arg0);
}

static void emit_store1f( struct x86_program *p,
                          struct x86_reg dest,
                          struct x86_reg arg0 )
{
   sse_movss(&p->func, dest, arg0);
}


static void (*store[4])( struct x86_program *p,
                         struct x86_reg dest,
                         struct x86_reg arg0 ) =
{
   emit_store1f,
   emit_store2f,
   emit_store3f,
   emit_store4f
};
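
/* Indexed as store[dst_size - 1].  Note that emit_store3f on the unsafe
 * path clobbers the source xmm register.
 */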

static void emit_store( struct x86_program *p,
                        struct x86_reg dest,
                        GLuint sz,
                        struct x86_reg temp )
{
   store[sz-1](p, dest, temp);
}

static void emit_pack_store_4ub( struct x86_program *p,
                                 struct x86_reg dest,
                                 struct x86_reg temp )
{
   /* Scale by 255.0:
    */
   sse_mulps(&p->func, temp, p->chan0);

   if (p->have_sse2) {
      sse2_cvtps2dq(&p->func, temp, temp);
      sse2_packssdw(&p->func, temp, temp);
      sse2_packuswb(&p->func, temp, temp);
      sse_movss(&p->func, dest, temp);
   }
   else {
      struct x86_reg mmx0 = x86_make_reg(file_MMX, 0);
      struct x86_reg mmx1 = x86_make_reg(file_MMX, 1);
      sse_cvtps2pi(&p->func, mmx0, temp);
      sse_movhlps(&p->func, temp, temp);
      sse_cvtps2pi(&p->func, mmx1, temp);
      mmx_packssdw(&p->func, mmx0, mmx1);
      mmx_packuswb(&p->func, mmx0, mmx0);
      mmx_movd(&p->func, dest, mmx0);
   }
}
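
/* Net effect of either path above, sketched in scalar C:
 *
 *    for (i = 0; i < 4; i++)
 *       ((GLubyte *)dest)[i] = (GLubyte) CLAMP(temp[i] * 255.0f, 0.0f, 255.0f);
 *
 * The float->int conversions round according to the current MXCSR mode,
 * and the saturating pack instructions provide the clamping.
 */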

/* Compute the byte offset of member 'b' within the struct starting at
 * 'a' -- offsetof() expressed on live pointers.
 */
static GLint get_offset( const void *a, const void *b )
{
   return (const char *)b - (const char *)a;
}

/* Not much happens here.  Eventually use this function to try and
 * avoid saving/reloading the source pointers each vertex (if some of
 * them can fit in registers).
 */
static void get_src_ptr( struct x86_program *p,
                         struct x86_reg srcREG,
                         struct x86_reg vtxREG,
                         struct tnl_clipspace_attr *a )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
   struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

   /* Load the current a[j].inputptr:
    */
   x86_mov(&p->func, srcREG, ptr_to_src);
}

static void update_src_ptr( struct x86_program *p,
                            struct x86_reg srcREG,
                            struct x86_reg vtxREG,
                            struct tnl_clipspace_attr *a )
{
   if (a->inputstride) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(p->ctx);
      struct x86_reg ptr_to_src = x86_make_disp(vtxREG, get_offset(vtx, &a->inputptr));

      /* Add a[j].inputstride (hardcoded value - could just as easily
       * pull the stride value from memory each time).
       */
      x86_lea(&p->func, srcREG, x86_make_disp(srcREG, a->inputstride));

      /* Save the new value of a[j].inputptr:
       */
      x86_mov(&p->func, ptr_to_src, srcREG);
   }
}
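
/* Together, get_src_ptr()/update_src_ptr() make the generated code follow
 * the per-attribute idiom:
 *
 *    src = a[j].inputptr;                   (get_src_ptr)
 *    ... load, transform and store one attribute ...
 *    a[j].inputptr += a[j].inputstride;     (update_src_ptr)
 */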


/* Lots of hardcoding
 *
 * EAX -- pointer to the current output vertex
 * ECX -- pointer to the current attribute's source data
 * EBP -- remaining vertex count
 * ESI -- pointer to the tnl_clipspace struct
 *
 */
static GLboolean build_vertex_emit( struct x86_program *p )
{
   GLcontext *ctx = p->ctx;
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint j = 0;

   struct x86_reg vertexEAX = x86_make_reg(file_REG32, reg_AX);
   struct x86_reg srcECX = x86_make_reg(file_REG32, reg_CX);
   struct x86_reg countEBP = x86_make_reg(file_REG32, reg_BP);
   struct x86_reg vtxESI = x86_make_reg(file_REG32, reg_SI);
   struct x86_reg temp = x86_make_reg(file_XMM, 0);
   struct x86_reg vp0 = x86_make_reg(file_XMM, 1);
   struct x86_reg vp1 = x86_make_reg(file_XMM, 2);
   GLubyte *fixup, *label;

   /* Push the callee-saved regs we use:
    */
   x86_push(&p->func, countEBP);
   x86_push(&p->func, vtxESI);


   /* Get the vertex count and compare it to zero:
    */
   x86_xor(&p->func, srcECX, srcECX);
   x86_mov(&p->func, countEBP, x86_fn_arg(&p->func, 2));
   x86_cmp(&p->func, countEBP, srcECX);
   fixup = x86_jcc_forward(&p->func, cc_E);

   /* Initialize the destination register:
    */
   x86_mov(&p->func, vertexEAX, x86_fn_arg(&p->func, 3));

   /* Dereference ctx to get tnl, then vtx:
    */
   x86_mov(&p->func, vtxESI, x86_fn_arg(&p->func, 1));
   x86_mov(&p->func, vtxESI, x86_make_disp(vtxESI, get_offset(ctx, &ctx->swtnl_context)));
   vtxESI = x86_make_disp(vtxESI, get_offset(tnl, &tnl->clipspace));


   /* Possibly load vp0, vp1 for viewport calcs:
    */
   if (vtx->need_viewport) {
      sse_movups(&p->func, vp0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_scale[0])));
      sse_movups(&p->func, vp1, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_xlate[0])));
   }

   /* Always load these, needed or not:
    */
   sse_movups(&p->func, p->chan0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->chan_scale[0])));
   sse_movups(&p->func, p->identity, x86_make_disp(vtxESI, get_offset(vtx, &vtx->identity[0])));

   /* Note the address for the loop jump: */
   label = x86_get_label(&p->func);

   /* Emit code for each of the attributes.  Currently routes
    * everything through SSE registers, even when it might be more
    * efficient to stick with regular old x86.  No optimization or
    * other tricks - enough new ground to cover here just getting
    * things working.
    */
   while (j < vtx->attr_count) {
      struct tnl_clipspace_attr *a = &vtx->attr[j];
      struct x86_reg dest = x86_make_disp(vertexEAX, a->vertoffset);

      /* Now, load an XMM reg from src, perhaps transform, then save.
       * Could be shortcircuited in specific cases:
       */
      switch (a->format) {
      case EMIT_1F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 1, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F:
         /* Potentially the worst case - hardcode 2+1 copying (the
          * one-shot 3-component path is disabled):
          */
         if (0) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 3, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 2, temp);
            if (a->inputsize > 2) {
               emit_load(p, temp, 1, x86_make_disp(srcECX, 8), 1);
               emit_store(p, x86_make_disp(dest,8), 1, temp);
            }
            else {
               sse_movss(&p->func, x86_make_disp(dest,8), get_identity(p));
            }
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         break;
      case EMIT_4F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_XYW:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(X,Y,W,Z));
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;

      case EMIT_1UB_1F:
         /* Test for PAD3 + 1UB:
          */
         if (j > 0 &&
             a[-1].vertoffset + a[-1].vertattrsize <= a->vertoffset - 3)
         {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
            sse_shufps(&p->func, temp, temp, SHUF(X,X,X,X));
            emit_pack_store_4ub(p, x86_make_disp(dest, -3), temp); /* overkill! */
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            /* Don't dereference a[-1] in the diagnostic when this is
             * the first attribute:
             */
            if (j > 0)
               _mesa_printf("Can't emit 1ub %x %x %d\n", a->vertoffset,
                            a[-1].vertoffset, a[-1].vertattrsize );
            else
               _mesa_printf("Can't emit 1ub %x\n", a->vertoffset);
            return GL_FALSE;
         }
         break;
      case EMIT_3UB_3F_RGB:
      case EMIT_3UB_3F_BGR:
         /* Test for 3UB + PAD1:
          */
         if (j == vtx->attr_count - 1 ||
             a[1].vertoffset >= a->vertoffset + 4) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         /* Test for 3UB + 1UB:
          */
         else if (j < vtx->attr_count - 1 &&
                  a[1].format == EMIT_1UB_1F &&
                  a[1].vertoffset == a->vertoffset + 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            update_src_ptr(p, srcECX, vtxESI, a);

            /* Make room for the incoming value:
             */
            sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));

            get_src_ptr(p, srcECX, vtxESI, &a[1]);
            emit_load(p, temp, 1, x86_deref(srcECX), a[1].inputsize);
            update_src_ptr(p, srcECX, vtxESI, &a[1]);

            /* Rearrange and possibly do the BGR conversion:
             */
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
            else
               sse_shufps(&p->func, temp, temp, SHUF(Y,Z,W,X));

            emit_pack_store_4ub(p, dest, temp);
            j++;		/* NOTE: two attrs consumed */
         }
         else {
            _mesa_printf("Can't emit 3ub\n");
            return GL_FALSE;	/* add this later */
         }
         break;

      case EMIT_4UB_4F_RGBA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_BGRA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(Z,Y,X,W));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ARGB:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,X,Y,Z));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ABGR:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W,Z,Y,X));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4CHAN_4F_RGBA:
         switch (CHAN_TYPE) {
         case GL_UNSIGNED_BYTE:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_FLOAT:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 4, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_UNSIGNED_SHORT:
         default:
            _mesa_printf("unknown CHAN_TYPE %s\n", _mesa_lookup_enum_by_nr(CHAN_TYPE));
            return GL_FALSE;
         }
         break;
      default:
         _mesa_printf("unknown a[%d].format %d\n", j, a->format);
         return GL_FALSE;	/* catch any new opcodes */
      }

      /* Increment j by at least 1 - it may also have been incremented
       * above:
       */
      j++;
   }

   /* Next vertex:
    */
   x86_lea(&p->func, vertexEAX, x86_make_disp(vertexEAX, vtx->vertex_size));

   /* Decrement the count and loop back if it isn't zero:
    */
   x86_dec(&p->func, countEBP);
   x86_test(&p->func, countEBP, countEBP);
   x86_jcc(&p->func, cc_NZ, label);

   /* Exit mmx state, if the MMX path was used:
    */
   if (p->func.need_emms)
      mmx_emms(&p->func);

   /* Land the forward jump (count == 0) here:
    */
   x86_fixup_fwd_jump(&p->func, fixup);

   /* Pop regs and return:
    */
   x86_pop(&p->func, x86_get_base_reg(vtxESI));
   x86_pop(&p->func, countEBP);
   x86_ret(&p->func);

   assert(!vtx->emit);
   vtx->emit = (tnl_emit_func)x86_get_func(&p->func);

   assert( (char *) p->func.csr - (char *) p->func.store <= MAX_SSE_CODE_SIZE );
   return GL_TRUE;
}
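
/* The function just built is installed as vtx->emit and called through
 * the tnl_emit_func pointer; judging from the x86_fn_arg() references
 * above, its arguments are (ctx, count, dest):
 *
 *    void emit( GLcontext *ctx, GLuint count, GLubyte *dest );
 */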



void _tnl_generate_sse_emit( GLcontext *ctx )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct x86_program p;

   if (!cpu_has_xmm) {
      vtx->codegen_emit = NULL;
      return;
   }

   _mesa_memset(&p, 0, sizeof(p));

   p.ctx = ctx;
   p.inputs_safe = 0;		/* for now */
   p.outputs_safe = 0;		/* for now */
   p.have_sse2 = cpu_has_xmm2;
   p.identity = x86_make_reg(file_XMM, 6);   /* xmm6: (0,0,0,1) constant */
   p.chan0 = x86_make_reg(file_XMM, 7);      /* xmm7: channel scale */

   if (!x86_init_func_size(&p.func, MAX_SSE_CODE_SIZE)) {
      vtx->emit = NULL;
      return;
   }

   if (build_vertex_emit(&p)) {
      _tnl_register_fastpath( vtx, GL_TRUE );
   }
   else {
      /* Note the failure so that we don't keep trying to codegen an
       * impossible state:
       */
      _tnl_register_fastpath( vtx, GL_FALSE );
      x86_release_func(&p.func);
   }
}

#else

void _tnl_generate_sse_emit( GLcontext *ctx )
{
   /* Dummy version for when USE_SSE_ASM is not defined */
   (void) ctx;   /* silence unused-parameter warnings */
}

#endif