/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_inlines.h"
#include "pipe/p_defines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_bitmask.h"
#include "translate/translate.h"
#include "tgsi/tgsi_ureg.h"

#include "svga_context.h"
#include "svga_state.h"
#include "svga_cmd.h"
#include "svga_shader.h"
#include "svga_tgsi.h"

#include "svga_hw_reg.h"


/**
 * If we fail to compile a vertex shader we'll use a dummy/fallback shader
 * that simply emits a (0,0,0,1) vertex position.
 */
static const struct tgsi_token *
get_dummy_vertex_shader(void)
{
   static const float zero[4] = { 0.0, 0.0, 0.0, 1.0 };
   struct ureg_program *ureg;
   const struct tgsi_token *tokens;
   struct ureg_src src;
   struct ureg_dst dst;
   unsigned num_tokens;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   src = ureg_DECL_immediate(ureg, zero, 4);
   ureg_MOV(ureg, dst, src);
   ureg_END(ureg);

   tokens = ureg_get_tokens(ureg, &num_tokens);

   ureg_destroy(ureg);

   return tokens;
}


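/**
 * Translate the given vertex shader into a device shader variant,
 * using the VGPU10 translator when available, else the VGPU9 one.
 */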
static struct svga_shader_variant *
translate_vertex_program(struct svga_context *svga,
                         const struct svga_vertex_shader *vs,
                         const struct svga_compile_key *key)
{
   if (svga_have_vgpu10(svga)) {
      return svga_tgsi_vgpu10_translate(svga, &vs->base, key,
                                        PIPE_SHADER_VERTEX);
   }
   else {
      return svga_tgsi_vgpu9_translate(svga, &vs->base, key,
                                       PIPE_SHADER_VERTEX);
   }
}


/**
 * Replace the given shader's instructions with those of a simple/dummy
 * shader.  We use this when normal shader translation fails.
 */
static struct svga_shader_variant *
get_compiled_dummy_vertex_shader(struct svga_context *svga,
                                 struct svga_vertex_shader *vs,
                                 const struct svga_compile_key *key)
{
   const struct tgsi_token *dummy = get_dummy_vertex_shader();
   struct svga_shader_variant *variant;

   if (!dummy) {
      return NULL;
   }

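   /* Discard the shader's original token buffer and substitute the
    * dummy shader's tokens before translating.
    */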
   FREE((void *) vs->base.tokens);
   vs->base.tokens = dummy;

   variant = translate_vertex_program(svga, vs, key);
   return variant;
}


/**
 * Translate the TGSI shader into an svga shader variant and define it
 * for the device.  Falls back to a dummy shader if translation fails
 * or the resulting shader is too large.
 */
static enum pipe_error
compile_vs(struct svga_context *svga,
           struct svga_vertex_shader *vs,
           const struct svga_compile_key *key,
           struct svga_shader_variant **out_variant)
{
   struct svga_shader_variant *variant;
   enum pipe_error ret = PIPE_ERROR;

   variant = translate_vertex_program(svga, vs, key);
   if (variant == NULL) {
      debug_printf("Failed to compile vertex shader,"
                   " using dummy shader instead.\n");
      variant = get_compiled_dummy_vertex_shader(svga, vs, key);
   }
   else if (svga_shader_too_large(svga, variant)) {
      /* too big, use dummy shader */
      debug_printf("Shader too large (%u bytes),"
                   " using dummy shader instead.\n",
                   (unsigned) (variant->nr_tokens
                               * sizeof(variant->tokens[0])));
      /* Free the too-large variant */
      svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
      /* Use the simple dummy shader instead */
      variant = get_compiled_dummy_vertex_shader(svga, vs, key);
   }

   if (!variant) {
      return PIPE_ERROR;
   }

   ret = svga_define_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
   if (ret != PIPE_OK) {
      svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
      return ret;
   }

   *out_variant = variant;

   return PIPE_OK;
}


/**
 * Build the compile key for the current vertex shader.
 *
 * SVGA_NEW_PRESCALE, SVGA_NEW_RAST, SVGA_NEW_FS
 */
static void
make_vs_key(struct svga_context *svga, struct svga_compile_key *key)
{
   const enum pipe_shader_type shader = PIPE_SHADER_VERTEX;

   memset(key, 0, sizeof *key);

   if (svga->state.sw.need_swtnl && svga_have_vgpu10(svga)) {
      /* Set both of these flags, to match compile_passthrough_vs() */
      key->vs.passthrough = 1;
      key->vs.undo_viewport = 1;
      return;
   }

   /* SVGA_NEW_PRESCALE */
   key->vs.need_prescale = svga->state.hw_clear.prescale.enabled &&
                           (svga->curr.gs == NULL);

   /* SVGA_NEW_RAST */
   key->vs.allow_psiz = svga->curr.rast->templ.point_size_per_vertex;

   /* SVGA_NEW_FS */
   key->vs.fs_generic_inputs = svga->curr.fs->generic_inputs;

   svga_remap_generics(key->vs.fs_generic_inputs, key->generic_remap_table);

   /* SVGA_NEW_VELEMENT */
   key->vs.adjust_attrib_range = svga->curr.velems->adjust_attrib_range;
   key->vs.adjust_attrib_w_1 = svga->curr.velems->adjust_attrib_w_1;
   key->vs.attrib_is_pure_int = svga->curr.velems->attrib_is_pure_int;
   key->vs.adjust_attrib_itof = svga->curr.velems->adjust_attrib_itof;
   key->vs.adjust_attrib_utof = svga->curr.velems->adjust_attrib_utof;
   key->vs.attrib_is_bgra = svga->curr.velems->attrib_is_bgra;
   key->vs.attrib_puint_to_snorm = svga->curr.velems->attrib_puint_to_snorm;
   key->vs.attrib_puint_to_uscaled = svga->curr.velems->attrib_puint_to_uscaled;
   key->vs.attrib_puint_to_sscaled = svga->curr.velems->attrib_puint_to_sscaled;

   /* SVGA_NEW_TEXTURE_BINDING | SVGA_NEW_SAMPLER */
   svga_init_shader_key_common(svga, shader, key);

   /* SVGA_NEW_RAST */
   key->clip_plane_enable = svga->curr.rast->templ.clip_plane_enable;
}


/**
 * svga_reemit_vs_bindings - Re-emit the vertex shader bindings
 *
 * Rebinds the currently bound guest-backed vertex shader, e.g. after
 * the command buffer has been flushed.
 */
enum pipe_error
svga_reemit_vs_bindings(struct svga_context *svga)
{
   enum pipe_error ret;
   struct svga_winsys_gb_shader *gbshader = NULL;
   SVGA3dShaderId shaderId = SVGA3D_INVALID_ID;

   assert(svga->rebind.flags.vs);
   assert(svga_have_gb_objects(svga));

   if (svga->state.hw_draw.vs) {
      gbshader = svga->state.hw_draw.vs->gb_shader;
      shaderId = svga->state.hw_draw.vs->id;
   }

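   /* If a full resource rebind isn't required, just re-reference the
    * shader resource in the command buffer instead of re-issuing the
    * SetShader command.
    */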
   if (!svga_need_to_rebind_resources(svga)) {
      ret =  svga->swc->resource_rebind(svga->swc, NULL, gbshader,
                                        SVGA_RELOC_READ);
      goto out;
   }

   if (svga_have_vgpu10(svga))
      ret = SVGA3D_vgpu10_SetShader(svga->swc, SVGA3D_SHADERTYPE_VS,
                                    gbshader, shaderId);
   else
      ret = SVGA3D_SetGBShader(svga->swc, SVGA3D_SHADERTYPE_VS, gbshader);

 out:
   if (ret != PIPE_OK)
      return ret;

   svga->rebind.flags.vs = FALSE;
   return PIPE_OK;
}


/**
 * Generate a simple pass-through vertex shader.  This is used with the
 * 'draw' (software TNL) module: the current vertex shader has already
 * been executed by 'draw', so we just need a vertex shader that passes
 * through the VS outputs that will be consumed by the fragment shader.
 */
static enum pipe_error
compile_passthrough_vs(struct svga_context *svga,
                       struct svga_vertex_shader *vs,
                       struct svga_fragment_shader *fs,
                       struct svga_shader_variant **out_variant)
{
   struct svga_shader_variant *variant = NULL;
   unsigned num_inputs;
   unsigned i;
   unsigned num_elements;
   struct svga_vertex_shader new_vs;
   struct ureg_src src[PIPE_MAX_SHADER_INPUTS];
   struct ureg_dst dst[PIPE_MAX_SHADER_OUTPUTS];
   struct ureg_program *ureg;
   unsigned num_tokens;
   struct svga_compile_key key;
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));
   assert(fs);

   num_inputs = fs->base.info.num_inputs;

   ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* draw will always add position */
   dst[0] = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
   src[0] = ureg_DECL_vs_input(ureg, 0);
   num_elements = 1;

   /**
    * The swtnl backend redefines the input layout based on the
    * fragment shader's inputs, so we only need to pass through
    * those inputs that will be consumed by the fragment shader.
    * Note: DX10 requires the number of vertex elements
    * specified in the input layout to be no less than the
    * number of inputs to the vertex shader.
    */
   for (i = 0; i < num_inputs; i++) {
      switch (fs->base.info.input_semantic_name[i]) {
      case TGSI_SEMANTIC_COLOR:
      case TGSI_SEMANTIC_GENERIC:
      case TGSI_SEMANTIC_FOG:
         dst[num_elements] = ureg_DECL_output(ureg,
                                fs->base.info.input_semantic_name[i],
                                fs->base.info.input_semantic_index[i]);
         src[num_elements] = ureg_DECL_vs_input(ureg, num_elements);
         num_elements++;
         break;
      default:
         break;
      }
   }

   for (i = 0; i < num_elements; i++) {
      ureg_MOV(ureg, dst[i], src[i]);
   }

   ureg_END(ureg);

   memset(&new_vs, 0, sizeof(new_vs));
   new_vs.base.tokens = ureg_get_tokens(ureg, &num_tokens);
   tgsi_scan_shader(new_vs.base.tokens, &new_vs.base.info);

   memset(&key, 0, sizeof(key));
   key.vs.undo_viewport = 1;

   ret = compile_vs(svga, &new_vs, &key, &variant);

   /* Free the temporary tokens and ureg program whether or not the
    * compilation succeeded, so they aren't leaked on the error path.
    */
   ureg_free_tokens(new_vs.base.tokens);
   ureg_destroy(ureg);

   if (ret != PIPE_OK)
      return ret;

   /* Overwrite the variant key to indicate it's a pass-through VS */
   memset(&variant->key, 0, sizeof(variant->key));
   variant->key.vs.passthrough = 1;
   variant->key.vs.undo_viewport = 1;

   *out_variant = variant;

   return PIPE_OK;
}


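/**
 * Emit the vertex shader for the current state.  Finds or compiles a
 * shader variant matching the current compile key, then binds it to
 * the device if it differs from the currently bound variant.
 */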
static enum pipe_error
emit_hw_vs(struct svga_context *svga, unsigned dirty)
{
   struct svga_shader_variant *variant;
   struct svga_vertex_shader *vs = svga->curr.vs;
   struct svga_fragment_shader *fs = svga->curr.fs;
   enum pipe_error ret = PIPE_OK;
   struct svga_compile_key key;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITVS);

   /* If there is an active geometry shader with stream output defined,
    * we skip setting up stream output from the vertex shader.
    */
   if (!svga_have_gs_streamout(svga)) {
      /* No GS stream out */
      if (svga_have_vs_streamout(svga)) {
         /* Set VS stream out */
         svga_set_stream_output(svga, vs->base.stream_output);
      }
      else {
         /* turn off stream out */
         svga_set_stream_output(svga, NULL);
      }
   }

   /* SVGA_NEW_NEED_SWTNL */
   if (svga->state.sw.need_swtnl && !svga_have_vgpu10(svga)) {
      /* No vertex shader is needed */
      variant = NULL;
   }
   else {
      make_vs_key(svga, &key);

      /* See if we already have a VS variant that matches the key */
      variant = svga_search_shader_key(&vs->base, &key);

      if (!variant) {
         /* Create VS variant now */
         if (key.vs.passthrough) {
            ret = compile_passthrough_vs(svga, vs, fs, &variant);
         }
         else {
            ret = compile_vs(svga, vs, &key, &variant);
         }
         if (ret != PIPE_OK)
            goto done;

         /* insert the new variant at head of linked list */
         assert(variant);
         variant->next = vs->base.variants;
         vs->base.variants = variant;
      }
   }

   if (variant != svga->state.hw_draw.vs) {
      /* Bind the new variant */
      if (variant) {
         ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
         if (ret != PIPE_OK)
            goto done;
         svga->rebind.flags.vs = FALSE;
      }

      svga->dirty |= SVGA_NEW_VS_VARIANT;
      svga->state.hw_draw.vs = variant;
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}

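/**
 * Tracked-state atom for the hardware vertex shader.  emit_hw_vs() is
 * invoked whenever any of the listed dirty flags are set.
 */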
struct svga_tracked_state svga_hw_vs =
{
   "vertex shader (hwtnl)",
   (SVGA_NEW_VS |
    SVGA_NEW_FS |
    SVGA_NEW_TEXTURE_BINDING |
    SVGA_NEW_SAMPLER |
    SVGA_NEW_RAST |
    SVGA_NEW_PRESCALE |
    SVGA_NEW_VELEMENT |
    SVGA_NEW_NEED_SWTNL),
   emit_hw_vs
};