/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

extern "C" {
#include "main/macros.h"
#include "brw_context.h"
#include "brw_vs.h"
}
#include "brw_fs.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"
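
/**
 * Driver callback for creating a new shader object: allocates the
 * driver-private brw_shader wrapper and initializes the embedded gl_shader.
 */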
struct gl_shader *
brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct brw_shader *shader;

   shader = rzalloc(NULL, struct brw_shader);
   if (shader) {
      shader->base.Type = type;
      shader->base.Name = name;
      _mesa_init_shader(ctx, &shader->base);
   }

   return &shader->base;
}
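
/**
 * Driver callback for creating a new shader program object (the container
 * that shaders are attached to and linked into).
 */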
struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct brw_shader_program *prog;
   prog = rzalloc(NULL, struct brw_shader_program);
   if (prog) {
      prog->base.Name = name;
      _mesa_init_shader_program(ctx, &prog->base);
   }
   return &prog->base;
}

/**
 * Compiles the shader stages at link time, even though the non-orthogonal
 * state (NOS) that will eventually be set is not yet known, in the hope
 * that the guess matches the NOS used for rendering.  Doing this here lets
 * compile problems be reported as link failures.
 */
bool
brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->precompile && !brw_fs_precompile(ctx, prog))
      return false;

   if (brw->precompile && !brw_vs_precompile(ctx, prog))
      return false;

   return true;
}

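/**
 * Driver hook for linking a shader program.  For each linked stage this
 * clones the GLSL IR into driver storage, runs the lowering and
 * optimization passes the backends rely on, builds the parameter list for
 * built-in uniforms, and finally kicks off a precompile of the stages.
 */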
GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct brw_shader *shader =
         (struct brw_shader *) shProg->_LinkedShaders[stage];
      static const GLenum targets[] = {
         GL_VERTEX_PROGRAM_ARB,
         GL_FRAGMENT_PROGRAM_ARB,
         GL_GEOMETRY_PROGRAM_NV
      };

      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, targets[stage], shader->base.Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      _mesa_generate_parameters_list_for_uniforms(shProg, &shader->base,
                                                  prog->Parameters);

      if (stage == 0) {
         struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
         vp->UsesClipDistance = shProg->Vert.UsesClipDistance;
      }

      void *mem_ctx = ralloc_context(NULL);
      bool progress;

      if (shader->ir)
         ralloc_free(shader->ir);
      shader->ir = new(shader) exec_list;
      clone_ir_list(mem_ctx, shader->ir, shader->base.ir);

      do_mat_op_to_vec(shader->ir);
      lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2);

      /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
       * if-statements need to be flattened.
       */
      if (intel->gen < 6)
         lower_if_to_cond_assign(shader->ir, 16);

      do_lower_texture_projection(shader->ir);
      if (intel->gen < 8 && !intel->is_haswell)
         brw_lower_texture_gradients(shader->ir);
      do_vec_index_to_cond_assign(shader->ir);
      brw_do_cubemap_normalize(shader->ir);
      lower_noise(shader->ir);
      lower_quadop_vector(shader->ir, false);

      bool input = true;
      bool output = stage == MESA_SHADER_FRAGMENT;
      bool temp = stage == MESA_SHADER_FRAGMENT;
      bool uniform = stage == MESA_SHADER_FRAGMENT;

      lower_variable_index_to_cond_assign(shader->ir,
                                          input, output, temp, uniform);

      /* FINISHME: Do this before the variable index lowering. */
      lower_ubo_reference(&shader->base, shader->ir);

      do {
         progress = false;

         if (stage == MESA_SHADER_FRAGMENT) {
            brw_do_channel_expressions(shader->ir);
            brw_do_vector_splitting(shader->ir);
         }

         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
                                   false, /* continue */
                                   false /* loops */
                                   ) || progress;

         progress = do_common_optimization(shader->ir, true, true, 32)
            || progress;
      } while (progress);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_list(node, shader->ir) {
         ir_variable *var = ((ir_instruction *) node)->as_variable();

         if ((var == NULL) || (var->mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->state_slots;
         assert(var->state_slots != NULL);

         for (unsigned int i = 0; i < var->num_state_slots; i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      validate_ir_tree(shader->ir);

      reparent_ir(shader->ir, shader->ir);
      ralloc_free(mem_ctx);

      do_set_program_inouts(shader->ir, prog,
                            shader->base.Type == GL_FRAGMENT_SHADER);

      prog->SamplersUsed = shader->base.active_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      _mesa_reference_program(ctx, &shader->base.Program, prog);

      /* This has to be done last.  Any operation that can cause
       * prog->ParameterValues to get reallocated (e.g., anything that adds a
       * program constant) has to happen before creating this linkage.
       */
      _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);

      _mesa_reference_program(ctx, &prog, NULL);
   }

   if (!brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}

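/**
 * Returns the BRW register type to use for values of the given GLSL base
 * type (arrays use the type of their element).
 */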
int
brw_type_for_base_type(const struct glsl_type *type)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_ARRAY:
      return brw_type_for_base_type(type->fields.array);
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_SAMPLER:
      /* These should be overridden with the type of the member when
       * dereferenced into.  BRW_REGISTER_TYPE_UD is returned here because a
       * missing override is likely to trip up visibly rather than work by
       * accident.
       */
      return BRW_REGISTER_TYPE_UD;
   default:
      assert(!"not reached");
      return BRW_REGISTER_TYPE_F;
   }
}

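/**
 * Maps a GLSL IR comparison opcode to the conditional-modifier encoding
 * used when emitting comparison instructions.
 */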
uint32_t
brw_conditional_for_comparison(unsigned int op)
{
   switch (op) {
   case ir_binop_less:
      return BRW_CONDITIONAL_L;
   case ir_binop_greater:
      return BRW_CONDITIONAL_G;
   case ir_binop_lequal:
      return BRW_CONDITIONAL_LE;
   case ir_binop_gequal:
      return BRW_CONDITIONAL_GE;
   case ir_binop_equal:
   case ir_binop_all_equal: /* same as equal for scalars */
      return BRW_CONDITIONAL_Z;
   case ir_binop_nequal:
   case ir_binop_any_nequal: /* same as nequal for scalars */
      return BRW_CONDITIONAL_NZ;
   default:
      assert(!"not reached: bad operation for comparison");
      return BRW_CONDITIONAL_NZ;
   }
}

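/**
 * Maps a shader math opcode to the function encoding expected by the
 * hardware's extended math instruction.
 */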
uint32_t
brw_math_function(enum opcode op)
{
   switch (op) {
   case SHADER_OPCODE_RCP:
      return BRW_MATH_FUNCTION_INV;
   case SHADER_OPCODE_RSQ:
      return BRW_MATH_FUNCTION_RSQ;
   case SHADER_OPCODE_SQRT:
      return BRW_MATH_FUNCTION_SQRT;
   case SHADER_OPCODE_EXP2:
      return BRW_MATH_FUNCTION_EXP;
   case SHADER_OPCODE_LOG2:
      return BRW_MATH_FUNCTION_LOG;
   case SHADER_OPCODE_POW:
      return BRW_MATH_FUNCTION_POW;
   case SHADER_OPCODE_SIN:
      return BRW_MATH_FUNCTION_SIN;
   case SHADER_OPCODE_COS:
      return BRW_MATH_FUNCTION_COS;
   case SHADER_OPCODE_INT_QUOTIENT:
      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
   case SHADER_OPCODE_INT_REMAINDER:
      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
   default:
      assert(!"not reached: unknown math function");
      return 0;
   }
}

uint32_t
brw_texture_offset(ir_constant *offset)
{
   assert(offset != NULL);

   signed char offsets[3];
   for (unsigned i = 0; i < offset->type->vector_elements; i++)
      offsets[i] = (signed char) offset->value.i[i];

   /* Combine all three offsets into a single unsigned dword:
    *
    *    bits 11:8 - U Offset (X component)
    *    bits  7:4 - V Offset (Y component)
    *    bits  3:0 - R Offset (Z component)
    */
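   /* Each component is stored as a 4-bit two's-complement field, so, for
    * example, an offset of (1, -2, 3) packs to 0x1e3.
    */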
   unsigned offset_bits = 0;
   for (unsigned i = 0; i < offset->type->vector_elements; i++) {
      const unsigned shift = 4 * (2 - i);
      offset_bits |= (offsets[i] << shift) & (0xF << shift);
   }
   return offset_bits;
}