/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_mat_op_to_vec.cpp
 *
 * Breaks matrix operation expressions down to a series of vector operations.
 *
 * Generally this is how we have to codegen matrix operations for a
 * GPU, so this gives us the chance to constant fold operations on a
 * column or row.
 */
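
/* As a rough illustration (hypothetical GLSL, not from any particular
 * shader), an assignment such as
 *
 *    mat2 m;
 *    ...
 *    m = a + b;
 *
 * is lowered into column-wise vector assignments along the lines of
 *
 *    m[0] = a[0] + b[0];
 *    m[1] = a[1] + b[1];
 *
 * Matrix multiplication and comparison are handled by the dedicated
 * do_mul_* and do_equal_mat_mat helpers below.
 */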

#include "ir.h"
#include "ir_expression_flattening.h"
#include "compiler/glsl_types.h"

namespace {

class ir_mat_op_to_vec_visitor : public ir_hierarchical_visitor {
public:
   ir_mat_op_to_vec_visitor()
   {
      this->made_progress = false;
      this->mem_ctx = NULL;
   }

   ir_visitor_status visit_leave(ir_assignment *);

   ir_dereference *get_column(ir_dereference *val, int col);
   ir_rvalue *get_element(ir_dereference *val, int col, int row);

   void do_mul_mat_mat(ir_dereference *result,
                       ir_dereference *a, ir_dereference *b);
   void do_mul_mat_vec(ir_dereference *result,
                       ir_dereference *a, ir_dereference *b);
   void do_mul_vec_mat(ir_dereference *result,
                       ir_dereference *a, ir_dereference *b);
   void do_mul_mat_scalar(ir_dereference *result,
                          ir_dereference *a, ir_dereference *b);
   void do_equal_mat_mat(ir_dereference *result, ir_dereference *a,
                         ir_dereference *b, bool test_equal);

   void *mem_ctx;
   bool made_progress;
};

} /* anonymous namespace */

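/* Predicate for do_expression_flattening(): matches any expression with a
 * matrix operand, so that such expressions get pulled out into their own
 * assignments to temporaries.
 */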
static bool
mat_op_to_vec_predicate(ir_instruction *ir)
{
   ir_expression *expr = ir->as_expression();
   unsigned int i;

   if (!expr)
      return false;

   for (i = 0; i < expr->get_num_operands(); i++) {
      if (expr->operands[i]->type->is_matrix())
         return true;
   }

   return false;
}

bool
do_mat_op_to_vec(exec_list *instructions)
{
   ir_mat_op_to_vec_visitor v;

   /* Pull any matrix expression out into a separate assignment to a
    * temporary.  That makes it much easier to break each operation
    * down into operations on the matrix's column vectors.
    */
   do_expression_flattening(instructions, mat_op_to_vec_predicate);
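   /* For example (hypothetical GLSL, purely illustrative), after flattening
    * an assignment like
    *
    *    m3 = m1 * (m2 * m2);
    *
    * has become roughly
    *
    *    mat_temp = m2 * m2;
    *    m3 = m1 * mat_temp;
    *
    * so each matrix operation the visitor has to lower appears directly as
    * the right-hand side of a simple assignment.
    */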

   visit_list_elements(&v, instructions);

   return v.made_progress;
}

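/* Returns a scalar rvalue for the element at (col, row) of \c val,
 * implemented as a single-component swizzle of get_column(val, col).
 */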
ir_rvalue *
ir_mat_op_to_vec_visitor::get_element(ir_dereference *val, int col, int row)
{
   val = get_column(val, col);

   return new(mem_ctx) ir_swizzle(val, row, 0, 0, 0, 1);
}

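/* Returns a dereference of column \c col of \c val if it is a matrix, or
 * simply a clone of \c val otherwise (vectors and scalars pass through
 * unchanged).
 */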
ir_dereference *
ir_mat_op_to_vec_visitor::get_column(ir_dereference *val, int col)
{
   val = val->clone(mem_ctx, NULL);

   if (val->type->is_matrix()) {
      val = new(mem_ctx) ir_dereference_array(val,
                                              new(mem_ctx) ir_constant(col));
   }

   return val;
}

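/* Lowers a matrix * matrix multiply.  Roughly, in GLSL terms, for each
 * column of b:
 *
 *    result[b_col] = a[0] * b[b_col].x
 *                  + a[1] * b[b_col].y
 *                  + ...;
 */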
void
ir_mat_op_to_vec_visitor::do_mul_mat_mat(ir_dereference *result,
                                         ir_dereference *a,
                                         ir_dereference *b)
{
   unsigned b_col, i;
   ir_assignment *assign;
   ir_expression *expr;

   for (b_col = 0; b_col < b->type->matrix_columns; b_col++) {
      /* first column */
      expr = new(mem_ctx) ir_expression(ir_binop_mul,
                                        get_column(a, 0),
                                        get_element(b, b_col, 0));

      /* following columns */
      for (i = 1; i < a->type->matrix_columns; i++) {
         ir_expression *mul_expr;

         mul_expr = new(mem_ctx) ir_expression(ir_binop_mul,
                                               get_column(a, i),
                                               get_element(b, b_col, i));
         expr = new(mem_ctx) ir_expression(ir_binop_add,
                                           expr,
                                           mul_expr);
      }

      assign = new(mem_ctx) ir_assignment(get_column(result, b_col), expr);
      base_ir->insert_before(assign);
   }
}

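/* Lowers a matrix * vector multiply.  Roughly, in GLSL terms:
 *
 *    result = a[0] * b.x + a[1] * b.y + ...;
 */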
void
ir_mat_op_to_vec_visitor::do_mul_mat_vec(ir_dereference *result,
                                         ir_dereference *a,
                                         ir_dereference *b)
{
   unsigned i;
   ir_assignment *assign;
   ir_expression *expr;

   /* first column */
   expr = new(mem_ctx) ir_expression(ir_binop_mul,
                                     get_column(a, 0),
                                     get_element(b, 0, 0));

   /* following columns */
   for (i = 1; i < a->type->matrix_columns; i++) {
      ir_expression *mul_expr;

      mul_expr = new(mem_ctx) ir_expression(ir_binop_mul,
                                            get_column(a, i),
                                            get_element(b, 0, i));
      expr = new(mem_ctx) ir_expression(ir_binop_add, expr, mul_expr);
   }

   result = result->clone(mem_ctx, NULL);
   assign = new(mem_ctx) ir_assignment(result, expr);
   base_ir->insert_before(assign);
}

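/* Lowers a vector * matrix multiply.  Roughly, in GLSL terms, each
 * component of the result is a dot product:
 *
 *    result.x = dot(a, b[0]);
 *    result.y = dot(a, b[1]);
 *    ...
 */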
void
ir_mat_op_to_vec_visitor::do_mul_vec_mat(ir_dereference *result,
                                         ir_dereference *a,
                                         ir_dereference *b)
{
   unsigned i;

   for (i = 0; i < b->type->matrix_columns; i++) {
      ir_rvalue *column_result;
      ir_expression *column_expr;
      ir_assignment *column_assign;

      column_result = result->clone(mem_ctx, NULL);
      column_result = new(mem_ctx) ir_swizzle(column_result, i, 0, 0, 0, 1);

      column_expr = new(mem_ctx) ir_expression(ir_binop_dot,
                                               a->clone(mem_ctx, NULL),
                                               get_column(b, i));

      column_assign = new(mem_ctx) ir_assignment(column_result,
                                                 column_expr);
      base_ir->insert_before(column_assign);
   }
}

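/* Lowers a matrix * scalar multiply.  Roughly, in GLSL terms:
 *
 *    result[0] = a[0] * b;
 *    result[1] = a[1] * b;
 *    ...
 */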
void
ir_mat_op_to_vec_visitor::do_mul_mat_scalar(ir_dereference *result,
                                            ir_dereference *a,
                                            ir_dereference *b)
{
   unsigned i;

   for (i = 0; i < a->type->matrix_columns; i++) {
      ir_expression *column_expr;
      ir_assignment *column_assign;

      column_expr = new(mem_ctx) ir_expression(ir_binop_mul,
                                               get_column(a, i),
                                               b->clone(mem_ctx, NULL));

      column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
                                                 column_expr);
      base_ir->insert_before(column_assign);
   }
}

void
ir_mat_op_to_vec_visitor::do_equal_mat_mat(ir_dereference *result,
                                           ir_dereference *a,
                                           ir_dereference *b,
                                           bool test_equal)
{
   /* This essentially implements the following GLSL:
    *
    * bool equal(mat4 a, mat4 b)
    * {
    *   return !any(bvec4(a[0] != b[0],
    *                     a[1] != b[1],
    *                     a[2] != b[2],
    *                     a[3] != b[3]));
    * }
    *
    * bool nequal(mat4 a, mat4 b)
    * {
    *   return any(bvec4(a[0] != b[0],
    *                    a[1] != b[1],
    *                    a[2] != b[2],
    *                    a[3] != b[3]));
    * }
    */
   const unsigned columns = a->type->matrix_columns;
   const glsl_type *const bvec_type =
      glsl_type::get_instance(GLSL_TYPE_BOOL, columns, 1);

   ir_variable *const tmp_bvec =
      new(this->mem_ctx) ir_variable(bvec_type, "mat_cmp_bvec",
                                     ir_var_temporary);
   this->base_ir->insert_before(tmp_bvec);

   for (unsigned i = 0; i < columns; i++) {
      ir_expression *const cmp =
         new(this->mem_ctx) ir_expression(ir_binop_any_nequal,
                                          get_column(a, i),
                                          get_column(b, i));

      ir_dereference *const lhs =
         new(this->mem_ctx) ir_dereference_variable(tmp_bvec);

      ir_assignment *const assign =
         new(this->mem_ctx) ir_assignment(lhs, cmp, NULL, (1U << i));

      this->base_ir->insert_before(assign);
   }

   ir_rvalue *const val = new(this->mem_ctx) ir_dereference_variable(tmp_bvec);
   uint8_t vec_elems = val->type->vector_elements;
   ir_expression *any =
      new(this->mem_ctx) ir_expression(ir_binop_any_nequal, val,
                                       new(this->mem_ctx) ir_constant(false,
                                                                      vec_elems));

   if (test_equal)
      any = new(this->mem_ctx) ir_expression(ir_unop_logic_not, any);

   ir_assignment *const assign =
      new(mem_ctx) ir_assignment(result->clone(mem_ctx, NULL), any);
   base_ir->insert_before(assign);
}

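/* Returns true if any operand of \c expr is a matrix, and if so also
 * reports that operand's column count through \c columns.
 */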
static bool
has_matrix_operand(const ir_expression *expr, unsigned &columns)
{
   for (unsigned i = 0; i < expr->get_num_operands(); i++) {
      if (expr->operands[i]->type->is_matrix()) {
         columns = expr->operands[i]->type->matrix_columns;
         return true;
      }
   }

   return false;
}


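/* Rewrites any assignment whose right-hand side is a matrix operation:
 * the operands are copied into temporaries where needed (so they can be
 * reused without aliasing the result), the operation is dispatched to the
 * helpers above or broken down column-wise in place, and the original
 * assignment is removed.
 */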
ir_visitor_status
ir_mat_op_to_vec_visitor::visit_leave(ir_assignment *orig_assign)
{
   ir_expression *orig_expr = orig_assign->rhs->as_expression();
   unsigned int i, matrix_columns = 1;
   ir_dereference *op[2];

   if (!orig_expr)
      return visit_continue;

   if (!has_matrix_operand(orig_expr, matrix_columns))
      return visit_continue;

   assert(orig_expr->get_num_operands() <= 2);

   mem_ctx = ralloc_parent(orig_assign);

   ir_dereference_variable *result =
      orig_assign->lhs->as_dereference_variable();
   assert(result);

   /* Store the expression operands in temps so we can use them
    * multiple times.
    */
   for (i = 0; i < orig_expr->get_num_operands(); i++) {
      ir_assignment *assign;
      ir_dereference *deref = orig_expr->operands[i]->as_dereference();

      /* Skip the temporary when the operand is already a dereference
       * that cannot alias the result.
       */
      if (deref &&
          deref->variable_referenced() != result->variable_referenced()) {
         op[i] = deref;
         continue;
      }

      /* Otherwise copy the operand into a temporary, either because it
       * is not a simple dereference or because it aliases the result.
       */
      ir_variable *var = new(mem_ctx) ir_variable(orig_expr->operands[i]->type,
                                                  "mat_op_to_vec",
                                                  ir_var_temporary);
      base_ir->insert_before(var);

      /* Note that this dereference is consumed by the assignment below,
       * so anything else that wants to use op[i] has to clone the deref.
       */
      op[i] = new(mem_ctx) ir_dereference_variable(var);
      assign = new(mem_ctx) ir_assignment(op[i], orig_expr->operands[i]);
      base_ir->insert_before(assign);
   }

   /* OK, time to break down this matrix operation. */
   switch (orig_expr->operation) {
   case ir_unop_d2f:
   case ir_unop_f2d:
   case ir_unop_neg: {
      /* Apply the operation to each column. */
      for (i = 0; i < matrix_columns; i++) {
         ir_expression *column_expr;
         ir_assignment *column_assign;

         column_expr = new(mem_ctx) ir_expression(orig_expr->operation,
                                                  get_column(op[0], i));

         column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
                                                    column_expr);
         assert(column_assign->write_mask != 0);
         base_ir->insert_before(column_assign);
      }
      break;
   }
   case ir_binop_add:
   case ir_binop_sub:
   case ir_binop_div:
   case ir_binop_mod: {
      /* For these operations the matrix version just walks through the
       * columns, applying the operation to each pair of corresponding
       * columns.
       */
      for (i = 0; i < matrix_columns; i++) {
         ir_expression *column_expr;
         ir_assignment *column_assign;

         column_expr = new(mem_ctx) ir_expression(orig_expr->operation,
                                                  get_column(op[0], i),
                                                  get_column(op[1], i));

         column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
                                                    column_expr);
         assert(column_assign->write_mask != 0);
         base_ir->insert_before(column_assign);
      }
      break;
   }
   case ir_binop_mul:
      if (op[0]->type->is_matrix()) {
         if (op[1]->type->is_matrix()) {
            do_mul_mat_mat(result, op[0], op[1]);
         } else if (op[1]->type->is_vector()) {
            do_mul_mat_vec(result, op[0], op[1]);
         } else {
            assert(op[1]->type->is_scalar());
            do_mul_mat_scalar(result, op[0], op[1]);
         }
      } else {
         assert(op[1]->type->is_matrix());
         if (op[0]->type->is_vector()) {
            do_mul_vec_mat(result, op[0], op[1]);
         } else {
            assert(op[0]->type->is_scalar());
            do_mul_mat_scalar(result, op[1], op[0]);
         }
      }
      break;

   case ir_binop_all_equal:
   case ir_binop_any_nequal:
      do_equal_mat_mat(result, op[1], op[0],
                       (orig_expr->operation == ir_binop_all_equal));
      break;

   default:
      printf("FINISHME: Handle matrix operation for %s\n",
             ir_expression_operation_strings[orig_expr->operation]);
      abort();
   }
   orig_assign->remove();
   this->made_progress = true;

   return visit_continue;
}