brw_blorp_blit.cpp revision 8c1c18769ef4838b11065b353f6f62bfd1de1cd2
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "main/teximage.h"
25#include "main/fbobject.h"
26
27#include "glsl/ralloc.h"
28
29#include "intel_fbo.h"
30
31#include "brw_blorp.h"
32#include "brw_context.h"
33#include "brw_eu.h"
34#include "brw_state.h"
35
36
37/**
38 * Helper function for handling mirror image blits.
39 *
40 * If coord0 > coord1, swap them and invert the "mirror" boolean.
41 */
42static inline void
43fixup_mirroring(bool &mirror, GLint &coord0, GLint &coord1)
44{
45   if (coord0 > coord1) {
46      mirror = !mirror;
47      GLint tmp = coord0;
48      coord0 = coord1;
49      coord1 = tmp;
50   }
51}
52
53
54/**
55 * Adjust {src,dst}_x{0,1} to account for clipping and scissoring of
56 * destination coordinates.
57 *
58 * Return true if there is still blitting to do, false if all pixels got
59 * rejected by the clip and/or scissor.
60 *
61 * For clarity, the nomenclature of this function assumes we are clipping and
62 * scissoring the X coordinate; the exact same logic applies for Y
63 * coordinates.
64 *
65 * Note: this function may also be used to account for clipping of source
66 * coordinates, by swapping the roles of src and dst.
67 */
68static inline bool
69clip_or_scissor(bool mirror, GLint &src_x0, GLint &src_x1, GLint &dst_x0,
70                GLint &dst_x1, GLint fb_xmin, GLint fb_xmax)
71{
72   /* If we are going to scissor everything away, stop. */
73   if (!(fb_xmin < fb_xmax &&
74         dst_x0 < fb_xmax &&
75         fb_xmin < dst_x1 &&
76         dst_x0 < dst_x1)) {
77      return false;
78   }
79
80   /* Clip the destination rectangle, and keep track of how many pixels we
81    * clipped off of the left and right sides of it.
82    */
83   GLint pixels_clipped_left = 0;
84   GLint pixels_clipped_right = 0;
85   if (dst_x0 < fb_xmin) {
86      pixels_clipped_left = fb_xmin - dst_x0;
87      dst_x0 = fb_xmin;
88   }
89   if (fb_xmax < dst_x1) {
90      pixels_clipped_right = dst_x1 - fb_xmax;
91      dst_x1 = fb_xmax;
92   }
93
94   /* If we are mirrored, then before applying pixels_clipped_{left,right} to
95    * the source coordinates, we need to flip them to account for the
96    * mirroring.
97    */
98   if (mirror) {
99      GLint tmp = pixels_clipped_left;
100      pixels_clipped_left = pixels_clipped_right;
101      pixels_clipped_right = tmp;
102   }
103
104   /* Adjust the source rectangle to remove the pixels corresponding to those
105    * that were clipped/scissored out of the destination rectangle.
106    */
107   src_x0 += pixels_clipped_left;
108   src_x1 -= pixels_clipped_right;
109
110   return true;
111}
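/* A purely illustrative example of the logic above (not part of the driver):
 * blitting src [0, 8) to dst [-2, 6) with fb_xmin = 0 and fb_xmax = 6 clips
 * 2 pixels off the left of the destination, so the source rectangle becomes
 * [2, 8).  With mirror == true, the same 2 pixels instead come off the right
 * of the source, giving [0, 6).
 */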
112
113
114static struct intel_mipmap_tree *
115find_miptree(GLbitfield buffer_bit, struct intel_renderbuffer *irb)
116{
117   struct intel_mipmap_tree *mt = irb->mt;
118   if (buffer_bit == GL_STENCIL_BUFFER_BIT && mt->stencil_mt)
119      mt = mt->stencil_mt;
120   return mt;
121}
122
123void
124brw_blorp_blit_miptrees(struct intel_context *intel,
125                        struct intel_mipmap_tree *src_mt,
126                        unsigned src_level, unsigned src_layer,
127                        struct intel_mipmap_tree *dst_mt,
128                        unsigned dst_level, unsigned dst_layer,
129                        int src_x0, int src_y0,
130                        int dst_x0, int dst_y0,
131                        int dst_x1, int dst_y1,
132                        bool mirror_x, bool mirror_y)
133{
134   brw_blorp_blit_params params(brw_context(&intel->ctx),
135                                src_mt, src_level, src_layer,
136                                dst_mt, dst_level, dst_layer,
137                                src_x0, src_y0,
138                                dst_x0, dst_y0,
139                                dst_x1, dst_y1,
140                                mirror_x, mirror_y);
141   brw_blorp_exec(intel, &params);
142}
143
144static void
145do_blorp_blit(struct intel_context *intel, GLbitfield buffer_bit,
146              struct intel_renderbuffer *src_irb,
147              struct intel_renderbuffer *dst_irb,
148              GLint srcX0, GLint srcY0,
149              GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
150              bool mirror_x, bool mirror_y)
151{
152   /* Find source/dst miptrees */
153   struct intel_mipmap_tree *src_mt = find_miptree(buffer_bit, src_irb);
154   struct intel_mipmap_tree *dst_mt = find_miptree(buffer_bit, dst_irb);
155
156   /* Get ready to blit.  This includes depth resolving the src and dst
157    * buffers if necessary.
158    */
159   intel_renderbuffer_resolve_depth(intel, src_irb);
160   intel_renderbuffer_resolve_depth(intel, dst_irb);
161
162   /* Do the blit */
163   brw_blorp_blit_miptrees(intel,
164                           src_mt, src_irb->mt_level, src_irb->mt_layer,
165                           dst_mt, dst_irb->mt_level, dst_irb->mt_layer,
166                           srcX0, srcY0, dstX0, dstY0, dstX1, dstY1,
167                           mirror_x, mirror_y);
168
169   intel_renderbuffer_set_needs_hiz_resolve(dst_irb);
170   intel_renderbuffer_set_needs_downsample(dst_irb);
171}
172
173
174static bool
175formats_match(GLbitfield buffer_bit, struct intel_renderbuffer *src_irb,
176              struct intel_renderbuffer *dst_irb)
177{
178   /* Note: don't just check gl_renderbuffer::Format, because in some cases
179    * multiple gl_formats resolve to the same native type in the miptree (for
180    * example MESA_FORMAT_X8_Z24 and MESA_FORMAT_S8_Z24), and we can blit
181    * between those formats.
182    */
183   gl_format src_format = find_miptree(buffer_bit, src_irb)->format;
184   gl_format dst_format = find_miptree(buffer_bit, dst_irb)->format;
185
186   return _mesa_get_srgb_format_linear(src_format) ==
187          _mesa_get_srgb_format_linear(dst_format);
188}
189
190
191static bool
192try_blorp_blit(struct intel_context *intel,
193               GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
194               GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
195               GLenum filter, GLbitfield buffer_bit)
196{
197   struct gl_context *ctx = &intel->ctx;
198
199   /* Sync up the state of window system buffers.  We need to do this before
200    * we go looking for the buffers.
201    */
202   intel_prepare_render(intel);
203
204   const struct gl_framebuffer *read_fb = ctx->ReadBuffer;
205   const struct gl_framebuffer *draw_fb = ctx->DrawBuffer;
206
207   /* Detect if the blit needs to be mirrored */
208   bool mirror_x = false, mirror_y = false;
209   fixup_mirroring(mirror_x, srcX0, srcX1);
210   fixup_mirroring(mirror_x, dstX0, dstX1);
211   fixup_mirroring(mirror_y, srcY0, srcY1);
212   fixup_mirroring(mirror_y, dstY0, dstY1);
213
214   /* Make sure width and height match */
215   if (srcX1 - srcX0 != dstX1 - dstX0) return false;
216   if (srcY1 - srcY0 != dstY1 - dstY0) return false;
217
218   /* If the destination rectangle needs to be clipped or scissored, do so.
219    */
220   if (!(clip_or_scissor(mirror_x, srcX0, srcX1, dstX0, dstX1,
221                         draw_fb->_Xmin, draw_fb->_Xmax) &&
222         clip_or_scissor(mirror_y, srcY0, srcY1, dstY0, dstY1,
223                         draw_fb->_Ymin, draw_fb->_Ymax))) {
224      /* Everything got clipped/scissored away, so the blit was successful. */
225      return true;
226   }
227
228   /* If the source rectangle needs to be clipped or scissored, do so. */
229   if (!(clip_or_scissor(mirror_x, dstX0, dstX1, srcX0, srcX1,
230                         0, read_fb->Width) &&
231         clip_or_scissor(mirror_y, dstY0, dstY1, srcY0, srcY1,
232                         0, read_fb->Height))) {
233      /* Everything got clipped/scissored away, so the blit was successful. */
234      return true;
235   }
236
237   /* Account for the fact that in the system framebuffer, the origin is at
238    * the lower left.
239    */
240   if (_mesa_is_winsys_fbo(read_fb)) {
241      GLint tmp = read_fb->Height - srcY0;
242      srcY0 = read_fb->Height - srcY1;
243      srcY1 = tmp;
244      mirror_y = !mirror_y;
245   }
246   if (_mesa_is_winsys_fbo(draw_fb)) {
247      GLint tmp = draw_fb->Height - dstY0;
248      dstY0 = draw_fb->Height - dstY1;
249      dstY1 = tmp;
250      mirror_y = !mirror_y;
251   }
252
253   /* Find buffers */
254   struct intel_renderbuffer *src_irb;
255   struct intel_renderbuffer *dst_irb;
256   switch (buffer_bit) {
257   case GL_COLOR_BUFFER_BIT:
258      src_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
259      for (unsigned i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; ++i) {
260         dst_irb = intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i]);
261         if (dst_irb && !formats_match(buffer_bit, src_irb, dst_irb))
262            return false;
263      }
264      for (unsigned i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; ++i) {
265         dst_irb = intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i]);
266         do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
267                       dstX0, dstY0, dstX1, dstY1, mirror_x, mirror_y);
268      }
269      break;
270   case GL_DEPTH_BUFFER_BIT:
271      src_irb =
272         intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
273      dst_irb =
274         intel_renderbuffer(draw_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
275      if (!formats_match(buffer_bit, src_irb, dst_irb))
276         return false;
277      do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
278                    dstX0, dstY0, dstX1, dstY1, mirror_x, mirror_y);
279      break;
280   case GL_STENCIL_BUFFER_BIT:
281      src_irb =
282         intel_renderbuffer(read_fb->Attachment[BUFFER_STENCIL].Renderbuffer);
283      dst_irb =
284         intel_renderbuffer(draw_fb->Attachment[BUFFER_STENCIL].Renderbuffer);
285      if (!formats_match(buffer_bit, src_irb, dst_irb))
286         return false;
287      do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
288                    dstX0, dstY0, dstX1, dstY1, mirror_x, mirror_y);
289      break;
290   default:
291      assert(false);
292   }
293
294   return true;
295}
296
297GLbitfield
298brw_blorp_framebuffer(struct intel_context *intel,
299                      GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
300                      GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
301                      GLbitfield mask, GLenum filter)
302{
303   /* BLORP is not supported before Gen6. */
304   if (intel->gen < 6)
305      return mask;
306
307   static GLbitfield buffer_bits[] = {
308      GL_COLOR_BUFFER_BIT,
309      GL_DEPTH_BUFFER_BIT,
310      GL_STENCIL_BUFFER_BIT,
311   };
312
313   for (unsigned int i = 0; i < ARRAY_SIZE(buffer_bits); ++i) {
314      if ((mask & buffer_bits[i]) &&
315       try_blorp_blit(intel,
316                      srcX0, srcY0, srcX1, srcY1,
317                      dstX0, dstY0, dstX1, dstY1,
318                      filter, buffer_bits[i])) {
319         mask &= ~buffer_bits[i];
320      }
321   }
322
323   return mask;
324}
325
326
327/**
328 * Enum to specify the order of arguments in a sampler message
329 */
330enum sampler_message_arg
331{
332   SAMPLER_MESSAGE_ARG_U_FLOAT,
333   SAMPLER_MESSAGE_ARG_V_FLOAT,
334   SAMPLER_MESSAGE_ARG_U_INT,
335   SAMPLER_MESSAGE_ARG_V_INT,
336   SAMPLER_MESSAGE_ARG_SI_INT,
337   SAMPLER_MESSAGE_ARG_MCS_INT,
338   SAMPLER_MESSAGE_ARG_ZERO_INT,
339};
340
341/**
342 * Generator for WM programs used in BLORP blits.
343 *
344 * The bulk of the work done by the WM program is to wrap and unwrap the
345 * coordinate transformations used by the hardware to store surfaces in
346 * memory.  The hardware transforms a pixel location (X, Y, S) (where S is the
347 * sample index for a multisampled surface) to a memory offset by the
348 * following formulas:
349 *
350 *   offset = tile(tiling_format, encode_msaa(num_samples, layout, X, Y, S))
351 *   (X, Y, S) = decode_msaa(num_samples, layout, detile(tiling_format, offset))
352 *
353 * For a single-sampled surface, or for a multisampled surface using
354 * INTEL_MSAA_LAYOUT_UMS, encode_msaa() and decode_msaa() are the identity
355 * function:
356 *
357 *   encode_msaa(1, NONE, X, Y, 0) = (X, Y, 0)
358 *   decode_msaa(1, NONE, X, Y, 0) = (X, Y, 0)
359 *   encode_msaa(n, UMS, X, Y, S) = (X, Y, S)
360 *   decode_msaa(n, UMS, X, Y, S) = (X, Y, S)
361 *
362 * For a 4x multisampled surface using INTEL_MSAA_LAYOUT_IMS, encode_msaa()
363 * embeds the sample number into bit 1 of the X and Y coordinates:
364 *
365 *   encode_msaa(4, IMS, X, Y, S) = (X', Y', 0)
366 *     where X' = (X & ~0b1) << 1 | (S & 0b1) << 1 | (X & 0b1)
367 *           Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
368 *   decode_msaa(4, IMS, X, Y, 0) = (X', Y', S)
369 *     where X' = (X & ~0b11) >> 1 | (X & 0b1)
370 *           Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
371 *           S = (Y & 0b10) | (X & 0b10) >> 1
372 *
373 * For an 8x multisampled surface using INTEL_MSAA_LAYOUT_IMS, encode_msaa()
374 * embeds the sample number into bits 1 and 2 of the X coordinate and bit 1 of
375 * the Y coordinate:
376 *
377 *   encode_msaa(8, IMS, X, Y, S) = (X', Y', 0)
378 *     where X' = (X & ~0b1) << 2 | (S & 0b100) | (S & 0b1) << 1 | (X & 0b1)
379 *           Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
380 *   decode_msaa(8, IMS, X, Y, 0) = (X', Y', S)
381 *     where X' = (X & ~0b111) >> 2 | (X & 0b1)
382 *           Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
383 *           S = (X & 0b100) | (Y & 0b10) | (X & 0b10) >> 1
384 *
385 * For X tiling, tile() combines together the low-order bits of the X and Y
386 * coordinates in the pattern 0byyyxxxxxxxxx, creating 4k tiles that are 512
387 * bytes wide and 8 rows high:
388 *
389 *   tile(x_tiled, X, Y, S) = A
390 *     where A = tile_num << 12 | offset
391 *           tile_num = (Y' >> 3) * tile_pitch + (X' >> 9)
392 *           offset = (Y' & 0b111) << 9
393 *                    | (X' & 0b111111111)
394 *           X' = X * cpp
395 *           Y' = Y + S * qpitch
396 *   detile(x_tiled, A) = (X, Y, S)
397 *     where X = X' / cpp
398 *           Y = Y' % qpitch
399 *           S = Y' / qpitch
400 *           Y' = (tile_num / tile_pitch) << 3
401 *                | (A & 0b111000000000) >> 9
402 *           X' = (tile_num % tile_pitch) << 9
403 *                | (A & 0b111111111)
404 *
405 * (In all tiling formulas, cpp is the number of bytes occupied by a single
406 * sample ("chars per pixel"), tile_pitch is the number of 4k tiles required
407 * to fill the width of the surface, and qpitch is the spacing (in rows)
408 * between array slices).
409 *
410 * For Y tiling, tile() combines together the low-order bits of the X and Y
411 * coordinates in the pattern 0bxxxyyyyyxxxx, creating 4k tiles that are 128
412 * bytes wide and 32 rows high:
413 *
414 *   tile(y_tiled, X, Y, S) = A
415 *     where A = tile_num << 12 | offset
416 *           tile_num = (Y' >> 5) * tile_pitch + (X' >> 7)
417 *           offset = (X' & 0b1110000) << 5
418 *                    | (Y' & 0b11111) << 4
419 *                    | (X' & 0b1111)
420 *           X' = X * cpp
421 *           Y' = Y + S * qpitch
422 *   detile(y_tiled, A) = (X, Y, S)
423 *     where X = X' / cpp
424 *           Y = Y' % qpitch
425 *           S = Y' / qpitch
426 *           Y' = (tile_num / tile_pitch) << 5
427 *                | (A & 0b111110000) >> 4
428 *           X' = (tile_num % tile_pitch) << 7
429 *                | (A & 0b111000000000) >> 5
430 *                | (A & 0b1111)
431 *
432 * For W tiling, tile() combines together the low-order bits of the X and Y
433 * coordinates in the pattern 0bxxxyyyyxyxyx, creating 4k tiles that are 64
434 * bytes wide and 64 rows high (note that W tiling is only used for stencil
435 * buffers, which always have cpp = 1 and S=0):
436 *
437 *   tile(w_tiled, X, Y, S) = A
438 *     where A = tile_num << 12 | offset
439 *           tile_num = (Y' >> 6) * tile_pitch + (X' >> 6)
440 *           offset = (X' & 0b111000) << 6
441 *                    | (Y' & 0b111100) << 3
442 *                    | (X' & 0b100) << 2
443 *                    | (Y' & 0b10) << 2
444 *                    | (X' & 0b10) << 1
445 *                    | (Y' & 0b1) << 1
446 *                    | (X' & 0b1)
447 *           X' = X * cpp = X
448 *           Y' = Y + S * qpitch
449 *   detile(w_tiled, A) = (X, Y, S)
450 *     where X = X' / cpp = X'
451 *           Y = Y' % qpitch = Y'
452 *           S = Y / qpitch = 0
453 *           Y' = (tile_num / tile_pitch) << 6
454 *                | (A & 0b111100000) >> 3
455 *                | (A & 0b1000) >> 2
456 *                | (A & 0b10) >> 1
457 *           X' = (tile_num % tile_pitch) << 6
458 *                | (A & 0b111000000000) >> 6
459 *                | (A & 0b10000) >> 2
460 *                | (A & 0b100) >> 1
461 *                | (A & 0b1)
462 *
463 * Finally, for a non-tiled surface, tile() simply combines together the X and
464 * Y coordinates in the natural way:
465 *
466 *   tile(untiled, X, Y, S) = A
467 *     where A = Y' * pitch + X'
468 *           X' = X * cpp
469 *           Y' = Y + S * qpitch
470 *   detile(untiled, A) = (X, Y, S)
471 *     where X = X' / cpp
472 *           Y = Y' % qpitch
473 *           S = Y' / qpitch
474 *           X' = A % pitch
475 *           Y' = A / pitch
476 *
477 * (In these formulas, pitch is the number of bytes occupied by a single row
478 * of samples).
479 */
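/* As a rough illustration of the Y tiling formula documented above, here is a
 * plain C sketch; it is not used by the driver, and the helper name and
 * parameter names are hypothetical.
 */
#if 0
static unsigned
y_tiled_offset(unsigned X, unsigned Y, unsigned S,
               unsigned cpp, unsigned qpitch, unsigned tile_pitch)
{
   unsigned Xp = X * cpp;              /* X' */
   unsigned Yp = Y + S * qpitch;       /* Y' */
   unsigned tile_num = (Yp >> 5) * tile_pitch + (Xp >> 7);
   unsigned offset = (Xp & 0x70) << 5  /* (X' & 0b1110000) << 5 */
                   | (Yp & 0x1f) << 4  /* (Y' & 0b11111) << 4 */
                   | (Xp & 0xf);       /* (X' & 0b1111) */
   return tile_num << 12 | offset;
}
#endif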
480class brw_blorp_blit_program
481{
482public:
483   brw_blorp_blit_program(struct brw_context *brw,
484                          const brw_blorp_blit_prog_key *key);
485   ~brw_blorp_blit_program();
486
487   const GLuint *compile(struct brw_context *brw, GLuint *program_size);
488
489   brw_blorp_prog_data prog_data;
490
491private:
492   void alloc_regs();
493   void alloc_push_const_regs(int base_reg);
494   void compute_frag_coords();
495   void translate_tiling(bool old_tiled_w, bool new_tiled_w);
496   void encode_msaa(unsigned num_samples, intel_msaa_layout layout);
497   void decode_msaa(unsigned num_samples, intel_msaa_layout layout);
498   void kill_if_outside_dst_rect();
499   void translate_dst_to_src();
500   void single_to_blend();
501   void manual_blend(unsigned num_samples);
502   void sample(struct brw_reg dst);
503   void texel_fetch(struct brw_reg dst);
504   void mcs_fetch();
505   void expand_to_32_bits(struct brw_reg src, struct brw_reg dst);
506   void texture_lookup(struct brw_reg dst, GLuint msg_type,
507                       const sampler_message_arg *args, int num_args);
508   void render_target_write();
509
510   /**
511    * Base-2 logarithm of the maximum number of samples that can be blended.
512    */
513   static const unsigned LOG2_MAX_BLEND_SAMPLES = 3;
514
515   void *mem_ctx;
516   struct brw_context *brw;
517   const brw_blorp_blit_prog_key *key;
518   struct brw_compile func;
519
520   /* Thread dispatch header */
521   struct brw_reg R0;
522
523   /* Pixel X/Y coordinates (always in R1). */
524   struct brw_reg R1;
525
526   /* Push constants */
527   struct brw_reg dst_x0;
528   struct brw_reg dst_x1;
529   struct brw_reg dst_y0;
530   struct brw_reg dst_y1;
531   struct {
532      struct brw_reg multiplier;
533      struct brw_reg offset;
534   } x_transform, y_transform;
535
536   /* Data read from texture (4 vec16's per array element) */
537   struct brw_reg texture_data[LOG2_MAX_BLEND_SAMPLES + 1];
538
539   /* Auxiliary storage for the contents of the MCS surface.
540    *
541    * Since the sampler always returns 8 registers worth of data, this is 8
542    * registers wide, even though we only use the first 2 registers of it.
543    */
544   struct brw_reg mcs_data;
545
546   /* X coordinates.  We have two of them so that we can perform coordinate
547    * transformations easily.
548    */
549   struct brw_reg x_coords[2];
550
551   /* Y coordinates.  We have two of them so that we can perform coordinate
552    * transformations easily.
553    */
554   struct brw_reg y_coords[2];
555
556   /* Which element of x_coords and y_coords is currently in use.
557    */
558   int xy_coord_index;
559
560   /* True if, at the point in the program currently being compiled, the
561    * sample index is known to be zero.
562    */
563   bool s_is_zero;
564
565   /* Register storing the sample index when s_is_zero is false. */
566   struct brw_reg sample_index;
567
568   /* Temporaries */
569   struct brw_reg t1;
570   struct brw_reg t2;
571
572   /* MRF used for sampling and render target writes */
573   GLuint base_mrf;
574};
575
576brw_blorp_blit_program::brw_blorp_blit_program(
577      struct brw_context *brw,
578      const brw_blorp_blit_prog_key *key)
579   : mem_ctx(ralloc_context(NULL)),
580     brw(brw),
581     key(key)
582{
583   brw_init_compile(brw, &func, mem_ctx);
584}
585
586brw_blorp_blit_program::~brw_blorp_blit_program()
587{
588   ralloc_free(mem_ctx);
589}
590
591const GLuint *
592brw_blorp_blit_program::compile(struct brw_context *brw,
593                                GLuint *program_size)
594{
595   /* Sanity checks */
596   if (key->dst_tiled_w && key->rt_samples > 0) {
597      /* If the destination image is W tiled and multisampled, then the thread
598       * must be dispatched once per sample, not once per pixel.  This is
599       * necessary because after conversion between W and Y tiling, there's no
600       * guarantee that all samples corresponding to a single pixel will still
601       * be together.
602       */
603      assert(key->persample_msaa_dispatch);
604   }
605
606   if (key->blend) {
607      /* We are blending, which means we won't have an opportunity to
608       * translate the tiling and sample count for the texture surface.  So
609       * the surface state for the texture must be configured with the correct
610       * tiling and sample count.
611       */
612      assert(!key->src_tiled_w);
613      assert(key->tex_samples == key->src_samples);
614      assert(key->tex_layout == key->src_layout);
615      assert(key->tex_samples > 0);
616   }
617
618   if (key->persample_msaa_dispatch) {
619      /* It only makes sense to do persample dispatch if the render target is
620       * configured as multisampled.
621       */
622      assert(key->rt_samples > 0);
623   }
624
625   /* Make sure layout is consistent with sample count */
626   assert((key->tex_layout == INTEL_MSAA_LAYOUT_NONE) ==
627          (key->tex_samples == 0));
628   assert((key->rt_layout == INTEL_MSAA_LAYOUT_NONE) ==
629          (key->rt_samples == 0));
630   assert((key->src_layout == INTEL_MSAA_LAYOUT_NONE) ==
631          (key->src_samples == 0));
632   assert((key->dst_layout == INTEL_MSAA_LAYOUT_NONE) ==
633          (key->dst_samples == 0));
634
635   /* Set up prog_data */
636   memset(&prog_data, 0, sizeof(prog_data));
637   prog_data.persample_msaa_dispatch = key->persample_msaa_dispatch;
638
639   brw_set_compression_control(&func, BRW_COMPRESSION_NONE);
640
641   alloc_regs();
642   compute_frag_coords();
643
644   /* Render target and texture hardware don't support W tiling. */
645   const bool rt_tiled_w = false;
646   const bool tex_tiled_w = false;
647
648   /* The address that data will be written to is determined by the
649    * coordinates supplied to the WM thread and the tiling and sample count of
650    * the render target, according to the formula:
651    *
652    * (X, Y, S) = decode_msaa(rt_samples, detile(rt_tiling, offset))
653    *
654    * If the actual tiling and sample count of the destination surface are not
655    * the same as the configuration of the render target, then these
656    * coordinates are wrong and we have to adjust them to compensate for the
657    * difference.
658    */
659   if (rt_tiled_w != key->dst_tiled_w ||
660       key->rt_samples != key->dst_samples ||
661       key->rt_layout != key->dst_layout) {
662      encode_msaa(key->rt_samples, key->rt_layout);
663      /* Now (X, Y, S) = detile(rt_tiling, offset) */
664      translate_tiling(rt_tiled_w, key->dst_tiled_w);
665      /* Now (X, Y, S) = detile(dst_tiling, offset) */
666      decode_msaa(key->dst_samples, key->dst_layout);
667   }
668
669   /* Now (X, Y, S) = decode_msaa(dst_samples, detile(dst_tiling, offset)).
670    *
671    * That is: X, Y and S now contain the true coordinates and sample index of
672    * the data that the WM thread should output.
673    *
674    * If we need to kill pixels that are outside the destination rectangle,
675    * now is the time to do it.
676    */
677
678   if (key->use_kill)
679      kill_if_outside_dst_rect();
680
681   /* Next, apply a translation to obtain coordinates in the source image. */
682   translate_dst_to_src();
683
684   /* If the source image is not multisampled, then we want to fetch sample
685    * number 0, because that's the only sample there is.
686    */
687   if (key->src_samples == 0)
688      s_is_zero = true;
689
690   /* X, Y, and S are now the coordinates of the pixel in the source image
691    * that we want to texture from.  Exception: if we are blending, then S is
692    * irrelevant, because we are going to fetch all samples.
693    */
694   if (key->blend) {
695      if (brw->intel.gen == 6) {
696         /* Gen6 hardware can automatically blend using the SAMPLE message */
697         single_to_blend();
698         sample(texture_data[0]);
699      } else {
700         /* Gen7+ hardware doesn't automatically blend. */
701         manual_blend(key->src_samples);
702      }
703   } else {
704      /* We aren't blending, which means we just want to fetch a single sample
705       * from the source surface.  The address that we want to fetch from is
706       * related to the X, Y and S values according to the formula:
707       *
708       * (X, Y, S) = decode_msaa(src_samples, detile(src_tiling, offset)).
709       *
710       * If the actual tiling and sample count of the source surface are not
711       * the same as the configuration of the texture, then we need to adjust
712       * the coordinates to compensate for the difference.
713       */
714      if (tex_tiled_w != key->src_tiled_w ||
715          key->tex_samples != key->src_samples ||
716          key->tex_layout != key->src_layout) {
717         encode_msaa(key->src_samples, key->src_layout);
718         /* Now (X, Y, S) = detile(src_tiling, offset) */
719         translate_tiling(key->src_tiled_w, tex_tiled_w);
720         /* Now (X, Y, S) = detile(tex_tiling, offset) */
721         decode_msaa(key->tex_samples, key->tex_layout);
722      }
723
724      /* Now (X, Y, S) = decode_msaa(tex_samples, detile(tex_tiling, offset)).
725       *
726       * In other words: X, Y, and S now contain values which, when passed to
727       * the texturing unit, will cause data to be read from the correct
728       * memory location.  So we can fetch the texel now.
729       */
730      if (key->tex_layout == INTEL_MSAA_LAYOUT_CMS)
731         mcs_fetch();
732      texel_fetch(texture_data[0]);
733   }
734
735   /* Finally, write the fetched (or blended) value to the render target and
736    * terminate the thread.
737    */
738   render_target_write();
739   return brw_get_program(&func, program_size);
740}
741
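/**
 * Assign registers to the push constants, mirroring the layout of
 * brw_blorp_wm_push_constants within the GRF that starts at base_reg.
 */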
742void
743brw_blorp_blit_program::alloc_push_const_regs(int base_reg)
744{
745#define CONST_LOC(name) offsetof(brw_blorp_wm_push_constants, name)
746#define ALLOC_REG(name) \
747   this->name = \
748      brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, base_reg, CONST_LOC(name) / 2)
749
750   ALLOC_REG(dst_x0);
751   ALLOC_REG(dst_x1);
752   ALLOC_REG(dst_y0);
753   ALLOC_REG(dst_y1);
754   ALLOC_REG(x_transform.multiplier);
755   ALLOC_REG(x_transform.offset);
756   ALLOC_REG(y_transform.multiplier);
757   ALLOC_REG(y_transform.offset);
758#undef CONST_LOC
759#undef ALLOC_REG
760}
761
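/**
 * Allocate the registers used by the program: the thread payload (R0/R1), the
 * push constants, storage for texture and MCS data, the coordinate and sample
 * index registers, two temporaries, and the base MRF used for sampler and
 * render target messages.
 */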
762void
763brw_blorp_blit_program::alloc_regs()
764{
765   int reg = 0;
766   this->R0 = retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW);
767   this->R1 = retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW);
768   prog_data.first_curbe_grf = reg;
769   alloc_push_const_regs(reg);
770   reg += BRW_BLORP_NUM_PUSH_CONST_REGS;
771   for (unsigned i = 0; i < ARRAY_SIZE(texture_data); ++i) {
772      this->texture_data[i] =
773         retype(vec16(brw_vec8_grf(reg, 0)), key->texture_data_type);
774      reg += 8;
775   }
776   this->mcs_data =
777      retype(brw_vec8_grf(reg, 0), BRW_REGISTER_TYPE_UD); reg += 8;
778   for (int i = 0; i < 2; ++i) {
779      this->x_coords[i]
780         = vec16(retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW));
781      this->y_coords[i]
782         = vec16(retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW));
783   }
784   this->xy_coord_index = 0;
785   this->sample_index
786      = vec16(retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW));
787   this->t1 = vec16(retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW));
788   this->t2 = vec16(retype(brw_vec8_grf(reg++, 0), BRW_REGISTER_TYPE_UW));
789
790   /* Make sure we didn't run out of registers */
791   assert(reg <= GEN7_MRF_HACK_START);
792
793   int mrf = 2;
794   this->base_mrf = mrf;
795}
796
797/* In the code that follows, X and Y can be used to quickly refer to the
798 * active elements of x_coords and y_coords, and Xp and Yp ("X prime" and "Y
799 * prime") to the inactive elements.
800 *
801 * S can be used to quickly refer to sample_index.
802 */
803#define X x_coords[xy_coord_index]
804#define Y y_coords[xy_coord_index]
805#define Xp x_coords[!xy_coord_index]
806#define Yp y_coords[!xy_coord_index]
807#define S sample_index
808
809/* Quickly swap the roles of (X, Y) and (Xp, Yp).  Saves us from having to do
810 * MOVs to transfer (Xp, Yp) to (X, Y) after a coordinate transformation.
811 */
812#define SWAP_XY_AND_XPYP() xy_coord_index = !xy_coord_index;
813
814/**
815 * Emit code to compute the X and Y coordinates of the pixels being rendered
816 * by this WM invocation.
817 *
818 * Assuming the render target is set up for Y tiling, these (X, Y) values are
819 * related to the address offset where outputs will be written by the formula:
820 *
821 *   (X, Y, S) = decode_msaa(detile(offset)).
822 *
823 * (See brw_blorp_blit_program).
824 */
825void
826brw_blorp_blit_program::compute_frag_coords()
827{
828   /* R1.2[15:0] = X coordinate of upper left pixel of subspan 0 (pixel 0)
829    * R1.3[15:0] = X coordinate of upper left pixel of subspan 1 (pixel 4)
830    * R1.4[15:0] = X coordinate of upper left pixel of subspan 2 (pixel 8)
831    * R1.5[15:0] = X coordinate of upper left pixel of subspan 3 (pixel 12)
832    *
833    * Pixels within a subspan are laid out in this arrangement:
834    * 0 1
835    * 2 3
836    *
837    * So, to compute the coordinates of each pixel, we need to read every 2nd
838    * 16-bit value (vstride=2) from R1, starting at the 4th 16-bit value
839    * (suboffset=4), and duplicate each value 4 times (hstride=0, width=4).
840    * In other words, the data we want to access is R1.4<2;4,0>UW.
841    *
842    * Then, we need to add the repeating sequence (0, 1, 0, 1, ...) to the
843    * result, since pixels n+1 and n+3 are in the right half of the subspan.
844    */
845   brw_ADD(&func, X, stride(suboffset(R1, 4), 2, 4, 0), brw_imm_v(0x10101010));
846
847   /* Similarly, Y coordinates for subspans come from R1.2[31:16] through
848    * R1.5[31:16], so to get pixel Y coordinates we need to start at the 5th
849    * 16-bit value instead of the 4th (R1.5<2;4,0>UW instead of
850    * R1.4<2;4,0>UW).
851    *
852    * And we need to add the repeating sequence (0, 0, 1, 1, ...), since
853    * pixels n+2 and n+3 are in the bottom half of the subspan.
854    */
855   brw_ADD(&func, Y, stride(suboffset(R1, 5), 2, 4, 0), brw_imm_v(0x11001100));
856
857   if (key->persample_msaa_dispatch) {
858      switch (key->rt_samples) {
859      case 4:
860         /* The WM will be run in MSDISPMODE_PERSAMPLE with num_samples == 4.
861          * Therefore, subspan 0 will represent sample 0, subspan 1 will
862          * represent sample 1, and so on.
863          *
864          * So we need to populate S with the sequence (0, 0, 0, 0, 1, 1, 1,
865          * 1, 2, 2, 2, 2, 3, 3, 3, 3).  The easiest way to do this is to
866          * populate a temporary variable with the sequence (0, 1, 2, 3), and
867          * then copy from it using vstride=1, width=4, hstride=0.
868          */
869         brw_MOV(&func, t1, brw_imm_v(0x3210));
870         brw_MOV(&func, S, stride(t1, 1, 4, 0));
871         break;
872      case 8: {
873         /* The WM will be run in MSDISPMODE_PERSAMPLE with num_samples == 8.
874          * Therefore, subspan 0 will represent sample N (where N is 0 or 4),
875          * subspan 1 will represent sample N+1, and so on.  We can find the
876          * value of N by looking at R0.0 bits 7:6 ("Starting Sample Pair
877          * Index") and multiplying by two (since samples are always delivered
878          * in pairs).  That is, we compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 &
879          * 0xc0) >> 5.
880          *
881          * Then we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1, 2,
882          * 2, 2, 2, 3, 3, 3, 3), which we compute by populating a temporary
883          * variable with the sequence (0, 1, 2, 3), and then reading from it
884          * using vstride=1, width=4, hstride=0.
885          */
886         struct brw_reg t1_ud1 = vec1(retype(t1, BRW_REGISTER_TYPE_UD));
887         struct brw_reg r0_ud1 = vec1(retype(R0, BRW_REGISTER_TYPE_UD));
888         brw_AND(&func, t1_ud1, r0_ud1, brw_imm_ud(0xc0));
889         brw_SHR(&func, t1_ud1, t1_ud1, brw_imm_ud(5));
890         brw_MOV(&func, t2, brw_imm_v(0x3210));
891         brw_ADD(&func, S, retype(t1_ud1, BRW_REGISTER_TYPE_UW),
892                 stride(t2, 1, 4, 0));
893         break;
894      }
895      default:
896         assert(!"Unrecognized sample count in "
897                "brw_blorp_blit_program::compute_frag_coords()");
898         break;
899      }
900      s_is_zero = false;
901   } else {
902      /* Either the destination surface is single-sampled, or the WM will be
903       * run in MSDISPMODE_PERPIXEL (which causes a single fragment dispatch
904       * per pixel).  In either case, it's not meaningful to compute a sample
905       * value.  Just set it to 0.
906       */
907      s_is_zero = true;
908   }
909}
910
911/**
912 * Emit code to compensate for the difference between Y and W tiling.
913 *
914 * This code modifies the X and Y coordinates according to the formula:
915 *
916 *   (X', Y', S') = detile(new_tiling, tile(old_tiling, X, Y, S))
917 *
918 * (See brw_blorp_blit_program).
919 *
920 * It can only translate between W and Y tiling, so new_tiling and old_tiling
921 * are booleans where true represents W tiling and false represents Y tiling.
922 */
923void
924brw_blorp_blit_program::translate_tiling(bool old_tiled_w, bool new_tiled_w)
925{
926   if (old_tiled_w == new_tiled_w)
927      return;
928
929   /* In the code that follows, we can safely assume that S = 0, because W
930    * tiling formats always use IMS layout.
931    */
932   assert(s_is_zero);
933
934   if (new_tiled_w) {
935      /* Given X and Y coordinates that describe an address using Y tiling,
936       * translate to the X and Y coordinates that describe the same address
937       * using W tiling.
938       *
939       * If we break down the low order bits of X and Y, using a
940       * single letter to represent each low-order bit:
941       *
942       *   X = A << 7 | 0bBCDEFGH
943       *   Y = J << 5 | 0bKLMNP                                       (1)
944       *
945       * Then we can apply the Y tiling formula to see the memory offset being
946       * addressed:
947       *
948       *   offset = (J * tile_pitch + A) << 12 | 0bBCDKLMNPEFGH       (2)
949       * If we apply the W detiling formula to this memory location, we find
950       * that the corresponding X' and Y' coordinates are:
951       * corresponding X' and Y' coordinates are:
952       *
953       *   X' = A << 6 | 0bBCDPFH                                     (3)
954       *   Y' = J << 6 | 0bKLMNEG
955       *
956       * Combining (1) and (3), we see that to transform (X, Y) to (X', Y'),
957       * we need to make the following computation:
958       *
959       *   X' = (X & ~0b1011) >> 1 | (Y & 0b1) << 2 | X & 0b1         (4)
960       *   Y' = (Y & ~0b1) << 1 | (X & 0b1000) >> 2 | (X & 0b10) >> 1
961       */
962      brw_AND(&func, t1, X, brw_imm_uw(0xfff4)); /* X & ~0b1011 */
963      brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (X & ~0b1011) >> 1 */
964      brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
965      brw_SHL(&func, t2, t2, brw_imm_uw(2)); /* (Y & 0b1) << 2 */
966      brw_OR(&func, t1, t1, t2); /* (X & ~0b1011) >> 1 | (Y & 0b1) << 2 */
967      brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
968      brw_OR(&func, Xp, t1, t2);
969      brw_AND(&func, t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
970      brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
971      brw_AND(&func, t2, X, brw_imm_uw(8)); /* X & 0b1000 */
972      brw_SHR(&func, t2, t2, brw_imm_uw(2)); /* (X & 0b1000) >> 2 */
973      brw_OR(&func, t1, t1, t2); /* (Y & ~0b1) << 1 | (X & 0b1000) >> 2 */
974      brw_AND(&func, t2, X, brw_imm_uw(2)); /* X & 0b10 */
975      brw_SHR(&func, t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
976      brw_OR(&func, Yp, t1, t2);
977      SWAP_XY_AND_XPYP();
978   } else {
979      /* Applying the same logic as above, but in reverse, we obtain the
980       * formulas:
981       *
982       * X' = (X & ~0b101) << 1 | (Y & 0b10) << 2 | (Y & 0b1) << 1 | X & 0b1
983       * Y' = (Y & ~0b11) >> 1 | (X & 0b100) >> 2
984       */
985      brw_AND(&func, t1, X, brw_imm_uw(0xfffa)); /* X & ~0b101 */
986      brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (X & ~0b101) << 1 */
987      brw_AND(&func, t2, Y, brw_imm_uw(2)); /* Y & 0b10 */
988      brw_SHL(&func, t2, t2, brw_imm_uw(2)); /* (Y & 0b10) << 2 */
989      brw_OR(&func, t1, t1, t2); /* (X & ~0b101) << 1 | (Y & 0b10) << 2 */
990      brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
991      brw_SHL(&func, t2, t2, brw_imm_uw(1)); /* (Y & 0b1) << 1 */
992      brw_OR(&func, t1, t1, t2); /* (X & ~0b101) << 1 | (Y & 0b10) << 2
993                                    | (Y & 0b1) << 1 */
994      brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
995      brw_OR(&func, Xp, t1, t2);
996      brw_AND(&func, t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
997      brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
998      brw_AND(&func, t2, X, brw_imm_uw(4)); /* X & 0b100 */
999      brw_SHR(&func, t2, t2, brw_imm_uw(2)); /* (X & 0b100) >> 2 */
1000      brw_OR(&func, Yp, t1, t2);
1001      SWAP_XY_AND_XPYP();
1002   }
1003}
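/* A scalar C sketch of the Y-to-W coordinate swizzle (formula (4) above) and
 * its W-to-Y inverse; this is purely illustrative, the helper names are
 * hypothetical, and the code is not used by the driver.
 */
#if 0
static void
y_to_w_coords(unsigned X, unsigned Y, unsigned *Xp, unsigned *Yp)
{
   *Xp = (X & ~0xbu) >> 1 | (Y & 1u) << 2 | (X & 1u);
   *Yp = (Y & ~1u) << 1 | (X & 8u) >> 2 | (X & 2u) >> 1;
}

static void
w_to_y_coords(unsigned X, unsigned Y, unsigned *Xp, unsigned *Yp)
{
   *Xp = (X & ~5u) << 1 | (Y & 2u) << 2 | (Y & 1u) << 1 | (X & 1u);
   *Yp = (Y & ~3u) >> 1 | (X & 4u) >> 2;
}
#endif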
1004
1005/**
1006 * Emit code to compensate for the difference between MSAA and non-MSAA
1007 * surfaces.
1008 *
1009 * This code modifies the X and Y coordinates according to the formula:
1010 *
1011 *   (X', Y', S') = encode_msaa(num_samples, IMS, X, Y, S)
1012 *
1013 * (See brw_blorp_blit_program).
1014 */
1015void
1016brw_blorp_blit_program::encode_msaa(unsigned num_samples,
1017                                    intel_msaa_layout layout)
1018{
1019   switch (layout) {
1020   case INTEL_MSAA_LAYOUT_NONE:
1021      /* No translation necessary, and S should already be zero. */
1022      assert(s_is_zero);
1023      break;
1024   case INTEL_MSAA_LAYOUT_CMS:
1025      /* We can't compensate for compressed layout since at this point in the
1026       * program we haven't read from the MCS buffer.
1027       */
1028      assert(!"Bad layout in encode_msaa");
1029      break;
1030   case INTEL_MSAA_LAYOUT_UMS:
1031      /* No translation necessary. */
1032      break;
1033   case INTEL_MSAA_LAYOUT_IMS:
1034      switch (num_samples) {
1035      case 4:
1036         /* encode_msaa(4, IMS, X, Y, S) = (X', Y', 0)
1037          *   where X' = (X & ~0b1) << 1 | (S & 0b1) << 1 | (X & 0b1)
1038          *         Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
1039          */
1040         brw_AND(&func, t1, X, brw_imm_uw(0xfffe)); /* X & ~0b1 */
1041         if (!s_is_zero) {
1042            brw_AND(&func, t2, S, brw_imm_uw(1)); /* S & 0b1 */
1043            brw_OR(&func, t1, t1, t2); /* (X & ~0b1) | (S & 0b1) */
1044         }
1045         brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (X & ~0b1) << 1
1046                                                   | (S & 0b1) << 1 */
1047         brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
1048         brw_OR(&func, Xp, t1, t2);
1049         brw_AND(&func, t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
1050         brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
1051         if (!s_is_zero) {
1052            brw_AND(&func, t2, S, brw_imm_uw(2)); /* S & 0b10 */
1053            brw_OR(&func, t1, t1, t2); /* (Y & ~0b1) << 1 | (S & 0b10) */
1054         }
1055         brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
1056         brw_OR(&func, Yp, t1, t2);
1057         break;
1058      case 8:
1059         /* encode_msaa(8, IMS, X, Y, S) = (X', Y', 0)
1060          *   where X' = (X & ~0b1) << 2 | (S & 0b100) | (S & 0b1) << 1
1061          *              | (X & 0b1)
1062          *         Y' = (Y & ~0b1) << 1 | (S & 0b10) | (Y & 0b1)
1063          */
1064         brw_AND(&func, t1, X, brw_imm_uw(0xfffe)); /* X & ~0b1 */
1065         brw_SHL(&func, t1, t1, brw_imm_uw(2)); /* (X & ~0b1) << 2 */
1066         if (!s_is_zero) {
1067            brw_AND(&func, t2, S, brw_imm_uw(4)); /* S & 0b100 */
1068            brw_OR(&func, t1, t1, t2); /* (X & ~0b1) << 2 | (S & 0b100) */
1069            brw_AND(&func, t2, S, brw_imm_uw(1)); /* S & 0b1 */
1070            brw_SHL(&func, t2, t2, brw_imm_uw(1)); /* (S & 0b1) << 1 */
1071            brw_OR(&func, t1, t1, t2); /* (X & ~0b1) << 2 | (S & 0b100)
1072                                          | (S & 0b1) << 1 */
1073         }
1074         brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
1075         brw_OR(&func, Xp, t1, t2);
1076         brw_AND(&func, t1, Y, brw_imm_uw(0xfffe)); /* Y & ~0b1 */
1077         brw_SHL(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b1) << 1 */
1078         if (!s_is_zero) {
1079            brw_AND(&func, t2, S, brw_imm_uw(2)); /* S & 0b10 */
1080            brw_OR(&func, t1, t1, t2); /* (Y & ~0b1) << 1 | (S & 0b10) */
1081         }
1082         brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
1083         brw_OR(&func, Yp, t1, t2);
1084         break;
1085      }
1086      SWAP_XY_AND_XPYP();
1087      s_is_zero = true;
1088      break;
1089   }
1090}
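/* A scalar C sketch of the IMS encode_msaa() formulas implemented above; the
 * helper name is hypothetical and this code is not used by the driver.
 */
#if 0
static void
encode_msaa_ims(unsigned num_samples, unsigned X, unsigned Y, unsigned S,
                unsigned *Xp, unsigned *Yp)
{
   if (num_samples == 4)
      *Xp = (X & ~1u) << 1 | (S & 1u) << 1 | (X & 1u);
   else /* num_samples == 8 */
      *Xp = (X & ~1u) << 2 | (S & 4u) | (S & 1u) << 1 | (X & 1u);
   *Yp = (Y & ~1u) << 1 | (S & 2u) | (Y & 1u);
}
#endif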
1091
1092/**
1093 * Emit code to compensate for the difference between MSAA and non-MSAA
1094 * surfaces.
1095 *
1096 * This code modifies the X and Y coordinates according to the formula:
1097 *
1098 *   (X', Y', S) = decode_msaa(num_samples, IMS, X, Y, S)
1099 *
1100 * (See brw_blorp_blit_program).
1101 */
1102void
1103brw_blorp_blit_program::decode_msaa(unsigned num_samples,
1104                                    intel_msaa_layout layout)
1105{
1106   switch (layout) {
1107   case INTEL_MSAA_LAYOUT_NONE:
1108      /* No translation necessary, and S should already be zero. */
1109      assert(s_is_zero);
1110      break;
1111   case INTEL_MSAA_LAYOUT_CMS:
1112      /* We can't compensate for compressed layout since at this point in the
1113       * program we don't have access to the MCS buffer.
1114       */
1115      assert(!"Bad layout in decode_msaa");
1116      break;
1117   case INTEL_MSAA_LAYOUT_UMS:
1118      /* No translation necessary. */
1119      break;
1120   case INTEL_MSAA_LAYOUT_IMS:
1121      assert(s_is_zero);
1122      switch (num_samples) {
1123      case 4:
1124         /* decode_msaa(4, IMS, X, Y, 0) = (X', Y', S)
1125          *   where X' = (X & ~0b11) >> 1 | (X & 0b1)
1126          *         Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
1127          *         S = (Y & 0b10) | (X & 0b10) >> 1
1128          */
1129         brw_AND(&func, t1, X, brw_imm_uw(0xfffc)); /* X & ~0b11 */
1130         brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (X & ~0b11) >> 1 */
1131         brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
1132         brw_OR(&func, Xp, t1, t2);
1133         brw_AND(&func, t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
1134         brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
1135         brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
1136         brw_OR(&func, Yp, t1, t2);
1137         brw_AND(&func, t1, Y, brw_imm_uw(2)); /* Y & 0b10 */
1138         brw_AND(&func, t2, X, brw_imm_uw(2)); /* X & 0b10 */
1139         brw_SHR(&func, t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
1140         brw_OR(&func, S, t1, t2);
1141         break;
1142      case 8:
1143         /* decode_msaa(8, IMS, X, Y, 0) = (X', Y', S)
1144          *   where X' = (X & ~0b111) >> 2 | (X & 0b1)
1145          *         Y' = (Y & ~0b11) >> 1 | (Y & 0b1)
1146          *         S = (X & 0b100) | (Y & 0b10) | (X & 0b10) >> 1
1147          */
1148         brw_AND(&func, t1, X, brw_imm_uw(0xfff8)); /* X & ~0b111 */
1149         brw_SHR(&func, t1, t1, brw_imm_uw(2)); /* (X & ~0b111) >> 2 */
1150         brw_AND(&func, t2, X, brw_imm_uw(1)); /* X & 0b1 */
1151         brw_OR(&func, Xp, t1, t2);
1152         brw_AND(&func, t1, Y, brw_imm_uw(0xfffc)); /* Y & ~0b11 */
1153         brw_SHR(&func, t1, t1, brw_imm_uw(1)); /* (Y & ~0b11) >> 1 */
1154         brw_AND(&func, t2, Y, brw_imm_uw(1)); /* Y & 0b1 */
1155         brw_OR(&func, Yp, t1, t2);
1156         brw_AND(&func, t1, X, brw_imm_uw(4)); /* X & 0b100 */
1157         brw_AND(&func, t2, Y, brw_imm_uw(2)); /* Y & 0b10 */
1158         brw_OR(&func, t1, t1, t2); /* (X & 0b100) | (Y & 0b10) */
1159         brw_AND(&func, t2, X, brw_imm_uw(2)); /* X & 0b10 */
1160         brw_SHR(&func, t2, t2, brw_imm_uw(1)); /* (X & 0b10) >> 1 */
1161         brw_OR(&func, S, t1, t2);
1162         break;
1163      }
1164      s_is_zero = false;
1165      SWAP_XY_AND_XPYP();
1166      break;
1167   }
1168}
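/* A scalar C sketch of the IMS decode_msaa() formulas implemented above; the
 * helper name is hypothetical and this code is not used by the driver.
 */
#if 0
static void
decode_msaa_ims(unsigned num_samples, unsigned X, unsigned Y,
                unsigned *Xp, unsigned *Yp, unsigned *S)
{
   if (num_samples == 4) {
      *Xp = (X & ~3u) >> 1 | (X & 1u);
      *S = (Y & 2u) | (X & 2u) >> 1;
   } else { /* num_samples == 8 */
      *Xp = (X & ~7u) >> 2 | (X & 1u);
      *S = (X & 4u) | (Y & 2u) | (X & 2u) >> 1;
   }
   *Yp = (Y & ~3u) >> 1 | (Y & 1u);
}
#endif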
1169
1170/**
1171 * Emit code that kills pixels whose X and Y coordinates are outside the
1172 * boundary of the rectangle defined by the push constants (dst_x0, dst_y0,
1173 * dst_x1, dst_y1).
1174 */
1175void
1176brw_blorp_blit_program::kill_if_outside_dst_rect()
1177{
1178   struct brw_reg f0 = brw_flag_reg();
1179   struct brw_reg g1 = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
1180   struct brw_reg null16 = vec16(retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
1181
1182   brw_CMP(&func, null16, BRW_CONDITIONAL_GE, X, dst_x0);
1183   brw_CMP(&func, null16, BRW_CONDITIONAL_GE, Y, dst_y0);
1184   brw_CMP(&func, null16, BRW_CONDITIONAL_L, X, dst_x1);
1185   brw_CMP(&func, null16, BRW_CONDITIONAL_L, Y, dst_y1);
1186
1187   brw_set_predicate_control(&func, BRW_PREDICATE_NONE);
1188   brw_push_insn_state(&func);
1189   brw_set_mask_control(&func, BRW_MASK_DISABLE);
1190   brw_AND(&func, g1, f0, g1);
1191   brw_pop_insn_state(&func);
1192}
1193
1194/**
1195 * Emit code to translate from destination (X, Y) coordinates to source (X, Y)
1196 * coordinates.
1197 */
1198void
1199brw_blorp_blit_program::translate_dst_to_src()
1200{
1201   brw_MUL(&func, Xp, X, x_transform.multiplier);
1202   brw_MUL(&func, Yp, Y, y_transform.multiplier);
1203   brw_ADD(&func, Xp, Xp, x_transform.offset);
1204   brw_ADD(&func, Yp, Yp, y_transform.offset);
1205   SWAP_XY_AND_XPYP();
1206}
1207
1208/**
1209 * Emit code to transform the X and Y coordinates as needed for blending
1210 * together the different samples in an MSAA texture.
1211 */
1212void
1213brw_blorp_blit_program::single_to_blend()
1214{
1215   /* When looking up samples in an MSAA texture using the SAMPLE message,
1216    * Gen6 requires the texture coordinates to be odd integers (so that they
1217    * correspond to the center of a 2x2 block representing the four samples
1218    * that make up a pixel).  So we need to multiply our X and Y coordinates
1219    * each by 2 and then add 1.
1220    */
1221   brw_SHL(&func, t1, X, brw_imm_w(1));
1222   brw_SHL(&func, t2, Y, brw_imm_w(1));
1223   brw_ADD(&func, Xp, t1, brw_imm_w(1));
1224   brw_ADD(&func, Yp, t2, brw_imm_w(1));
1225   SWAP_XY_AND_XPYP();
1226}
1227
1228
1229/**
1230 * Count the number of trailing 1 bits in the given value.  For example:
1231 *
1232 * count_trailing_one_bits(0) == 0
1233 * count_trailing_one_bits(7) == 3
1234 * count_trailing_one_bits(11) == 2
1235 */
1236inline int count_trailing_one_bits(unsigned value)
1237{
1238#if defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) /* gcc 3.4 or later */
1239   return __builtin_ctz(~value);
1240#else
1241   return _mesa_bitcount(value & ~(value + 1));
1242#endif
1243}
1244
1245
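/**
 * Emit code to fetch all samples of the source pixel and blend (average) them
 * together manually, for use on hardware where the SAMPLE message does not do
 * the blending for us (Gen7+).
 */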
1246void
1247brw_blorp_blit_program::manual_blend(unsigned num_samples)
1248{
1249   if (key->tex_layout == INTEL_MSAA_LAYOUT_CMS)
1250      mcs_fetch();
1251
1252   /* We add together samples using a binary tree structure, e.g. for 4x MSAA:
1253    *
1254    *   result = ((sample[0] + sample[1]) + (sample[2] + sample[3])) / 4
1255    *
1256    * This ensures that when all samples have the same value, no numerical
1257    * precision is lost, since each addition operation always adds two equal
1258    * values, and summing two equal floating point values does not lose
1259    * precision.
1260    *
1261    * We perform this computation by treating the texture_data array as a
1262    * stack and performing the following operations:
1263    *
1264    * - push sample 0 onto stack
1265    * - push sample 1 onto stack
1266    * - add top two stack entries
1267    * - push sample 2 onto stack
1268    * - push sample 3 onto stack
1269    * - add top two stack entries
1270    * - add top two stack entries
1271    * - divide top stack entry by 4
1272    *
1273    * Note that after pushing sample i onto the stack, the number of add
1274    * operations we do is equal to the number of trailing 1 bits in i.  This
1275    * works provided the total number of samples is a power of two, which it
1276    * always is for i965.
1277    *
1278    * For integer formats, we replace the add operations with average
1279    * operations and skip the final division.
1280    */
1281   typedef struct brw_instruction *(*brw_op2_ptr)(struct brw_compile *,
1282                                                  struct brw_reg,
1283                                                  struct brw_reg,
1284                                                  struct brw_reg);
1285   brw_op2_ptr combine_op =
1286      key->texture_data_type == BRW_REGISTER_TYPE_F ? brw_ADD : brw_AVG;
1287   unsigned stack_depth = 0;
1288   for (unsigned i = 0; i < num_samples; ++i) {
1289      assert(stack_depth == _mesa_bitcount(i)); /* Loop invariant */
1290
1291      /* Push sample i onto the stack */
1292      assert(stack_depth < ARRAY_SIZE(texture_data));
1293      if (i == 0) {
1294         s_is_zero = true;
1295      } else {
1296         s_is_zero = false;
1297         brw_MOV(&func, S, brw_imm_uw(i));
1298      }
1299      texel_fetch(texture_data[stack_depth++]);
1300
1301      if (i == 0 && key->tex_layout == INTEL_MSAA_LAYOUT_CMS) {
1302         /* The Ivy Bridge PRM, Vol4 Part1 p27 (Multisample Control Surface)
1303          * suggests an optimization:
1304          *
1305          *     "A simple optimization with probable large return in
1306          *     performance is to compare the MCS value to zero (indicating
1307          *     all samples are on sample slice 0), and sample only from
1308          *     sample slice 0 using ld2dss if MCS is zero."
1309          *
1310          * Note that in the case where the MCS value is zero, sampling from
1311          * sample slice 0 using ld2dss and sampling from sample 0 using
1312          * ld2dms are equivalent (since all samples are on sample slice 0).
1313          * Since we have already sampled from sample 0, all we need to do is
1314          * skip the remaining fetches and averaging if MCS is zero.
1315          */
1316         brw_CMP(&func, vec16(brw_null_reg()), BRW_CONDITIONAL_NZ,
1317                 mcs_data, brw_imm_ud(0));
1318         brw_IF(&func, BRW_EXECUTE_16);
1319      }
1320
1321      /* Do count_trailing_one_bits(i) times */
1322      for (int j = count_trailing_one_bits(i); j-- > 0; ) {
1323         assert(stack_depth >= 2);
1324         --stack_depth;
1325
1326         /* TODO: should use a smaller loop bound for non-RGBA formats */
1327         for (int k = 0; k < 4; ++k) {
1328            combine_op(&func, offset(texture_data[stack_depth - 1], 2*k),
1329                       offset(vec8(texture_data[stack_depth - 1]), 2*k),
1330                       offset(vec8(texture_data[stack_depth]), 2*k));
1331         }
1332      }
1333   }
1334
1335   /* We should have just 1 sample on the stack now. */
1336   assert(stack_depth == 1);
1337
1338   if (key->texture_data_type == BRW_REGISTER_TYPE_F) {
1339      /* Scale the result down by a factor of num_samples */
1340      /* TODO: should use a smaller loop bound for non-RGBA formats */
1341      for (int j = 0; j < 4; ++j) {
1342         brw_MUL(&func, offset(texture_data[0], 2*j),
1343                 offset(vec8(texture_data[0]), 2*j),
1344                 brw_imm_f(1.0/num_samples));
1345      }
1346   }
1347
1348   if (key->tex_layout == INTEL_MSAA_LAYOUT_CMS)
1349      brw_ENDIF(&func);
1350}
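/* A scalar C sketch of the stack-based averaging scheme used above, for float
 * formats; the helper name is hypothetical, this is not driver code, and it
 * assumes num_samples is a power of two no larger than 8.
 */
#if 0
static float
blend_samples(const float *sample, unsigned num_samples)
{
   float stack[4];
   unsigned depth = 0;
   for (unsigned i = 0; i < num_samples; ++i) {
      stack[depth++] = sample[i];                 /* push sample i */
      /* combine count_trailing_one_bits(i) times */
      for (int j = count_trailing_one_bits(i); j > 0; --j) {
         --depth;
         stack[depth - 1] += stack[depth];
      }
   }
   return stack[0] / num_samples;                 /* final division */
}
#endif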
1351
1352/**
1353 * Emit code to look up a value in the texture using the SAMPLE message (which
1354 * does blending of MSAA surfaces).
1355 */
1356void
1357brw_blorp_blit_program::sample(struct brw_reg dst)
1358{
1359   static const sampler_message_arg args[2] = {
1360      SAMPLER_MESSAGE_ARG_U_FLOAT,
1361      SAMPLER_MESSAGE_ARG_V_FLOAT
1362   };
1363
1364   texture_lookup(dst, GEN5_SAMPLER_MESSAGE_SAMPLE, args, ARRAY_SIZE(args));
1365}
1366
1367/**
1368 * Emit code to look up a value in the texture using the SAMPLE_LD message
1369 * (which does a simple texel fetch).
1370 */
1371void
1372brw_blorp_blit_program::texel_fetch(struct brw_reg dst)
1373{
1374   static const sampler_message_arg gen6_args[5] = {
1375      SAMPLER_MESSAGE_ARG_U_INT,
1376      SAMPLER_MESSAGE_ARG_V_INT,
1377      SAMPLER_MESSAGE_ARG_ZERO_INT, /* R */
1378      SAMPLER_MESSAGE_ARG_ZERO_INT, /* LOD */
1379      SAMPLER_MESSAGE_ARG_SI_INT
1380   };
1381   static const sampler_message_arg gen7_ld_args[3] = {
1382      SAMPLER_MESSAGE_ARG_U_INT,
1383      SAMPLER_MESSAGE_ARG_ZERO_INT, /* LOD */
1384      SAMPLER_MESSAGE_ARG_V_INT
1385   };
1386   static const sampler_message_arg gen7_ld2dss_args[3] = {
1387      SAMPLER_MESSAGE_ARG_SI_INT,
1388      SAMPLER_MESSAGE_ARG_U_INT,
1389      SAMPLER_MESSAGE_ARG_V_INT
1390   };
1391   static const sampler_message_arg gen7_ld2dms_args[4] = {
1392      SAMPLER_MESSAGE_ARG_SI_INT,
1393      SAMPLER_MESSAGE_ARG_MCS_INT,
1394      SAMPLER_MESSAGE_ARG_U_INT,
1395      SAMPLER_MESSAGE_ARG_V_INT
1396   };
1397
1398   switch (brw->intel.gen) {
1399   case 6:
1400      texture_lookup(dst, GEN5_SAMPLER_MESSAGE_SAMPLE_LD, gen6_args,
1401                     s_is_zero ? 2 : 5);
1402      break;
1403   case 7:
1404      switch (key->tex_layout) {
1405      case INTEL_MSAA_LAYOUT_IMS:
1406         /* From the Ivy Bridge PRM, Vol4 Part1 p72 (Multisampled Surface Storage
1407          * Format):
1408          *
1409          *     If this field is MSFMT_DEPTH_STENCIL
1410          *     [a.k.a. INTEL_MSAA_LAYOUT_IMS], the only sampling engine
1411          *     messages allowed are "ld2dms", "resinfo", and "sampleinfo".
1412          *
1413          * So fall through to emit the same message as we use for
1414          * INTEL_MSAA_LAYOUT_CMS.
1415          */
1416      case INTEL_MSAA_LAYOUT_CMS:
1417         texture_lookup(dst, GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS,
1418                        gen7_ld2dms_args, ARRAY_SIZE(gen7_ld2dms_args));
1419         break;
1420      case INTEL_MSAA_LAYOUT_UMS:
1421         texture_lookup(dst, GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS,
1422                        gen7_ld2dss_args, ARRAY_SIZE(gen7_ld2dss_args));
1423         break;
1424      case INTEL_MSAA_LAYOUT_NONE:
1425         assert(s_is_zero);
1426         texture_lookup(dst, GEN5_SAMPLER_MESSAGE_SAMPLE_LD, gen7_ld_args,
1427                        ARRAY_SIZE(gen7_ld_args));
1428         break;
1429      }
1430      break;
1431   default:
1432      assert(!"Should not get here.");
1433      break;
1434   }
1435}
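
/* To summarize texel_fetch(): the sampler message and argument order used
 * for each case is
 *
 *    gen6:                  ld      (u, v, r = 0, lod = 0, si)
 *    gen7, IMS/CMS layout:  ld2dms  (si, mcs, u, v)
 *    gen7, UMS layout:      ld2dss  (si, u, v)
 *    gen7, non-MSAA:        ld      (u, lod = 0, v)
 *
 * On gen6, when the sample index is known to be zero, only (u, v) are sent.
 */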
1436
1437void
1438brw_blorp_blit_program::mcs_fetch()
1439{
1440   static const sampler_message_arg gen7_ld_mcs_args[2] = {
1441      SAMPLER_MESSAGE_ARG_U_INT,
1442      SAMPLER_MESSAGE_ARG_V_INT
1443   };
1444   texture_lookup(vec16(mcs_data), GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS,
1445                  gen7_ld_mcs_args, ARRAY_SIZE(gen7_ld_mcs_args));
1446}
1447
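/**
 * Emit code to expand the 16 16-bit values in src to 32 bits, writing the
 * result to dst (which spans two registers), so that the data can be used
 * as a SIMD16 sampler message argument.
 */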
1448void
1449brw_blorp_blit_program::expand_to_32_bits(struct brw_reg src,
1450                                          struct brw_reg dst)
1451{
1452   brw_MOV(&func, vec8(dst), vec8(src));
1453   brw_set_compression_control(&func, BRW_COMPRESSION_2NDHALF);
1454   brw_MOV(&func, offset(vec8(dst), 1), suboffset(vec8(src), 8));
1455   brw_set_compression_control(&func, BRW_COMPRESSION_NONE);
1456}
1457
1458void
1459brw_blorp_blit_program::texture_lookup(struct brw_reg dst,
1460                                       GLuint msg_type,
1461                                       const sampler_message_arg *args,
1462                                       int num_args)
1463{
1464   struct brw_reg mrf =
1465      retype(vec16(brw_message_reg(base_mrf)), BRW_REGISTER_TYPE_UD);
1466   for (int arg = 0; arg < num_args; ++arg) {
1467      switch (args[arg]) {
1468      case SAMPLER_MESSAGE_ARG_U_FLOAT:
1469         expand_to_32_bits(X, retype(mrf, BRW_REGISTER_TYPE_F));
1470         break;
1471      case SAMPLER_MESSAGE_ARG_V_FLOAT:
1472         expand_to_32_bits(Y, retype(mrf, BRW_REGISTER_TYPE_F));
1473         break;
1474      case SAMPLER_MESSAGE_ARG_U_INT:
1475         expand_to_32_bits(X, mrf);
1476         break;
1477      case SAMPLER_MESSAGE_ARG_V_INT:
1478         expand_to_32_bits(Y, mrf);
1479         break;
1480      case SAMPLER_MESSAGE_ARG_SI_INT:
1481         /* Note: on Gen7, this code may be reached with s_is_zero==true
1482          * because in Gen7's ld2dss message, the sample index is the first
1483          * argument.  When this happens, we need to move a 0 into the
1484          * appropriate message register.
1485          */
1486         if (s_is_zero)
1487            brw_MOV(&func, mrf, brw_imm_ud(0));
1488         else
1489            expand_to_32_bits(S, mrf);
1490         break;
1491      case SAMPLER_MESSAGE_ARG_MCS_INT:
1492         switch (key->tex_layout) {
1493         case INTEL_MSAA_LAYOUT_CMS:
1494            brw_MOV(&func, mrf, mcs_data);
1495            break;
1496         case INTEL_MSAA_LAYOUT_IMS:
1497            /* When sampling from an IMS surface, MCS data is not relevant,
1498             * and the hardware ignores it.  So don't bother populating it.
1499             */
1500            break;
1501         default:
1502            /* We shouldn't be trying to send MCS data with any other
1503             * layouts.
1504             */
1505            assert(!"Unsupported layout for MCS data");
1506            break;
1507         }
1508         break;
1509      case SAMPLER_MESSAGE_ARG_ZERO_INT:
1510         brw_MOV(&func, mrf, brw_imm_ud(0));
1511         break;
1512      }
1513      mrf.nr += 2;
1514   }
1515
1516   brw_SAMPLE(&func,
1517              retype(dst, BRW_REGISTER_TYPE_UW) /* dest */,
1518              base_mrf /* msg_reg_nr */,
1519              brw_message_reg(base_mrf) /* src0 */,
1520              BRW_BLORP_TEXTURE_BINDING_TABLE_INDEX,
1521              0 /* sampler */,
1522              WRITEMASK_XYZW,
1523              msg_type,
1524              8 /* response_length.  TODO: should be smaller for non-RGBA formats? */,
1525              mrf.nr - base_mrf /* msg_length */,
1526              0 /* header_present */,
1527              BRW_SAMPLER_SIMD_MODE_SIMD16,
1528              BRW_SAMPLER_RETURN_FORMAT_FLOAT32);
1529}
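
/* In texture_lookup(), each argument occupies two message registers, since
 * SIMD16 32-bit data spans a register pair.  For example, a gen7 ld2dms
 * lookup with base_mrf = m sends si in m/m+1, mcs in m+2/m+3, u in m+4/m+5
 * and v in m+6/m+7, giving msg_length = 8; the RGBA result likewise
 * occupies eight registers (two per channel), hence response_length = 8.
 */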
1530
1531#undef X
1532#undef Y
1533#undef U
1534#undef V
1535#undef S
1536#undef SWAP_XY_AND_XPYP
1537
1538void
1539brw_blorp_blit_program::render_target_write()
1540{
1541   struct brw_reg mrf_rt_write =
1542      retype(vec16(brw_message_reg(base_mrf)), key->texture_data_type);
1543   int mrf_offset = 0;
1544
1545   /* If we may have killed pixels, then we need to send R0 and R1 in a header
1546    * so that the render target knows which pixels we killed.
1547    */
1548   bool use_header = key->use_kill;
1549   if (use_header) {
1550      /* Copy R0/1 to MRF */
1551      brw_MOV(&func, retype(mrf_rt_write, BRW_REGISTER_TYPE_UD),
1552              retype(R0, BRW_REGISTER_TYPE_UD));
1553      mrf_offset += 2;
1554   }
1555
1556   /* Copy texture data to MRFs */
1557   for (int i = 0; i < 4; ++i) {
1558      /* E.g. mov(16) m2.0<1>:f r2.0<8;8,1>:f { Align1, H1 } */
1559      brw_MOV(&func, offset(mrf_rt_write, mrf_offset),
1560              offset(vec8(texture_data[0]), 2*i));
1561      mrf_offset += 2;
1562   }
1563
1564   /* Now write to the render target and terminate the thread */
1565   brw_fb_WRITE(&func,
1566                16 /* dispatch_width */,
1567                base_mrf /* msg_reg_nr */,
1568                mrf_rt_write /* src0 */,
1569                BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE,
1570                BRW_BLORP_RENDERBUFFER_BINDING_TABLE_INDEX,
1571                mrf_offset /* msg_length.  TODO: Should be smaller for non-RGBA formats. */,
1572                0 /* response_length */,
1573                true /* eot */,
1574                use_header);
1575}
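
/* The payload assembled by render_target_write() is therefore: two header
 * registers copied from R0/R1 when use_header is set (so the pixel mask
 * reflects any killed pixels), followed by eight registers of color data
 * (two SIMD16 registers per RGBA channel), for a msg_length of 10 with the
 * header or 8 without it.
 */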
1576
1577
1578void
1579brw_blorp_coord_transform_params::setup(GLuint src0, GLuint dst0, GLuint dst1,
1580                                        bool mirror)
1581{
1582   if (!mirror) {
1583      /* When not mirroring a coordinate (say, X), we need:
1584       *   x' - src_x0 = x - dst_x0
1585       * Therefore:
1586       *   x' = 1*x + (src_x0 - dst_x0)
1587       */
1588      multiplier = 1;
1589      offset = src0 - dst0;
1590   } else {
1591      /* When mirroring X we need:
1592       *   x' - src_x0 = dst_x1 - x - 1
1593       * Therefore:
1594       *   x' = -1*x + (src_x0 + dst_x1 - 1)
1595       */
1596      multiplier = -1;
1597      offset = src0 + dst1 - 1;
1598   }
1599}
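
/* For example (numbers chosen purely for illustration), consider a
 * 4-pixel-wide blit with src_x0 = 10, dst_x0 = 100 and dst_x1 = 104:
 *
 *    Not mirrored: multiplier = 1, offset = 10 - 100 = -90, so destination
 *    x = 100..103 reads from source x' = 10..13.
 *
 *    Mirrored: multiplier = -1, offset = 10 + 104 - 1 = 113, so destination
 *    x = 100 reads from x' = 13 and x = 103 reads from x' = 10.
 */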
1600
1601
1602/**
1603 * Determine which MSAA layout the GPU pipeline should be configured for,
1604 * based on the chip generation, the number of samples, and the true layout of
1605 * the image in memory.
1606 */
1607inline intel_msaa_layout
1608compute_msaa_layout_for_pipeline(struct brw_context *brw, unsigned num_samples,
1609                                 intel_msaa_layout true_layout)
1610{
1611   if (num_samples <= 1) {
1612      /* When configuring the GPU for non-MSAA, we can still accommodate IMS
1613       * format buffers by transforming coordinates appropriately.
1614       */
1615      assert(true_layout == INTEL_MSAA_LAYOUT_NONE ||
1616             true_layout == INTEL_MSAA_LAYOUT_IMS);
1617      return INTEL_MSAA_LAYOUT_NONE;
1618   } else {
1619      assert(true_layout != INTEL_MSAA_LAYOUT_NONE);
1620   }
1621
1622   /* Prior to Gen7, all MSAA surfaces use IMS layout. */
1623   if (brw->intel.gen == 6) {
1624      assert(true_layout == INTEL_MSAA_LAYOUT_IMS);
1625   }
1626
1627   return true_layout;
1628}
1629
1630
1631brw_blorp_blit_params::brw_blorp_blit_params(struct brw_context *brw,
1632                                             struct intel_mipmap_tree *src_mt,
1633                                             unsigned src_level, unsigned src_layer,
1634                                             struct intel_mipmap_tree *dst_mt,
1635                                             unsigned dst_level, unsigned dst_layer,
1636                                             GLuint src_x0, GLuint src_y0,
1637                                             GLuint dst_x0, GLuint dst_y0,
1638                                             GLuint dst_x1, GLuint dst_y1,
1639                                             bool mirror_x, bool mirror_y)
1640{
1641   src.set(brw, src_mt, src_level, src_layer);
1642   dst.set(brw, dst_mt, dst_level, dst_layer);
1643
1644   /* If we are blitting from sRGB to linear or vice versa, we still want the
1645    * blit to be a direct copy, so we need source and destination to use the
1646    * same format.  However, we want the destination sRGB/linear state to be
1647    * correct (so that sRGB blending is used when doing an MSAA resolve to an
1648    * sRGB surface, and linear blending is used when doing an MSAA resolve to
1649    * a linear surface).  Since blorp blits don't support any format
1650    * conversion (except between sRGB and linear), we can accomplish this by
1651    * simply setting up the source to use the same format as the destination.
1652    */
1653   assert(_mesa_get_srgb_format_linear(src_mt->format) ==
1654          _mesa_get_srgb_format_linear(dst_mt->format));
1655   src.brw_surfaceformat = dst.brw_surfaceformat;
1656
1657   use_wm_prog = true;
1658   memset(&wm_prog_key, 0, sizeof(wm_prog_key));
1659
1660   /* texture_data_type indicates the register type that should be used to
1661    * manipulate texture data.
1662    */
1663   switch (_mesa_get_format_datatype(src_mt->format)) {
1664   case GL_UNSIGNED_NORMALIZED:
1665   case GL_SIGNED_NORMALIZED:
1666   case GL_FLOAT:
1667      wm_prog_key.texture_data_type = BRW_REGISTER_TYPE_F;
1668      break;
1669   case GL_UNSIGNED_INT:
1670      if (src_mt->format == MESA_FORMAT_S8) {
1671         /* We process stencil as though it's an unsigned normalized color */
1672         wm_prog_key.texture_data_type = BRW_REGISTER_TYPE_F;
1673      } else {
1674         wm_prog_key.texture_data_type = BRW_REGISTER_TYPE_UD;
1675      }
1676      break;
1677   case GL_INT:
1678      wm_prog_key.texture_data_type = BRW_REGISTER_TYPE_D;
1679      break;
1680   default:
1681      assert(!"Unrecognized blorp format");
1682      break;
1683   }
1684
1685   if (brw->intel.gen > 6) {
1686      /* Gen7's rendering hardware only supports the IMS layout for depth and
1687       * stencil render targets.  Blorp always maps its destination surface as
1688       * a color render target (even if it's actually a depth or stencil
1689       * buffer).  So if the destination is IMS, we'll have to map it as a
1690       * single-sampled texture and interleave the samples ourselves.
1691       */
1692      if (dst_mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS)
1693         dst.num_samples = 0;
1694   }
1695
1696   if (dst.map_stencil_as_y_tiled && dst.num_samples > 1) {
1697      /* If the destination surface is a W-tiled multisampled stencil buffer
1698       * that we're mapping as Y tiled, then we need to arrange for the WM
1699       * program to run once per sample rather than once per pixel, because
1700       * the memory layout of related samples doesn't match between W and Y
1701       * tiling.
1702       */
1703      wm_prog_key.persample_msaa_dispatch = true;
1704   }
1705
1706   if (src.num_samples > 0 && dst.num_samples > 1) {
1707      /* We are blitting from a multisample buffer to a multisample buffer, so
1708       * we must preserve samples within a pixel.  This means we have to
1709       * arrange for the WM program to run once per sample rather than once
1710       * per pixel.
1711       */
1712      wm_prog_key.persample_msaa_dispatch = true;
1713   }
1714
1715   /* The render path must be configured to use the same number of samples as
1716    * the destination buffer.
1717    */
1718   num_samples = dst.num_samples;
1719
1720   GLenum base_format = _mesa_get_format_base_format(src_mt->format);
1721   if (base_format != GL_DEPTH_COMPONENT && /* TODO: what about depth/stencil? */
1722       base_format != GL_STENCIL_INDEX &&
1723       src_mt->num_samples > 1 && dst_mt->num_samples <= 1) {
1724      /* We are downsampling a color buffer, so blend. */
1725      wm_prog_key.blend = true;
1726   }
1727
1728   /* src_samples and dst_samples are the true sample counts */
1729   wm_prog_key.src_samples = src_mt->num_samples;
1730   wm_prog_key.dst_samples = dst_mt->num_samples;
1731
1732   /* tex_samples and rt_samples are the sample counts that are set up in
1733    * SURFACE_STATE.
1734    */
1735   wm_prog_key.tex_samples = src.num_samples;
1736   wm_prog_key.rt_samples  = dst.num_samples;
1737
1738   /* tex_layout and rt_layout indicate the MSAA layout the GPU pipeline will
1739    * use to access the source and destination surfaces.
1740    */
1741   wm_prog_key.tex_layout =
1742      compute_msaa_layout_for_pipeline(brw, src.num_samples, src.msaa_layout);
1743   wm_prog_key.rt_layout =
1744      compute_msaa_layout_for_pipeline(brw, dst.num_samples, dst.msaa_layout);
1745
1746   /* src_layout and dst_layout indicate the true MSAA layout used by src and
1747    * dst.
1748    */
1749   wm_prog_key.src_layout = src_mt->msaa_layout;
1750   wm_prog_key.dst_layout = dst_mt->msaa_layout;
1751
1752   wm_prog_key.src_tiled_w = src.map_stencil_as_y_tiled;
1753   wm_prog_key.dst_tiled_w = dst.map_stencil_as_y_tiled;
1754   x0 = wm_push_consts.dst_x0 = dst_x0;
1755   y0 = wm_push_consts.dst_y0 = dst_y0;
1756   x1 = wm_push_consts.dst_x1 = dst_x1;
1757   y1 = wm_push_consts.dst_y1 = dst_y1;
1758   wm_push_consts.x_transform.setup(src_x0, dst_x0, dst_x1, mirror_x);
1759   wm_push_consts.y_transform.setup(src_y0, dst_y0, dst_y1, mirror_y);
1760
1761   if (dst.num_samples <= 1 && dst_mt->num_samples > 1) {
1762      /* We must expand the rectangle we send through the rendering pipeline,
1763       * to account for the fact that we are mapping the destination region as
1764       * single-sampled when it is in fact multisampled.  We must also align
1765       * it to a multiple of the multisampling pattern, because the layout
1766       * differences between multisampled and single-sampled surface formats
1767       * would otherwise scramble pixels within the multisampling pattern.
1768       * TODO: what if this makes the coordinates too large?
1769       *
1770       * Note: this only works if the destination surface uses the IMS layout.
1771       * If it's UMS, then we have no choice but to set up the rendering
1772       * pipeline as multisampled.
1773       */
1774      assert(dst_mt->msaa_layout == INTEL_MSAA_LAYOUT_IMS);
1775      switch (dst_mt->num_samples) {
1776      case 4:
1777         x0 = ROUND_DOWN_TO(x0 * 2, 4);
1778         y0 = ROUND_DOWN_TO(y0 * 2, 4);
1779         x1 = ALIGN(x1 * 2, 4);
1780         y1 = ALIGN(y1 * 2, 4);
1781         break;
1782      case 8:
1783         x0 = ROUND_DOWN_TO(x0 * 4, 8);
1784         y0 = ROUND_DOWN_TO(y0 * 2, 4);
1785         x1 = ALIGN(x1 * 4, 8);
1786         y1 = ALIGN(y1 * 2, 4);
1787         break;
1788      default:
1789         assert(!"Unrecognized sample count in brw_blorp_blit_params ctor");
1790         break;
1791      }
1792      wm_prog_key.use_kill = true;
1793   }
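
   /* For example, with 4x MSAA each pixel owns a 2x2 block of samples in
    * the interleaved surface, so a destination rectangle with x0 = 5 and
    * x1 = 11 becomes x0 = ROUND_DOWN_TO(10, 4) = 8 and x1 = ALIGN(22, 4) =
    * 24 in the single-sampled mapping; the extra pixels introduced by this
    * rounding are discarded by the kill logic enabled via use_kill above.
    */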
1794
1795   if (dst.map_stencil_as_y_tiled) {
1796      /* We must modify the rectangle we send through the rendering pipeline
1797       * (and the size and x/y offset of the destination surface), to account
1798       * for the fact that we are mapping it as Y-tiled when it is in fact
1799       * W-tiled.
1800       *
1801       * Both Y tiling and W tiling can be understood as organizations of
1802       * 32-byte sub-tiles; within each 32-byte sub-tile, the layout of pixels
1803       * is different, but the layout of the 32-byte sub-tiles within the 4k
1804       * tile is the same (8 sub-tiles across by 16 sub-tiles down, in
1805       * column-major order).  In Y tiling, the sub-tiles are 16 bytes wide
1806       * and 2 rows high; in W tiling, they are 8 bytes wide and 4 rows high.
1807       *
1808       * Therefore, to account for the layout differences within the 32-byte
1809       * sub-tiles, we must expand the rectangle so the X coordinates of its
1810       * edges are multiples of 8 (the W sub-tile width), and the Y
1811       * coordinates of its edges are multiples of 4 (the W sub-tile height).
1812       * Then we need to scale the X and Y coordinates of the rectangle to
1813       * account for the differences in aspect ratio between the Y and W
1814       * sub-tiles.  We need to modify the layer width and height similarly.
1815       *
1816       * A correction needs to be applied when MSAA is in use: since
1817       * INTEL_MSAA_LAYOUT_IMS uses an interleaving pattern whose height is 4,
1818       * we need to align the Y coordinates to multiples of 8, so that when
1819       * they are divided by two they are still multiples of 4.
1820       *
1821       * Note: Since the x/y offset of the surface will be applied using the
1822       * SURFACE_STATE command packet, it will be invisible to the swizzling
1823       * code in the shader; therefore it needs to be a multiple of the
1824       * 32-byte sub-tile size.  Fortunately it is, since the sub-tile is 8
1825       * pixels wide and 4 pixels high (when viewed as a W-tiled stencil
1826       * buffer), and the miplevel alignment used for stencil buffers is 8
1827       * pixels horizontally and either 4 or 8 pixels vertically (see
1828       * intel_horizontal_texture_alignment_unit() and
1829       * intel_vertical_texture_alignment_unit()).
1830       *
1831       * Note: Also, since the SURFACE_STATE command packet can only apply
1832       * offsets that are multiples of 4 pixels horizontally and 2 pixels
1833       * vertically, it is important that the offsets be multiples of
1834       * these sizes after they are converted into Y-tiled coordinates.
1835       * Fortunately they will be, since we know from above that the offsets
1836       * are a multiple of the 32-byte sub-tile size, and in Y-tiled
1837       * coordinates the sub-tile is 16 pixels wide and 2 pixels high.
1838       *
1839       * TODO: what if this makes the coordinates (or the texture size) too
1840       * large?
1841       */
1842      const unsigned x_align = 8, y_align = dst.num_samples != 0 ? 8 : 4;
1843      x0 = ROUND_DOWN_TO(x0, x_align) * 2;
1844      y0 = ROUND_DOWN_TO(y0, y_align) / 2;
1845      x1 = ALIGN(x1, x_align) * 2;
1846      y1 = ALIGN(y1, y_align) / 2;
1847      dst.width = ALIGN(dst.width, x_align) * 2;
1848      dst.height = ALIGN(dst.height, y_align) / 2;
1849      dst.x_offset *= 2;
1850      dst.y_offset /= 2;
1851      wm_prog_key.use_kill = true;
1852   }
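
   /* For example, in the non-MSAA case (x_align = 8, y_align = 4), a
    * W-tiled rectangle corner at (20, 10) is first expanded outward to the
    * sub-tile boundary (16, 8) and then remapped to (32, 4) in the Y-tiled
    * view, since the same memory interpreted as Y tiled appears twice as
    * wide and half as tall.
    */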
1853
1854   if (src.map_stencil_as_y_tiled) {
1855      /* We must modify the size and x/y offset of the source surface to
1856       * account for the fact that we are mapping it as Y-tiled when it is in
1857       * fact W-tiled.
1858       *
1859       * See the comments above concerning x/y offset alignment for the
1860       * destination surface.
1861       *
1862       * TODO: what if this makes the texture size too large?
1863       */
1864      const unsigned x_align = 8, y_align = src.num_samples != 0 ? 8 : 4;
1865      src.width = ALIGN(src.width, x_align) * 2;
1866      src.height = ALIGN(src.height, y_align) / 2;
1867      src.x_offset *= 2;
1868      src.y_offset /= 2;
1869   }
1870}
1871
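/**
 * Look up the WM program for this blit operation in the program cache,
 * compiling and uploading it if it is not already there.  Returns the
 * offset of the program in the cache and sets *prog_data to point at its
 * brw_blorp_prog_data.
 */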
1872uint32_t
1873brw_blorp_blit_params::get_wm_prog(struct brw_context *brw,
1874                                   brw_blorp_prog_data **prog_data) const
1875{
1876   uint32_t prog_offset;
1877   if (!brw_search_cache(&brw->cache, BRW_BLORP_BLIT_PROG,
1878                         &this->wm_prog_key, sizeof(this->wm_prog_key),
1879                         &prog_offset, prog_data)) {
1880      brw_blorp_blit_program prog(brw, &this->wm_prog_key);
1881      GLuint program_size;
1882      const GLuint *program = prog.compile(brw, &program_size);
1883      brw_upload_cache(&brw->cache, BRW_BLORP_BLIT_PROG,
1884                       &this->wm_prog_key, sizeof(this->wm_prog_key),
1885                       program, program_size,
1886                       &prog.prog_data, sizeof(prog.prog_data),
1887                       &prog_offset, prog_data);
1888   }
1889   return prog_offset;
1890}
1891