intel_mipmap_tree.c revision 33202b4876a88b6f54ca7022eadd2875a2d3508a
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_resolve_map.h"
#include "intel_span.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/image.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}
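
/* Example of the mapping above (illustrative): all six cube face targets,
 * e.g. GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB, collapse to GL_TEXTURE_CUBE_MAP_ARB
 * so the faces share a single miptree; any other target is returned
 * unchanged.
 */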

/**
 * @param for_region Indicates that the caller is
 *        intel_miptree_create_for_region(). If true, then do not create
 *        \c stencil_mt.
 */
static struct intel_mipmap_tree *
intel_miptree_create_internal(struct intel_context *intel,
			      GLenum target,
			      gl_format format,
			      GLuint first_level,
			      GLuint last_level,
			      GLuint width0,
			      GLuint height0,
			      GLuint depth0,
			      bool for_region,
                              GLuint num_samples,
                              enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   int compress_byte = 0;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   if (_mesa_is_format_compressed(format))
      compress_byte = intel_compressed_num_bytes(format);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
   mt->num_samples = num_samples;
   mt->compressed = compress_byte ? 1 : 0;
   mt->msaa_layout = msaa_layout;
   mt->refcount = 1;

   /* array_spacing_lod0 is only used for non-IMS MSAA surfaces.  TODO: can we
    * use it elsewhere?
    */
   switch (msaa_layout) {
   case INTEL_MSAA_LAYOUT_NONE:
   case INTEL_MSAA_LAYOUT_IMS:
      mt->array_spacing_lod0 = false;
      break;
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      mt->array_spacing_lod0 = true;
      break;
   }

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      mt->depth0 = 6;
   } else {
      mt->depth0 = depth0;
   }

   if (!for_region &&
       _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
       (intel->must_use_separate_stencil ||
	(intel->has_separate_stencil &&
	 intel->vtbl.is_hiz_depth_format(intel, format)))) {
      /* MSAA stencil surfaces always use IMS layout. */
      enum intel_msaa_layout msaa_layout =
         num_samples > 0 ? INTEL_MSAA_LAYOUT_IMS : INTEL_MSAA_LAYOUT_NONE;
      mt->stencil_mt = intel_miptree_create(intel,
                                            mt->target,
                                            MESA_FORMAT_S8,
                                            mt->first_level,
                                            mt->last_level,
                                            mt->width0,
                                            mt->height0,
                                            mt->depth0,
                                            true,
                                            num_samples,
                                            msaa_layout);
      if (!mt->stencil_mt) {
	 intel_miptree_release(&mt);
	 return NULL;
      }

      /* Fix up the Z miptree format for how we're splitting out separate
       * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
       */
      if (mt->format == MESA_FORMAT_S8_Z24) {
	 mt->format = MESA_FORMAT_X8_Z24;
      } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
	 mt->format = MESA_FORMAT_Z32_FLOAT;
	 mt->cpp = 4;
      } else {
	 _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
		       _mesa_get_format_name(mt->format));
      }
   }

   intel_get_texture_alignment_unit(intel, mt->format,
				    &mt->align_w, &mt->align_h);

#ifdef I915
   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);
#else
   brw_miptree_layout(intel, mt);
#endif

   return mt;
}
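
/* Illustrative sketch (not part of the original file): on hardware using
 * separate stencil, creating a packed depth/stencil miptree as above yields
 * a depth-only tree with a linked S8 tree.  The function below is a
 * hypothetical example, kept out of the build.
 */
#if 0
static void
example_separate_stencil(struct intel_context *intel)
{
   struct intel_mipmap_tree *mt =
      intel_miptree_create(intel, GL_TEXTURE_2D, MESA_FORMAT_S8_Z24,
                           0, 0, 128, 128, 1, true,
                           0 /* num_samples */, INTEL_MSAA_LAYOUT_NONE);

   /* The Z data is restated as X8_Z24 and the stencil bits live in their
    * own W-tiled S8 miptree. */
   assert(mt->format == MESA_FORMAT_X8_Z24);
   assert(mt->stencil_mt && mt->stencil_mt->format == MESA_FORMAT_S8);
   intel_miptree_release(&mt);
}
#endif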


struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
		     GLenum target,
		     gl_format format,
		     GLuint first_level,
		     GLuint last_level,
		     GLuint width0,
		     GLuint height0,
		     GLuint depth0,
		     bool expect_accelerated_upload,
                     GLuint num_samples,
                     enum intel_msaa_layout msaa_layout)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format = _mesa_get_format_base_format(format);

   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
	  (base_format == GL_DEPTH_COMPONENT ||
	   base_format == GL_DEPTH_STENCIL_EXT))
	 tiling = I915_TILING_Y;
      else if (msaa_layout != INTEL_MSAA_LAYOUT_NONE) {
         /* From p82 of the Sandy Bridge PRM, dw3[1] of SURFACE_STATE ("Tiled
          * Surface"):
          *
          *   [DevSNB+]: For multi-sample render targets, this field must be
          *   1. MSRTs can only be tiled.
          *
          * Our usual reason for preferring X tiling (fast blits using the
          * blitting engine) doesn't apply to MSAA, since we'll generally be
          * downsampling or upsampling when blitting between the MSAA buffer
          * and another buffer, and the blitting engine doesn't support that.
          * So use Y tiling, since it makes better use of the cache.
          */
         tiling = I915_TILING_Y;
      } else if (width0 >= 64)
	 tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.  So round
       * up the width and height to match the size of W tiles (64x64).
       */
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN(height0, 64);
   }

   mt = intel_miptree_create_internal(intel, target, format,
				      first_level, last_level, width0,
				      height0, depth0,
				      false, num_samples, msaa_layout);
   /*
    * total_width == 0 || total_height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel->intelScreen,
				   tiling,
				   mt->cpp,
				   mt->total_width,
				   mt->total_height,
				   expect_accelerated_upload);
   mt->offset = 0;

   if (!mt->region) {
       intel_miptree_release(&mt);
       return NULL;
   }

   return mt;
}
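
/* Worked example of the MESA_FORMAT_S8 rounding above (illustrative): a
 * 130x17 stencil miptree is padded to ALIGN(130, 64) x ALIGN(17, 64) =
 * 192x64 pixels, i.e. whole 64x64 W tiles, before the region is allocated.
 */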


struct intel_mipmap_tree *
intel_miptree_create_for_region(struct intel_context *intel,
				GLenum target,
				gl_format format,
				struct intel_region *region)
{
   struct intel_mipmap_tree *mt;

   mt = intel_miptree_create_internal(intel, target, format,
				      0, 0,
				      region->width, region->height, 1,
				      true, 0 /* num_samples */,
                                      INTEL_MSAA_LAYOUT_NONE);
   if (!mt)
      return mt;

   intel_region_reference(&mt->region, region);

   return mt;
}

/**
 * Determine which MSAA layout should be used by the MSAA surface being
 * created, based on the chip generation and the surface type.
 */
static enum intel_msaa_layout
compute_msaa_layout(struct intel_context *intel, gl_format format)
{
   /* Prior to Gen7, all MSAA surfaces used IMS layout. */
   if (intel->gen < 7)
      return INTEL_MSAA_LAYOUT_IMS;

   /* In Gen7, IMS layout is only used for depth and stencil buffers. */
   switch (_mesa_get_format_base_format(format)) {
   case GL_DEPTH_COMPONENT:
   case GL_STENCIL_INDEX:
   case GL_DEPTH_STENCIL:
      return INTEL_MSAA_LAYOUT_IMS;
   default:
      /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"):
       *
       *   This field must be set to 0 for all SINT MSRTs when all RT channels
       *   are not written
       *
       * In practice this means that we have to disable MCS for all signed
       * integer MSAA buffers.  The alternative, to disable MCS only when one
       * of the render target channels is disabled, is impractical because it
       * would require converting between CMS and UMS MSAA layouts on the fly,
       * which is expensive.
       */
      if (_mesa_get_format_datatype(format) == GL_INT) {
         /* TODO: is this workaround needed for future chipsets? */
         assert(intel->gen == 7);
         return INTEL_MSAA_LAYOUT_UMS;
      } else {
         return INTEL_MSAA_LAYOUT_CMS;
      }
   }
}
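
/* Example outcomes of compute_msaa_layout() (illustrative, following the
 * logic above):
 *
 *    gen < 7, any format             -> INTEL_MSAA_LAYOUT_IMS
 *    gen 7,   MESA_FORMAT_X8_Z24     -> INTEL_MSAA_LAYOUT_IMS
 *    gen 7,   MESA_FORMAT_RGBA_INT32 -> INTEL_MSAA_LAYOUT_UMS
 *    gen 7,   MESA_FORMAT_ARGB8888   -> INTEL_MSAA_LAYOUT_CMS
 */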

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      gl_format format,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t num_samples)
{
   struct intel_mipmap_tree *mt;
   uint32_t depth = 1;
   enum intel_msaa_layout msaa_layout = INTEL_MSAA_LAYOUT_NONE;

   if (num_samples > 0) {
      /* Adjust width/height/depth for MSAA */
      msaa_layout = compute_msaa_layout(intel, format);
      if (msaa_layout == INTEL_MSAA_LAYOUT_IMS) {
         /* In the Sandy Bridge PRM, volume 4, part 1, page 31, it says:
          *
          *     "Any of the other messages (sample*, LOD, load4) used with a
          *      (4x) multisampled surface will in-effect sample a surface with
          *      double the height and width as that indicated in the surface
          *      state. Each pixel position on the original-sized surface is
          *      replaced with a 2x2 of samples with the following arrangement:
          *
          *         sample 0 sample 2
          *         sample 1 sample 3"
          *
          * Thus, when sampling from a multisampled texture, it behaves as
          * though the layout in memory for (x,y,sample) is:
          *
          *      (0,0,0) (0,0,2)   (1,0,0) (1,0,2)
          *      (0,0,1) (0,0,3)   (1,0,1) (1,0,3)
          *
          *      (0,1,0) (0,1,2)   (1,1,0) (1,1,2)
          *      (0,1,1) (0,1,3)   (1,1,1) (1,1,3)
          *
          * However, the actual layout of multisampled data in memory is:
          *
          *      (0,0,0) (1,0,0)   (0,0,1) (1,0,1)
          *      (0,1,0) (1,1,0)   (0,1,1) (1,1,1)
          *
          *      (0,0,2) (1,0,2)   (0,0,3) (1,0,3)
          *      (0,1,2) (1,1,2)   (0,1,3) (1,1,3)
          *
          * This pattern repeats for each 2x2 pixel block.
          *
          * As a result, when calculating the size of our 4-sample buffer for
          * an odd width or height, we have to align before scaling up because
          * sample 3 is in that bottom right 2x2 block.
          */
         switch (num_samples) {
         case 4:
            width = ALIGN(width, 2) * 2;
            height = ALIGN(height, 2) * 2;
            break;
         case 8:
            width = ALIGN(width, 2) * 4;
            height = ALIGN(height, 2) * 2;
            break;
         default:
            /* num_samples should already have been quantized to 0, 4, or
             * 8.
             */
            assert(false);
         }
      } else {
         /* Non-interleaved */
         depth = num_samples;
      }
   }

   mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
			     width, height, depth, true, num_samples,
                             msaa_layout);

   return mt;
}
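
/* Worked example of the IMS sizing above (illustrative): a 5x3 4x-MSAA
 * renderbuffer is allocated as ALIGN(5, 2) * 2 by ALIGN(3, 2) * 2 = 12x8
 * pixels, so sample 3 of the rightmost and bottommost pixels (stored in
 * the lower right of each 2x2 block) still lands inside the allocation.
 */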

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
   }

   *dst = src;
}


void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_miptree_release(&(*mt)->mcs_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
	 free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
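
/* Minimal usage sketch of the reference counting above (hypothetical caller,
 * kept out of the build):
 */
#if 0
static void
example_refcounting(struct intel_mipmap_tree *mt)
{
   struct intel_mipmap_tree *ref = NULL;

   intel_miptree_reference(&ref, mt);   /* bumps mt->refcount */
   /* ... use ref ... */
   intel_miptree_release(&ref);         /* drops the reference, NULLs ref */
}
#endif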

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
	 mt->format == MESA_FORMAT_X8_Z24 &&
	 mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}


void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
			     GLuint level,
			     GLuint x, GLuint y,
			     GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
			       GLuint level, GLuint img,
			       GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __FUNCTION__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}


/**
 * For cube map textures, either the \c face parameter can be used, or the
 * cube face can be interpreted as a depth layer and the \c layer parameter
 * used.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
			       GLuint level, GLuint face, GLuint layer,
			       GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map. In
       * that case, the caller chose to interpret each cube face as a layer.
       */
      assert(face == 0);
      slice = layer;
   }

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
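
/* Illustrative sketch (hypothetical, kept out of the build): for a linear
 * (untiled) surface mapped at `base`, the slice offsets combine with the
 * region pitch (in pixels) and cpp to address a texel.  Tiled surfaces must
 * instead go through the mapping paths further below.
 */
#if 0
static void *
example_texel_address(struct intel_mipmap_tree *mt, void *base,
		      GLuint level, GLuint slice, GLuint x, GLuint y)
{
   GLuint image_x, image_y;

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   return (char *) base
      + (image_y + y) * mt->region->pitch * mt->cpp
      + (image_x + x) * mt->cpp;
}
#endif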

static void
intel_miptree_copy_slice(struct intel_context *intel,
			 struct intel_mipmap_tree *dst_mt,
			 struct intel_mipmap_tree *src_mt,
			 int level,
			 int face,
			 int depth)
{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
				  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
				  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
			  dst_mt->region->cpp,
			  src_mt->region->pitch, src_mt->region->bo,
			  0, src_mt->region->tiling,
			  dst_mt->region->pitch, dst_mt->region->bo,
			  0, dst_mt->region->tiling,
			  src_x, src_y,
			  dst_x, dst_y,
			  width, height,
			  GL_COPY)) {
      fallback_debug("miptree validate blit for %s failed\n",
		     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
		      dst_mt->cpp,
		      dst_mt->region->pitch,
		      dst_x, dst_y,
		      width, height,
		      src, src_mt->region->pitch,
		      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
			    struct intel_texture_image *intelImage,
			    struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   for (int slice = 0; slice < depth; slice++) {
      intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
}

bool
intel_miptree_alloc_mcs(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->mcs_mt == NULL);
   assert(intel->gen >= 7); /* MCS only used on Gen7+ */
   assert(num_samples == 4); /* TODO: support 8x MSAA */

   /* From the Ivy Bridge PRM, Vol4 Part1 p76, "MCS Base Address":
    *
    *     "The MCS surface must be stored as Tile Y."
    *
    * We set msaa_layout to INTEL_MSAA_LAYOUT_CMS to force
    * intel_miptree_create() to use Y tiling.  msaa_layout is otherwise
    * ignored for the MCS miptree.
    */
   mt->mcs_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_A8,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     0 /* num_samples */,
                                     INTEL_MSAA_LAYOUT_CMS);
   if (!mt->mcs_mt)
      return false;

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     "When MCS buffer is enabled and bound to MSRT, it is required that
    *      it is cleared prior to any rendering."
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *data = intel_region_map(intel, mt->mcs_mt->region, 0);
   memset(data, 0xff, mt->mcs_mt->region->bo->size);
   intel_region_unmap(intel, mt->mcs_mt->region);

   return true;
}

bool
intel_miptree_alloc_hiz(struct intel_context *intel,
			struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(mt->hiz_mt == NULL);
   /* MSAA HiZ surfaces always use IMS layout. */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true,
                                     num_samples,
                                     INTEL_MSAA_LAYOUT_IMS);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
	 head->next = malloc(sizeof(*head->next));
	 head->next->prev = head;
	 head->next->next = NULL;
	 head = head->next;

	 head->level = level;
	 head->layer = layer;
	 head->need = GEN6_HIZ_OP_HIZ_RESOLVE;
      }
   }

   return true;
}
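
/* Usage sketch for the resolve tracking above and below (hypothetical
 * caller, kept out of the build): a depth write marks a slice, and the
 * matching resolve is forced before the real depth values are read.
 */
#if 0
static void
example_hiz_tracking(struct intel_context *intel,
		     struct intel_mipmap_tree *mt)
{
   /* After HiZ-enabled rendering touches level 0, layer 0: */
   intel_miptree_slice_set_needs_depth_resolve(mt, 0, 0);

   /* Before sampling or mapping that slice: */
   intel_miptree_slice_resolve_depth(intel, mt, 0, 0);
}
#endif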

void
intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
					  uint32_t level,
					  uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
			 level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}


void
intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
                                            uint32_t level,
                                            uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (!mt->hiz_mt)
      return;

   intel_resolve_map_set(&mt->hiz_map,
			 level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_slice_resolve(struct intel_context *intel,
			    struct intel_mipmap_tree *mt,
			    uint32_t level,
			    uint32_t layer,
			    enum gen6_hiz_op need)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
	 intel_resolve_map_get(&mt->hiz_map, level, layer);

   if (!item || item->need != need)
      return false;

   intel_hiz_exec(intel, mt, level, layer, need);
   intel_resolve_map_remove(item);
   return true;
}

bool
intel_miptree_slice_resolve_hiz(struct intel_context *intel,
				struct intel_mipmap_tree *mt,
				uint32_t level,
				uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
				      GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_slice_resolve_depth(struct intel_context *intel,
				  struct intel_mipmap_tree *mt,
				  uint32_t level,
				  uint32_t layer)
{
   return intel_miptree_slice_resolve(intel, mt, level, layer,
				      GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static bool
intel_miptree_all_slices_resolve(struct intel_context *intel,
				 struct intel_mipmap_tree *mt,
				 enum gen6_hiz_op need)
{
   bool did_resolve = false;
   struct intel_resolve_map *i, *next;

   for (i = mt->hiz_map.next; i; i = next) {
      next = i->next;
      if (i->need != need)
	 continue;

      intel_hiz_exec(intel, mt, i->level, i->layer, need);
      intel_resolve_map_remove(i);
      did_resolve = true;
   }

   return did_resolve;
}

bool
intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
				     struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
					   GEN6_HIZ_OP_HIZ_RESOLVE);
}

bool
intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
				       struct intel_mipmap_tree *mt)
{
   return intel_miptree_all_slices_resolve(intel, mt,
					   GEN6_HIZ_OP_DEPTH_RESOLVE);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
		      struct intel_mipmap_tree *mt,
		      struct intel_miptree_map *map,
		      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
			struct intel_mipmap_tree *mt,
			struct intel_miptree_map *map,
			unsigned int level,
			unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
		       struct intel_mipmap_tree *mt,
		       struct intel_miptree_map *map,
		       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
				map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
			  mt->region->cpp,
			  mt->region->pitch, mt->region->bo,
			  0, mt->region->tiling,
			  map->stride / mt->region->cpp, map->bo,
			  0, I915_TILING_NONE,
			  x, y,
			  0, 0,
			  map->w, map->h,
			  GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
			 struct intel_mipmap_tree *mt,
			 struct intel_miptree_map *map,
			 unsigned int level,
			 unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}

static void
intel_miptree_map_s8(struct intel_context *intel,
		     struct intel_mipmap_tree *mt,
		     struct intel_miptree_map *map,
		     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* Either READ_BIT or WRITE_BIT (or both) is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
					       GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
	 for (uint32_t x = 0; x < map->w; x++) {
	    ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
	                                       x + image_x + map->x,
	                                       y + image_y + map->y,
					       intel->has_swizzling);
	    untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
	 }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_s8(struct intel_context *intel,
		       struct intel_mipmap_tree *mt,
		       struct intel_miptree_map *map,
		       unsigned int level,
		       unsigned int slice)
{
   if (map->mode & GL_MAP_WRITE_BIT) {
      unsigned int image_x, image_y;
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      for (uint32_t y = 0; y < map->h; y++) {
	 for (uint32_t x = 0; x < map->w; x++) {
	    ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
	                                       x + image_x + map->x,
	                                       y + image_y + map->y,
					       intel->has_swizzling);
	    tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
	 }
      }

      intel_region_unmap(intel, mt->region);
   }

   free(map->buffer);
}

/**
 * Mapping function for packed depth/stencil miptrees backed by real separate
 * miptrees for depth and stencil.
 *
 * On gen7, and to support HiZ on earlier generations, we have to keep the
 * stencil buffer separate from the depth buffer.  Yet at the GL API level, we
 * have to expose packed depth/stencil textures and FBO attachments, and Mesa
 * core expects to be able to map that memory for texture storage and
 * glReadPixels-type operations.  We give Mesa core that access by mallocing a
 * temporary and copying the data between the actual backing store and the
 * temporary.
 */
static void
intel_miptree_map_depthstencil(struct intel_context *intel,
			       struct intel_mipmap_tree *mt,
			       struct intel_miptree_map *map,
			       unsigned int level, unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
   int packed_bpp = map_z32f_x24s8 ? 8 : 4;

   map->stride = map->w * packed_bpp;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* Either READ_BIT or WRITE_BIT (or both) is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
				     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
				     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
	 for (uint32_t x = 0; x < map->w; x++) {
	    int map_x = map->x + x, map_y = map->y + y;
	    ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
						 map_x + s_image_x,
						 map_y + s_image_y,
						 intel->has_swizzling);
	    ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
				  (map_x + z_image_x));
	    uint8_t s = s_map[s_offset];
	    uint32_t z = z_map[z_offset];

	    if (map_z32f_x24s8) {
	       packed_map[(y * map->w + x) * 2 + 0] = z;
	       packed_map[(y * map->w + x) * 2 + 1] = s;
	    } else {
	       packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
	    }
	 }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
	  __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  z_mt, map->x + z_image_x, map->y + z_image_y,
	  s_mt, map->x + s_image_x, map->y + s_image_y,
	  map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  mt, map->ptr, map->stride);
   }
}

static void
intel_miptree_unmap_depthstencil(struct intel_context *intel,
				 struct intel_mipmap_tree *mt,
				 struct intel_miptree_map *map,
				 unsigned int level,
				 unsigned int slice)
{
   struct intel_mipmap_tree *z_mt = mt;
   struct intel_mipmap_tree *s_mt = mt->stencil_mt;
   bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;

   if (map->mode & GL_MAP_WRITE_BIT) {
      uint32_t *packed_map = map->ptr;
      uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
      uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
      unsigned int s_image_x, s_image_y;
      unsigned int z_image_x, z_image_y;

      intel_miptree_get_image_offset(s_mt, level, 0, slice,
				     &s_image_x, &s_image_y);
      intel_miptree_get_image_offset(z_mt, level, 0, slice,
				     &z_image_x, &z_image_y);

      for (uint32_t y = 0; y < map->h; y++) {
	 for (uint32_t x = 0; x < map->w; x++) {
	    ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
						 x + s_image_x + map->x,
						 y + s_image_y + map->y,
						 intel->has_swizzling);
	    ptrdiff_t z_offset = ((y + z_image_y + map->y) * z_mt->region->pitch +
				  (x + z_image_x + map->x));

	    if (map_z32f_x24s8) {
	       z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
	       s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
	    } else {
	       uint32_t packed = packed_map[y * map->w + x];
	       s_map[s_offset] = packed >> 24;
	       z_map[z_offset] = packed;
	    }
	 }
      }

      intel_region_unmap(intel, s_mt->region);
      intel_region_unmap(intel, z_mt->region);

      DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
	  __FUNCTION__,
	  map->x, map->y, map->w, map->h,
	  z_mt, _mesa_get_format_name(z_mt->format),
	  map->x + z_image_x, map->y + z_image_y,
	  s_mt, map->x + s_image_x, map->y + s_image_y,
	  map->ptr, map->stride);
   }

   free(map->buffer);
}
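
/* Worked example of the S8_Z24 packing used above (illustrative): with
 * stencil s = 0xab and 24-bit depth z = 0x123456, mapping packs the texel as
 * (s << 24) | z = 0xab123456; unmapping recovers s = packed >> 24 and writes
 * the packed value straight back to the X8_Z24 depth miptree, whose top
 * eight bits are don't-care.
 */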

void
intel_miptree_map(struct intel_context *intel,
		  struct intel_mipmap_tree *mt,
		  unsigned int level,
		  unsigned int slice,
		  unsigned int x,
		  unsigned int y,
		  unsigned int w,
		  unsigned int h,
		  GLbitfield mode,
		  void **out_ptr,
		  int *out_stride)
{
   struct intel_miptree_map *map;

   map = calloc(1, sizeof(struct intel_miptree_map));
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   assert(!mt->level[level].slice[slice].map);
   mt->level[level].slice[slice].map = map;
   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   intel_miptree_slice_resolve_depth(intel, mt, level, slice);
   if (map->mode & GL_MAP_WRITE_BIT) {
      intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
   }

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_map_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_map_depthstencil(intel, mt, map, level, slice);
   } else if (intel->has_llc &&
	      !(mode & GL_MAP_WRITE_BIT) &&
	      !mt->compressed &&
	      mt->region->tiling == I915_TILING_X) {
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL) {
      mt->level[level].slice[slice].map = NULL;
      free(map);
   }
}
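
/* Hypothetical caller sketch (kept out of the build): reading back a 4x4
 * block from level 0, slice 0 through the map/unmap interface above.
 */
#if 0
static void
example_readback(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   void *ptr;
   int stride;

   intel_miptree_map(intel, mt, 0, 0, 0, 0, 4, 4,
		     GL_MAP_READ_BIT, &ptr, &stride);
   if (ptr) {
      /* Rows of the returned view are `stride` bytes apart. */
      intel_miptree_unmap(intel, mt, 0, 0);
   }
}
#endif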

void
intel_miptree_unmap(struct intel_context *intel,
		    struct intel_mipmap_tree *mt,
		    unsigned int level,
		    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (mt->format == MESA_FORMAT_S8) {
      intel_miptree_unmap_s8(intel, mt, map, level, slice);
   } else if (mt->stencil_mt) {
      intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
   } else if (map->bo) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   mt->level[level].slice[slice].map = NULL;
   free(map);
}