r600_texture.c revision 137d44e0f2ce7d99d34f301f7d943645cefb289c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include <errno.h>
#include <pipe/p_screen.h>
#include <util/u_format.h>
#include <util/u_format_s3tc.h>
#include <util/u_math.h>
#include <util/u_inlines.h>
#include <util/u_memory.h>
#include "state_tracker/drm_driver.h"
#include "pipebuffer/pb_buffer.h"
#include "r600_pipe.h"
#include "r600_resource.h"
#include "r600_state_inlines.h"
#include "r600d.h"
#include "r600_formats.h"

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *texture = transfer->resource;

	ctx->resource_copy_region(ctx, rtransfer->staging_texture,
				0, 0, 0, 0, texture, transfer->level,
				&transfer->box);
}


/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *texture = transfer->resource;
	struct pipe_box sbox;

	sbox.x = sbox.y = sbox.z = 0;
	sbox.width = transfer->box.width;
	sbox.height = transfer->box.height;
	/* XXX that might be wrong */
	sbox.depth = 1;
	ctx->resource_copy_region(ctx, texture, transfer->level,
				  transfer->box.x, transfer->box.y, transfer->box.z,
				  rtransfer->staging_texture,
				  0, &sbox);

	ctx->flush(ctx, 0, NULL);
}

unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
					unsigned level, unsigned layer)
{
	unsigned offset = rtex->offset[level];

	switch (rtex->resource.b.b.b.target) {
	case PIPE_TEXTURE_3D:
	case PIPE_TEXTURE_CUBE:
	default:
		return offset + layer * rtex->layer_size[level];
	}
}

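/*
 * Pitch alignment, in blocks (elements), required for the given array mode.
 * Rough worked example (assuming the common group_bytes value of 256 and a
 * 4-byte-per-block format): 1D tiling needs MAX2(8, 256/8/4) = 8 blocks,
 * linear-aligned needs MAX2(64, 256/4) = 64 blocks.
 */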
static unsigned r600_get_block_alignment(struct pipe_screen *screen,
					 enum pipe_format format,
					 unsigned array_mode)
{
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	unsigned pixsize = util_format_get_blocksize(format);
	int p_align;

	switch(array_mode) {
	case V_038000_ARRAY_1D_TILED_THIN1:
		p_align = MAX2(8,
			       ((rscreen->tiling_info->group_bytes / 8 / pixsize)));
		break;
	case V_038000_ARRAY_2D_TILED_THIN1:
		p_align = MAX2(rscreen->tiling_info->num_banks,
			       (((rscreen->tiling_info->group_bytes / 8 / pixsize)) *
				rscreen->tiling_info->num_banks)) * 8;
		break;
	case V_038000_ARRAY_LINEAR_ALIGNED:
		p_align = MAX2(64, rscreen->tiling_info->group_bytes / pixsize);
		break;
	case V_038000_ARRAY_LINEAR_GENERAL:
	default:
		p_align = rscreen->tiling_info->group_bytes / pixsize;
		break;
	}
	return p_align;
}

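/*
 * Height alignment, in block rows, required for the given array mode:
 * 8 rows for 1D tiling and linear-aligned surfaces, 8 * num_channels rows
 * for 2D tiling, and no constraint for general linear surfaces.
 */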
static unsigned r600_get_height_alignment(struct pipe_screen *screen,
					  unsigned array_mode)
{
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	int h_align;

	switch (array_mode) {
	case V_038000_ARRAY_2D_TILED_THIN1:
		h_align = rscreen->tiling_info->num_channels * 8;
		break;
	case V_038000_ARRAY_1D_TILED_THIN1:
	case V_038000_ARRAY_LINEAR_ALIGNED:
		h_align = 8;
		break;
	case V_038000_ARRAY_LINEAR_GENERAL:
	default:
		h_align = 1;
		break;
	}
	return h_align;
}

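/*
 * Byte alignment required for the start of an image.  For 2D tiling this is
 * at least one full macro-tile (num_banks * num_channels micro-tiles of 8x8
 * blocks); the other array modes only need tile-group alignment.
 */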
static unsigned r600_get_base_alignment(struct pipe_screen *screen,
					enum pipe_format format,
					unsigned array_mode)
{
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	unsigned pixsize = util_format_get_blocksize(format);
	int p_align = r600_get_block_alignment(screen, format, array_mode);
	int h_align = r600_get_height_alignment(screen, array_mode);
	int b_align;

	switch (array_mode) {
	case V_038000_ARRAY_2D_TILED_THIN1:
		b_align = MAX2(rscreen->tiling_info->num_banks * rscreen->tiling_info->num_channels * 8 * 8 * pixsize,
			       p_align * pixsize * h_align);
		break;
	case V_038000_ARRAY_1D_TILED_THIN1:
	case V_038000_ARRAY_LINEAR_ALIGNED:
	case V_038000_ARRAY_LINEAR_GENERAL:
	default:
		b_align = rscreen->tiling_info->group_bytes;
		break;
	}
	return b_align;
}

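/*
 * Minify a dimension for the given mip level.  Levels above the base are
 * rounded up to the next power of two, matching how the hardware derives
 * the sizes of non-base levels of a non-power-of-two texture.
 */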
static unsigned mip_minify(unsigned size, unsigned level)
{
	unsigned val;
	val = u_minify(size, level);
	if (level > 0)
		val = util_next_power_of_two(val);
	return val;
}

static unsigned r600_texture_get_nblocksx(struct pipe_screen *screen,
					  struct r600_resource_texture *rtex,
					  unsigned level)
{
	struct pipe_resource *ptex = &rtex->resource.b.b.b;
	unsigned nblocksx, block_align, width;
	unsigned blocksize = util_format_get_blocksize(ptex->format);

	if (rtex->pitch_override)
		return rtex->pitch_override / blocksize;

	width = mip_minify(ptex->width0, level);
	nblocksx = util_format_get_nblocksx(ptex->format, width);

	block_align = r600_get_block_alignment(screen, ptex->format,
					      rtex->array_mode[level]);
	nblocksx = align(nblocksx, block_align);
	return nblocksx;
}

static unsigned r600_texture_get_nblocksy(struct pipe_screen *screen,
					  struct r600_resource_texture *rtex,
					  unsigned level)
{
	struct pipe_resource *ptex = &rtex->resource.b.b.b;
	unsigned height, tile_height;

	height = mip_minify(ptex->height0, level);
	height = util_format_get_nblocksy(ptex->format, height);
	tile_height = r600_get_height_alignment(screen,
						rtex->array_mode[level]);
	height = align(height, tile_height);
	return height;
}

static void r600_texture_set_array_mode(struct pipe_screen *screen,
					struct r600_resource_texture *rtex,
					unsigned level, unsigned array_mode)
{
	struct pipe_resource *ptex = &rtex->resource.b.b.b;

	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	default:
		rtex->array_mode[level] = array_mode;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
	{
		unsigned w, h, tile_height, tile_width;

		tile_height = r600_get_height_alignment(screen, array_mode);
		tile_width = r600_get_block_alignment(screen, ptex->format, array_mode);

		w = mip_minify(ptex->width0, level);
		h = mip_minify(ptex->height0, level);
		if (w <= tile_width || h <= tile_height)
			rtex->array_mode[level] = V_0280A0_ARRAY_1D_TILED_THIN1;
		else
			rtex->array_mode[level] = array_mode;
	}
	break;
	}
}

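/*
 * Compute the layout of the whole mipmap tree: per-level array mode,
 * aligned pitch and height in blocks, per-layer size and byte offset of
 * each level.  Cube maps are allocated with 8 layers on R700 and newer
 * (6 on R600), 3D textures with the minified depth, and array textures
 * with array_size layers.  The base image and the start of the remaining
 * mip chain are aligned to the base alignment of the chosen array mode.
 */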
static void r600_setup_miptree(struct pipe_screen *screen,
			       struct r600_resource_texture *rtex,
			       unsigned array_mode)
{
	struct pipe_resource *ptex = &rtex->resource.b.b.b;
	struct radeon *radeon = (struct radeon *)screen->winsys;
	enum chip_class chipc = r600_get_family_class(radeon);
	unsigned size, layer_size, i, offset;
	unsigned nblocksx, nblocksy;

	for (i = 0, offset = 0; i <= ptex->last_level; i++) {
		unsigned blocksize = util_format_get_blocksize(ptex->format);

		r600_texture_set_array_mode(screen, rtex, i, array_mode);

		nblocksx = r600_texture_get_nblocksx(screen, rtex, i);
		nblocksy = r600_texture_get_nblocksy(screen, rtex, i);

		layer_size = nblocksx * nblocksy * blocksize;
		if (ptex->target == PIPE_TEXTURE_CUBE) {
			if (chipc >= R700)
				size = layer_size * 8;
			else
				size = layer_size * 6;
		}
		else if (ptex->target == PIPE_TEXTURE_3D)
			size = layer_size * u_minify(ptex->depth0, i);
		else
			size = layer_size * ptex->array_size;

		/* align base image and start of miptree */
		if ((i == 0) || (i == 1))
			offset = align(offset, r600_get_base_alignment(screen, ptex->format, array_mode));
		rtex->offset[i] = offset;
		rtex->layer_size[i] = layer_size;
		rtex->pitch_in_blocks[i] = nblocksx; /* CB talks in elements */
		rtex->pitch_in_bytes[i] = nblocksx * blocksize;

		offset += size;
	}
	rtex->size = offset;
}

/* Figure out whether u_blitter will fall back to a transfer operation.
 * If so, don't use a staging resource.
 */
static boolean permit_hardware_blit(struct pipe_screen *screen,
					const struct pipe_resource *res)
{
	unsigned bind;

	if (util_format_is_depth_or_stencil(res->format))
		bind = PIPE_BIND_DEPTH_STENCIL;
	else
		bind = PIPE_BIND_RENDER_TARGET;

	/* hackaround for S3TC */
	if (util_format_is_compressed(res->format))
		return TRUE;

	if (!screen->is_format_supported(screen,
				res->format,
				res->target,
				res->nr_samples,
				bind, 0))
		return FALSE;

	if (!screen->is_format_supported(screen,
				res->format,
				res->target,
				res->nr_samples,
				PIPE_BIND_SAMPLER_VIEW, 0))
		return FALSE;

	return TRUE;
}

static boolean r600_texture_get_handle(struct pipe_screen* screen,
					struct pipe_resource *ptex,
					struct winsys_handle *whandle)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;
	struct radeon *radeon = (struct radeon *)screen->winsys;

	return r600_bo_get_winsys_handle(radeon, resource->bo,
			rtex->pitch_in_bytes[0], whandle);
}

static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;
	struct radeon *radeon = (struct radeon *)screen->winsys;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	if (resource->bo) {
		r600_bo_reference(radeon, &resource->bo, NULL);
	}
	FREE(rtex);
}

static unsigned int r600_texture_is_referenced(struct pipe_context *context,
						struct pipe_resource *texture,
						unsigned level, int layer)
{
	/* FIXME */
	return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
}

static const struct u_resource_vtbl r600_texture_vtbl =
{
	r600_texture_get_handle,	/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_is_referenced,	/* is_resource_referenced */
	r600_texture_get_transfer,	/* get_transfer */
	r600_texture_transfer_destroy,	/* transfer_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region,/* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	u_default_transfer_inline_write	/* transfer_inline_write */
};

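/*
 * Common creation path shared by r600_texture_create() and
 * r600_texture_from_handle(): fills in the resource wrapper, computes the
 * miptree layout, and either adopts the given BO or allocates a new one
 * with the required base alignment.
 */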
static struct r600_resource_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned array_mode,
			   unsigned pitch_in_bytes_override,
			   unsigned max_buffer_size,
			   struct r600_bo *bo)
{
	struct r600_resource_texture *rtex;
	struct r600_resource *resource;
	struct radeon *radeon = (struct radeon *)screen->winsys;

	rtex = CALLOC_STRUCT(r600_resource_texture);
	if (rtex == NULL)
		return NULL;

	resource = &rtex->resource;
	resource->b.b.b = *base;
	resource->b.b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.b.reference, 1);
	resource->b.b.b.screen = screen;
	resource->bo = bo;
	rtex->pitch_override = pitch_in_bytes_override;
	/* only mark depth textures the HW can hit as depth textures */
	if (util_format_is_depth_or_stencil(base->format) && permit_hardware_blit(screen, base))
		rtex->depth = 1;

	r600_setup_miptree(screen, rtex, array_mode);

	resource->size = rtex->size;

	if (!resource->bo) {
		struct pipe_resource *ptex = &rtex->resource.b.b.b;
		int base_align = r600_get_base_alignment(screen, ptex->format, array_mode);

		resource->bo = r600_bo(radeon, rtex->size, base_align, base->bind, base->usage);
		if (!resource->bo) {
			FREE(rtex);
			return NULL;
		}
	}
	return rtex;
}

struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
						const struct pipe_resource *templ)
{
	unsigned array_mode = 0;
	static int force_tiling = -1;

	/* Would like some magic "get_bool_option_once" routine.
	 */
	if (force_tiling == -1) {
		struct r600_screen *rscreen = (struct r600_screen *)screen;
		/* reenable when 2D tiling is fixed better */
		/*if (r600_get_minor_version(rscreen->radeon) >= 9)
			force_tiling = debug_get_bool_option("R600_TILING", TRUE);*/
		force_tiling = debug_get_bool_option("R600_TILING", FALSE);
	}

	if (force_tiling && permit_hardware_blit(screen, templ)) {
		if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
		    !(templ->bind & PIPE_BIND_SCANOUT)) {
			array_mode = V_038000_ARRAY_2D_TILED_THIN1;
		}
	}

	if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
	    util_format_is_compressed(templ->format))
		array_mode = V_038000_ARRAY_1D_TILED_THIN1;

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
								  0, 0, NULL);
}

static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *surf_tmpl)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
	unsigned level = surf_tmpl->u.tex.level;

	assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);
	if (surface == NULL)
		return NULL;
	/* XXX no offset */
/*	offset = r600_texture_get_offset(rtex, level, surf_tmpl->u.tex.first_layer);*/
	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = surf_tmpl->format;
	surface->base.width = mip_minify(texture->width0, level);
	surface->base.height = mip_minify(texture->height0, level);
	surface->base.usage = surf_tmpl->usage;
	surface->base.texture = texture;
	surface->base.u.tex.first_layer = surf_tmpl->u.tex.first_layer;
	surface->base.u.tex.last_layer = surf_tmpl->u.tex.last_layer;
	surface->base.u.tex.level = level;

	surface->aligned_height = r600_texture_get_nblocksy(pipe->screen,
							    rtex, level);
	return &surface->base;
}

static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}


struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
					       const struct pipe_resource *templ,
					       struct winsys_handle *whandle)
{
	struct radeon *rw = (struct radeon*)screen->winsys;
	struct r600_bo *bo = NULL;
	unsigned array_mode = 0;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	      templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	bo = r600_bo_handle(rw, whandle->handle, &array_mode);
	if (bo == NULL) {
		return NULL;
	}

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
								  whandle->stride,
								  0,
								  bo);
}

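/*
 * Make sure a linear, CPU/sampler-visible copy of a depth texture exists.
 * The copy ("flushed depth texture") is allocated on first use; unless
 * just_create is set, the current depth contents are then copied into it
 * with r600_blit_uncompress_depth().
 */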
int r600_texture_depth_flush(struct pipe_context *ctx,
			     struct pipe_resource *texture, boolean just_create)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;

	if (rtex->flushed_depth_texture)
		goto out;

	resource.target = PIPE_TEXTURE_2D;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = 1;
	resource.array_size = 1;
	resource.last_level = texture->last_level;
	resource.nr_samples = 0;
	resource.usage = PIPE_USAGE_DYNAMIC;
	resource.bind = 0;
	resource.flags = R600_RESOURCE_FLAG_TRANSFER;

	resource.bind |= PIPE_BIND_DEPTH_STENCIL;

	rtex->flushed_depth_texture = (struct r600_resource_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (rtex->flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold untiled copy\n");
		return -ENOMEM;
	}

	((struct r600_resource_texture *)rtex->flushed_depth_texture)->is_flushing_texture = TRUE;
out:
	if (just_create)
		return 0;

	/* XXX: only do this if the depth texture has actually changed:
	 */
	r600_blit_uncompress_depth(ctx, rtex);
	return 0;
}

/* Needs adjustment for pixelformat:
 */
static INLINE unsigned u_box_volume( const struct pipe_box *box )
{
	return box->width * box->depth * box->height;
}

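/*
 * Create a transfer object.  Three paths are possible: depth textures go
 * through the flushed depth copy, tiled textures and transfers that would
 * be slow to map directly go through a temporary staging texture (detiled
 * with a blit), and everything else maps the texture's BO directly.
 */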
struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
						struct pipe_resource *texture,
						unsigned level,
						unsigned usage,
						const struct pipe_box *box)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;
	struct r600_transfer *trans;
	int r;
	boolean use_staging_texture = FALSE;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, so we detile it using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (R600_TEX_IS_TILED(rtex, level))
		use_staging_texture = TRUE;

	if ((usage & PIPE_TRANSFER_READ) && u_box_volume(box) > 1024)
		use_staging_texture = TRUE;

	/* XXX: Use a staging texture for uploads if the underlying BO
	 * is busy.  No interface for checking that currently, so do
	 * it eagerly whenever the transfer doesn't require a readback
	 * and might block.
	 */
	if ((usage & PIPE_TRANSFER_WRITE) &&
			!(usage & (PIPE_TRANSFER_READ |
					PIPE_TRANSFER_DONTBLOCK |
					PIPE_TRANSFER_UNSYNCHRONIZED)))
		use_staging_texture = TRUE;

	if (!permit_hardware_blit(ctx->screen, texture) ||
		(texture->flags & R600_RESOURCE_FLAG_TRANSFER))
		use_staging_texture = FALSE;

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	pipe_resource_reference(&trans->transfer.resource, texture);
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;
	if (rtex->depth) {
		/* XXX: only readback the rectangle which is being mapped?
		*/
		/* XXX: when discard is true, no need to read back from depth texture
		*/
		r = r600_texture_depth_flush(ctx, texture, FALSE);
		if (r < 0) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}
		trans->transfer.stride = rtex->flushed_depth_texture->pitch_in_bytes[level];
		trans->offset = r600_texture_get_offset(rtex->flushed_depth_texture, level, box->z);
		return &trans->transfer;
	} else if (use_staging_texture) {
		resource.target = PIPE_TEXTURE_2D;
		resource.format = texture->format;
		resource.width0 = box->width;
		resource.height0 = box->height;
		resource.depth0 = 1;
		resource.array_size = 1;
		resource.last_level = 0;
		resource.nr_samples = 0;
		resource.usage = PIPE_USAGE_STAGING;
		resource.bind = 0;
		resource.flags = R600_RESOURCE_FLAG_TRANSFER;
		/* For texture reading, the temporary (detiled) texture is used as
		 * a render target when blitting from a tiled texture. */
		if (usage & PIPE_TRANSFER_READ) {
			resource.bind |= PIPE_BIND_RENDER_TARGET;
		}
		/* For texture writing, the temporary texture is used as a sampler
		 * when blitting into a tiled texture. */
		if (usage & PIPE_TRANSFER_WRITE) {
			resource.bind |= PIPE_BIND_SAMPLER_VIEW;
		}
		/* Create the temporary texture. */
		trans->staging_texture = ctx->screen->resource_create(ctx->screen, &resource);
		if (trans->staging_texture == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		trans->transfer.stride =
			((struct r600_resource_texture *)trans->staging_texture)->pitch_in_bytes[0];
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			ctx->flush(ctx, 0, NULL);
		}
		return &trans->transfer;
	}
	trans->transfer.stride = rtex->pitch_in_bytes[level];
	trans->transfer.layer_stride = rtex->layer_size[level];
	trans->offset = r600_texture_get_offset(rtex, level, box->z);
	return &trans->transfer;
}

void r600_texture_transfer_destroy(struct pipe_context *ctx,
				   struct pipe_transfer *transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;

	if (rtransfer->staging_texture) {
		if (transfer->usage & PIPE_TRANSFER_WRITE) {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
		pipe_resource_reference(&rtransfer->staging_texture, NULL);
	}

	if (rtex->depth && !rtex->is_flushing_texture) {
		if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtex->flushed_depth_texture)
			r600_blit_push_depth(ctx, rtex);
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}

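/*
 * Map the BO backing a transfer: the staging texture's BO if one was
 * created, the flushed depth copy for depth textures, or the texture's own
 * BO otherwise.  For direct maps the returned pointer is offset to the
 * start of the requested box.
 */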
void* r600_texture_transfer_map(struct pipe_context *ctx,
				struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_bo *bo;
	enum pipe_format format = transfer->resource->format;
	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
	unsigned offset = 0;
	unsigned usage = 0;
	char *map;

	if (rtransfer->staging_texture) {
		bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture)
			bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
		else
			bo = ((struct r600_resource *)transfer->resource)->bo;

		offset = rtransfer->offset +
			transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
	}

	if (transfer->usage & PIPE_TRANSFER_WRITE) {
		usage |= PB_USAGE_CPU_WRITE;

		if (transfer->usage & PIPE_TRANSFER_DISCARD) {
			/* TODO: not handled yet */
		}

		if (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
			/* TODO: not handled yet */
		}
	}

	if (transfer->usage & PIPE_TRANSFER_READ) {
		usage |= PB_USAGE_CPU_READ;
	}

	if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
		usage |= PB_USAGE_DONTBLOCK;
	}

	if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		usage |= PB_USAGE_UNSYNCHRONIZED;
	}

	map = r600_bo_map(radeon, bo, usage, ctx);
	if (!map) {
		return NULL;
	}

	return map + offset;
}

void r600_texture_transfer_unmap(struct pipe_context *ctx,
				 struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
	struct r600_bo *bo;

	if (rtransfer->staging_texture) {
		bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture) {
			bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
		} else {
			bo = ((struct r600_resource *)transfer->resource)->bo;
		}
	}
	r600_bo_unmap(radeon, bo);
}

void r600_init_surface_functions(struct r600_pipe_context *r600)
{
	r600->context.create_surface = r600_create_surface;
	r600->context.surface_destroy = r600_surface_destroy;
}

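/*
 * Build the destination-swizzle selectors from the format's channel
 * swizzle, optionally composed with a sampler-view swizzle.  The result is
 * OR'd into the word4 value returned by r600_translate_texformat(); the
 * shifts 16/19/22/25 correspond to the four X/Y/Z/W selector fields of
 * that word.
 */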
static unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
		const unsigned char *swizzle_view)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};

	if (swizzle_view) {
		/* Combine two sets of swizzles. */
		for (i = 0; i < 4; i++) {
			swizzle[i] = swizzle_view[i] <= UTIL_FORMAT_SWIZZLE_W ?
				swizzle_format[swizzle_view[i]] : swizzle_view[i];
		}
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case UTIL_FORMAT_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* UTIL_FORMAT_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
		}
	}
	return result;
}

/* texture format translate */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p)
{
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	static int r600_enable_s3tc = -1;

	int i;
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};
	desc = util_format_description(format);

	word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view);

	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
		/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		case PIPE_FORMAT_Z16_UNORM:
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_X24S8_USCALED:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_USCALED:
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_USCALED:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_USCALED_Z24_UNORM:
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_S8_USCALED:
			result = FMT_8;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* TODO */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}

	if (r600_enable_s3tc == -1) {
		struct r600_screen *rscreen = (struct r600_screen *)screen;
		if (r600_get_minor_version(rscreen->radeon) >= 9)
			r600_enable_s3tc = 1;
		else
			r600_enable_s3tc = debug_get_bool_option("R600_ENABLE_S3TC", FALSE);
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		if (!r600_enable_s3tc)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_RGTC1_SNORM:
			word4 |= sign_bit[0];
			/* fall through */
		case PIPE_FORMAT_RGTC1_UNORM:
			result = FMT_BC4;
			goto out_word4;
		case PIPE_FORMAT_RGTC2_SNORM:
			word4 |= sign_bit[0] | sign_bit[1];
			/* fall through */
		case PIPE_FORMAT_RGTC2_UNORM:
			result = FMT_BC5;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {

		if (!r600_enable_s3tc)
			goto out_unknown;

		if (!util_format_s3tc_enabled) {
			goto out_unknown;
		}

		switch (format) {
		case PIPE_FORMAT_DXT1_RGB:
		case PIPE_FORMAT_DXT1_RGBA:
		case PIPE_FORMAT_DXT1_SRGB:
		case PIPE_FORMAT_DXT1_SRGBA:
			result = FMT_BC1;
			goto out_word4;
		case PIPE_FORMAT_DXT3_RGBA:
		case PIPE_FORMAT_DXT3_SRGBA:
			result = FMT_BC2;
			goto out_word4;
		case PIPE_FORMAT_DXT5_RGBA:
		case PIPE_FORMAT_DXT5_SRGBA:
			result = FMT_BC3;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}


	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}

	/* R8G8Bx_SNORM - TODO CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}

	/* Non-uniform formats. */
	if (!uniform) {
		switch(desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;

	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}

		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
		}

	}
out_word4:
	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;
out_unknown:
//	R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format));
	return ~0;
}