/* freedreno_resource.c — revision bde2045fa247b4d1de98a3bc7585d1b60f9b58b7 */
1/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2
3/*
4 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 *    Rob Clark <robclark@freedesktop.org>
27 */
28
29#include "util/u_format.h"
30#include "util/u_inlines.h"
31#include "util/u_transfer.h"
32#include "util/u_string.h"
33#include "util/u_surface.h"
34
35#include "freedreno_resource.h"
36#include "freedreno_screen.h"
37#include "freedreno_surface.h"
38#include "freedreno_context.h"
39#include "freedreno_query_hw.h"
40#include "freedreno_util.h"
41
42#include <errno.h>
43
44static void
45realloc_bo(struct fd_resource *rsc, uint32_t size)
46{
47	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
48	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
49			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */
50
51	/* if we start using things other than write-combine,
52	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
53	 */
54
55	if (rsc->bo)
56		fd_bo_del(rsc->bo);
57
58	rsc->bo = fd_bo_new(screen->dev, size, flags);
59	rsc->timestamp = 0;
60	rsc->dirty = rsc->reading = false;
61	list_delinit(&rsc->list);
62	util_range_set_empty(&rsc->valid_buffer_range);
63}
64
65static void fd_resource_transfer_flush_region(struct pipe_context *pctx,
66		struct pipe_transfer *ptrans,
67		const struct pipe_box *box)
68{
69	struct fd_resource *rsc = fd_resource(ptrans->resource);
70
71	if (ptrans->resource->target == PIPE_BUFFER)
72		util_range_add(&rsc->valid_buffer_range,
73					   ptrans->box.x + box->x,
74					   ptrans->box.x + box->x + box->width);
75}
76
77static void
78fd_resource_transfer_unmap(struct pipe_context *pctx,
79		struct pipe_transfer *ptrans)
80{
81	struct fd_context *ctx = fd_context(pctx);
82	struct fd_resource *rsc = fd_resource(ptrans->resource);
83	if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
84		fd_bo_cpu_fini(rsc->bo);
85
86	util_range_add(&rsc->valid_buffer_range,
87				   ptrans->box.x,
88				   ptrans->box.x + ptrans->box.width);
89
90	pipe_resource_reference(&ptrans->resource, NULL);
91	util_slab_free(&ctx->transfer_pool, ptrans);
92}
93
/* Map a range (level + box) of a resource for CPU access.  Handles
 * discard-reallocation, flushing pending rendering, and CPU/GPU
 * synchronization before returning a pointer into the bo.
 */
static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
	struct pipe_transfer *ptrans;
	enum pipe_format format = prsc->format;
	uint32_t op = 0;     /* DRM_FREEDRENO_PREP_* flags for fd_bo_cpu_prep() */
	uint32_t offset;
	char *buf;
	int ret = 0;

	DBG("prsc=%p, level=%u, usage=%x", prsc, level, usage);

	ptrans = util_slab_alloc(&ctx->transfer_pool);
	if (!ptrans)
		return NULL;

	/* util_slab_alloc() doesn't zero: */
	memset(ptrans, 0, sizeof(*ptrans));

	pipe_resource_reference(&ptrans->resource, prsc);
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	/* slice->pitch is in pixels; stride is in bytes: */
	ptrans->stride = slice->pitch * rsc->cpp;
	ptrans->layer_stride = slice->size0;

	if (usage & PIPE_TRANSFER_READ)
		op |= DRM_FREEDRENO_PREP_READ;

	if (usage & PIPE_TRANSFER_WRITE)
		op |= DRM_FREEDRENO_PREP_WRITE;

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
		/* caller doesn't care about previous contents, so swap in a
		 * fresh bo of the same size rather than stalling.
		 * NOTE(review): realloc_bo() does not check fd_bo_new()
		 * failure, so rsc->bo could be NULL below — confirm upstream.
		 */
		realloc_bo(rsc, fd_bo_size(rsc->bo));
	} else if ((usage & PIPE_TRANSFER_WRITE) &&
			   prsc->target == PIPE_BUFFER &&
			   !util_ranges_intersect(&rsc->valid_buffer_range,
									  box->x, box->x + box->width)) {
		/* We are trying to write to a previously uninitialized range. No need
		 * to wait.
		 */
	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		/* If the GPU is writing to the resource, or if it is reading from the
		 * resource and we're trying to write to it, flush the renders.
		 */
		if (rsc->dirty ||
			((ptrans->usage & PIPE_TRANSFER_WRITE) && rsc->reading))
			fd_context_render(pctx);

		/* The GPU keeps track of how the various bo's are being used, and
		 * will wait if necessary for the proper operation to have
		 * completed.
		 */
		ret = fd_bo_cpu_prep(rsc->bo, ctx->screen->pipe, op);
		if (ret)
			goto fail;
	}

	buf = fd_bo_map(rsc->bo);
	if (!buf) {
		fd_resource_transfer_unmap(pctx, ptrans);
		return NULL;
	}

	*pptrans = ptrans;

	/* compute the byte offset of the requested box; in layer_first
	 * layout array layers are rsc->layer_size apart, otherwise they
	 * are slice->size0 apart:
	 */
	if (rsc->layer_first) {
		offset = slice->offset +
			box->y / util_format_get_blockheight(format) * ptrans->stride +
			box->x / util_format_get_blockwidth(format) * rsc->cpp +
			box->z * rsc->layer_size;
	} else {
		offset = slice->offset +
			box->y / util_format_get_blockheight(format) * ptrans->stride +
			box->x / util_format_get_blockwidth(format) * rsc->cpp +
			box->z * slice->size0;
	}

	return buf + offset;

fail:
	fd_resource_transfer_unmap(pctx, ptrans);
	return NULL;
}
185
186static void
187fd_resource_destroy(struct pipe_screen *pscreen,
188		struct pipe_resource *prsc)
189{
190	struct fd_resource *rsc = fd_resource(prsc);
191	if (rsc->bo)
192		fd_bo_del(rsc->bo);
193	list_delinit(&rsc->list);
194	util_range_destroy(&rsc->valid_buffer_range);
195	FREE(rsc);
196}
197
198static boolean
199fd_resource_get_handle(struct pipe_screen *pscreen,
200		struct pipe_resource *prsc,
201		struct winsys_handle *handle)
202{
203	struct fd_resource *rsc = fd_resource(prsc);
204
205	return fd_screen_bo_get_handle(pscreen, rsc->bo,
206			rsc->slices[0].pitch * rsc->cpp, handle);
207}
208
209
/* dispatch table plugged into the u_resource_* wrapper entrypoints
 * installed by fd_resource_screen_init()/fd_resource_context_init():
 */
static const struct u_resource_vtbl fd_resource_vtbl = {
		.resource_get_handle      = fd_resource_get_handle,
		.resource_destroy         = fd_resource_destroy,
		.transfer_map             = fd_resource_transfer_map,
		.transfer_flush_region    = fd_resource_transfer_flush_region,
		.transfer_unmap           = fd_resource_transfer_unmap,
		.transfer_inline_write    = u_default_transfer_inline_write,
};
218
/* Lay out the miplevel slices of a resource and return the total bo
 * size in bytes.  Pitch is aligned to 32 pixels; each level's layer
 * size is aligned to 'alignment' bytes (see slice_alignment()).
 */
static uint32_t
setup_slices(struct fd_resource *rsc, uint32_t alignment)
{
	struct pipe_resource *prsc = &rsc->base.b;
	uint32_t level, size = 0;
	uint32_t width = prsc->width0;
	uint32_t height = prsc->height0;
	uint32_t depth = prsc->depth0;
	/* in layer_first layout, the level (slice) contains just one
	 * layer (since in fact the layer contains the slices)
	 */
	uint32_t layers_in_level = rsc->layer_first ? 1 : prsc->array_size;

	for (level = 0; level <= prsc->last_level; level++) {
		struct fd_resource_slice *slice = fd_resource_slice(rsc, level);

		/* pitch is stored in pixels, not bytes: */
		slice->pitch = width = align(width, 32);
		slice->offset = size;
		/* 1d array and 2d array textures must all have the same layer size
		 * for each miplevel on a3xx. 3d textures can have different layer
		 * sizes for high levels, but the hw auto-sizer is buggy (or at least
		 * different than what this code does), so as soon as the layer size
		 * range gets into range, we stop reducing it.
		 */
		/* NOTE: the first two branches compute the same value; they are
		 * kept separate because they encode different reasons for
		 * recomputing the layer size (3d auto-size quirk vs. first
		 * level / layer-first / no alignment requirement):
		 */
		if (prsc->target == PIPE_TEXTURE_3D && (
					level == 1 ||
					(level > 1 && rsc->slices[level - 1].size0 > 0xf000)))
			slice->size0 = align(slice->pitch * height * rsc->cpp, alignment);
		else if (level == 0 || rsc->layer_first || alignment == 1)
			slice->size0 = align(slice->pitch * height * rsc->cpp, alignment);
		else
			/* remaining array levels reuse the previous layer size: */
			slice->size0 = rsc->slices[level - 1].size0;

		size += slice->size0 * depth * layers_in_level;

		width = u_minify(width, 1);
		height = u_minify(height, 1);
		depth = u_minify(depth, 1);
	}

	return size;
}
261
262static uint32_t
263slice_alignment(struct pipe_screen *pscreen, const struct pipe_resource *tmpl)
264{
265	/* on a3xx, 2d array and 3d textures seem to want their
266	 * layers aligned to page boundaries:
267	 */
268	switch (tmpl->target) {
269	case PIPE_TEXTURE_3D:
270	case PIPE_TEXTURE_1D_ARRAY:
271	case PIPE_TEXTURE_2D_ARRAY:
272		return 4096;
273	default:
274		return 1;
275	}
276}
277
278/**
279 * Create a new texture object, using the given template info.
280 */
281static struct pipe_resource *
282fd_resource_create(struct pipe_screen *pscreen,
283		const struct pipe_resource *tmpl)
284{
285	struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
286	struct pipe_resource *prsc = &rsc->base.b;
287	uint32_t size;
288
289	DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
290			"nr_samples=%u, usage=%u, bind=%x, flags=%x",
291			tmpl->target, util_format_name(tmpl->format),
292			tmpl->width0, tmpl->height0, tmpl->depth0,
293			tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
294			tmpl->usage, tmpl->bind, tmpl->flags);
295
296	if (!rsc)
297		return NULL;
298
299	*prsc = *tmpl;
300
301	pipe_reference_init(&prsc->reference, 1);
302	list_inithead(&rsc->list);
303	prsc->screen = pscreen;
304
305	util_range_init(&rsc->valid_buffer_range);
306
307	rsc->base.vtbl = &fd_resource_vtbl;
308	rsc->cpp = util_format_get_blocksize(tmpl->format);
309
310	assert(rsc->cpp);
311
312	if (is_a4xx(fd_screen(pscreen))) {
313		switch (tmpl->target) {
314		case PIPE_TEXTURE_3D:
315			/* TODO 3D_ARRAY? */
316			rsc->layer_first = false;
317			break;
318		default:
319			rsc->layer_first = true;
320			break;
321		}
322	}
323
324	size = setup_slices(rsc, slice_alignment(pscreen, tmpl));
325
326	if (rsc->layer_first) {
327		rsc->layer_size = align(size, 4096);
328		size = rsc->layer_size * prsc->array_size;
329	}
330
331	realloc_bo(rsc, size);
332	if (!rsc->bo)
333		goto fail;
334
335	return prsc;
336fail:
337	fd_resource_destroy(pscreen, prsc);
338	return NULL;
339}
340
341/**
342 * Create a texture from a winsys_handle. The handle is often created in
343 * another process by first creating a pipe texture and then calling
344 * resource_get_handle.
345 */
346static struct pipe_resource *
347fd_resource_from_handle(struct pipe_screen *pscreen,
348		const struct pipe_resource *tmpl,
349		struct winsys_handle *handle)
350{
351	struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
352	struct fd_resource_slice *slice = &rsc->slices[0];
353	struct pipe_resource *prsc = &rsc->base.b;
354
355	DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
356			"nr_samples=%u, usage=%u, bind=%x, flags=%x",
357			tmpl->target, util_format_name(tmpl->format),
358			tmpl->width0, tmpl->height0, tmpl->depth0,
359			tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
360			tmpl->usage, tmpl->bind, tmpl->flags);
361
362	if (!rsc)
363		return NULL;
364
365	*prsc = *tmpl;
366
367	pipe_reference_init(&prsc->reference, 1);
368	list_inithead(&rsc->list);
369	prsc->screen = pscreen;
370
371	util_range_init(&rsc->valid_buffer_range);
372
373	rsc->bo = fd_screen_bo_from_handle(pscreen, handle, &slice->pitch);
374	if (!rsc->bo)
375		goto fail;
376
377	rsc->base.vtbl = &fd_resource_vtbl;
378	rsc->cpp = util_format_get_blocksize(tmpl->format);
379	slice->pitch /= rsc->cpp;
380
381	assert(rsc->cpp);
382
383	return prsc;
384
385fail:
386	fd_resource_destroy(pscreen, prsc);
387	return NULL;
388}
389
390static void fd_blitter_pipe_begin(struct fd_context *ctx);
391static void fd_blitter_pipe_end(struct fd_context *ctx);
392
393/**
394 * _copy_region using pipe (3d engine)
395 */
396static bool
397fd_blitter_pipe_copy_region(struct fd_context *ctx,
398		struct pipe_resource *dst,
399		unsigned dst_level,
400		unsigned dstx, unsigned dsty, unsigned dstz,
401		struct pipe_resource *src,
402		unsigned src_level,
403		const struct pipe_box *src_box)
404{
405	/* not until we allow rendertargets to be buffers */
406	if (dst->target == PIPE_BUFFER || src->target == PIPE_BUFFER)
407		return false;
408
409	if (!util_blitter_is_copy_supported(ctx->blitter, dst, src))
410		return false;
411
412	fd_blitter_pipe_begin(ctx);
413	util_blitter_copy_texture(ctx->blitter,
414			dst, dst_level, dstx, dsty, dstz,
415			src, src_level, src_box);
416	fd_blitter_pipe_end(ctx);
417
418	return true;
419}
420
421/**
422 * Copy a block of pixels from one resource to another.
423 * The resource must be of the same format.
424 * Resources with nr_samples > 1 are not allowed.
425 */
static void
fd_resource_copy_region(struct pipe_context *pctx,
		struct pipe_resource *dst,
		unsigned dst_level,
		unsigned dstx, unsigned dsty, unsigned dstz,
		struct pipe_resource *src,
		unsigned src_level,
		const struct pipe_box *src_box)
{
	struct fd_context *ctx = fd_context(pctx);

	/* TODO if we have 2d core, or other DMA engine that could be used
	 * for simple copies and reasonably easily synchronized with the 3d
	 * core, this is where we'd plug it in..
	 */

	/* first choice is a blit on the 3d pipe; fall back to a pure sw
	 * copy when the blitter can't handle it:
	 */
	if (!fd_blitter_pipe_copy_region(ctx,
			dst, dst_level, dstx, dsty, dstz,
			src, src_level, src_box))
		util_resource_copy_region(pctx,
				dst, dst_level, dstx, dsty, dstz,
				src, src_level, src_box);
}
453
454/**
455 * Optimal hardware path for blitting pixels.
456 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
457 */
458static void
459fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
460{
461	struct fd_context *ctx = fd_context(pctx);
462	struct pipe_blit_info info = *blit_info;
463
464	if (info.src.resource->nr_samples > 1 &&
465			info.dst.resource->nr_samples <= 1 &&
466			!util_format_is_depth_or_stencil(info.src.resource->format) &&
467			!util_format_is_pure_integer(info.src.resource->format)) {
468		DBG("color resolve unimplemented");
469		return;
470	}
471
472	if (util_try_blit_via_copy_region(pctx, &info)) {
473		return; /* done */
474	}
475
476	if (info.mask & PIPE_MASK_S) {
477		DBG("cannot blit stencil, skipping");
478		info.mask &= ~PIPE_MASK_S;
479	}
480
481	if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
482		DBG("blit unsupported %s -> %s",
483				util_format_short_name(info.src.resource->format),
484				util_format_short_name(info.dst.resource->format));
485		return;
486	}
487
488	fd_blitter_pipe_begin(ctx);
489	util_blitter_blit(ctx->blitter, &info);
490	fd_blitter_pipe_end(ctx);
491}
492
/* Save all pipe state that util_blitter will clobber, and mark the
 * hw-query stage as BLIT.  Must be paired with fd_blitter_pipe_end().
 */
static void
fd_blitter_pipe_begin(struct fd_context *ctx)
{
	util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vtx.vertexbuf.vb);
	util_blitter_save_vertex_elements(ctx->blitter, ctx->vtx.vtx);
	util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vp);
	util_blitter_save_rasterizer(ctx->blitter, ctx->rasterizer);
	util_blitter_save_viewport(ctx->blitter, &ctx->viewport);
	util_blitter_save_scissor(ctx->blitter, &ctx->scissor);
	util_blitter_save_fragment_shader(ctx->blitter, ctx->prog.fp);
	util_blitter_save_blend(ctx->blitter, ctx->blend);
	util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->zsa);
	util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
	util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
	util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
	util_blitter_save_fragment_sampler_states(ctx->blitter,
			ctx->fragtex.num_samplers,
			(void **)ctx->fragtex.samplers);
	util_blitter_save_fragment_sampler_views(ctx->blitter,
			ctx->fragtex.num_textures, ctx->fragtex.textures);

	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_BLIT);
}
516
/* Counterpart to fd_blitter_pipe_begin(): reset the hw-query stage.
 * (State restore happens implicitly inside util_blitter.)
 */
static void
fd_blitter_pipe_end(struct fd_context *ctx)
{
	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL);
}
522
523static void
524fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
525{
526	struct fd_resource *rsc = fd_resource(prsc);
527
528	if (rsc->dirty)
529		fd_context_render(pctx);
530}
531
/* Install screen-level resource entrypoints.  get_handle/destroy go
 * through the u_resource wrappers, which dispatch via fd_resource_vtbl.
 */
void
fd_resource_screen_init(struct pipe_screen *pscreen)
{
	pscreen->resource_create = fd_resource_create;
	pscreen->resource_from_handle = fd_resource_from_handle;
	pscreen->resource_get_handle = u_resource_get_handle_vtbl;
	pscreen->resource_destroy = u_resource_destroy_vtbl;
}
540
/* Install context-level resource entrypoints.  The transfer hooks go
 * through the u_transfer wrappers, which dispatch via fd_resource_vtbl.
 */
void
fd_resource_context_init(struct pipe_context *pctx)
{
	pctx->transfer_map = u_transfer_map_vtbl;
	pctx->transfer_flush_region = u_transfer_flush_region_vtbl;
	pctx->transfer_unmap = u_transfer_unmap_vtbl;
	pctx->transfer_inline_write = u_transfer_inline_write_vtbl;
	pctx->create_surface = fd_create_surface;
	pctx->surface_destroy = fd_surface_destroy;
	pctx->resource_copy_region = fd_resource_copy_region;
	pctx->blit = fd_blit;
	pctx->flush_resource = fd_flush_resource;
}
554