1/*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 *    Rob Clark <robclark@freedesktop.org>
25 */
26
27#ifndef FD5_EMIT_H
28#define FD5_EMIT_H
29
30#include "pipe/p_context.h"
31
32#include "freedreno_context.h"
33#include "fd5_context.h"
34#include "fd5_format.h"
35#include "fd5_program.h"
36#include "ir3_shader.h"
37
38struct fd_ringbuffer;
39
40/* grouped together emit-state for prog/vertex/state emit: */
struct fd5_emit {
	/* callback used to report shader-compile diagnostics back to the
	 * state tracker (passed through to ir3_shader_variant()):
	 */
	struct pipe_debug_callback *debug;
	const struct fd_vertex_state *vtx;
	const struct fd_program_stateobj *prog;
	const struct pipe_draw_info *info;
	/* variant-selection key used when looking up vp/fp variants: */
	struct ir3_shader_key key;
	/* bitmask of state that needs to be re-emitted: */
	uint32_t dirty;

	uint32_t sprite_coord_enable;  /* bitmask */
	bool sprite_coord_mode;
	bool rasterflat;
	bool no_decode_srgb;

	/* cached to avoid repeated lookups of same variants: */
	const struct ir3_shader_variant *vp, *fp;
	/* TODO: other shader stages.. */

	/* bitmask of streamout buffers/targets in use for this draw: */
	unsigned streamout_mask;
};
60
61static inline enum a5xx_color_fmt fd5_emit_format(struct pipe_surface *surf)
62{
63	if (!surf)
64		return 0;
65	return fd5_pipe2color(surf->format);
66}
67
68static inline const struct ir3_shader_variant *
69fd5_emit_get_vp(struct fd5_emit *emit)
70{
71	if (!emit->vp) {
72		struct fd5_shader_stateobj *so = emit->prog->vp;
73		emit->vp = ir3_shader_variant(so->shader, emit->key, emit->debug);
74	}
75	return emit->vp;
76}
77
78static inline const struct ir3_shader_variant *
79fd5_emit_get_fp(struct fd5_emit *emit)
80{
81	if (!emit->fp) {
82		if (emit->key.binning_pass) {
83			/* use dummy stateobj to simplify binning vs non-binning: */
84			static const struct ir3_shader_variant binning_fp = {};
85			emit->fp = &binning_fp;
86		} else {
87			struct fd5_shader_stateobj *so = emit->prog->fp;
88			emit->fp = ir3_shader_variant(so->shader, emit->key, emit->debug);
89		}
90	}
91	return emit->fp;
92}
93
/* Emit a UCHE cache invalidate into the ring.  The MIN/MAX address range
 * is left at zero, which presumably means "invalidate everything" — the
 * magic 0x12 in the last dword selects the invalidate operation (TODO
 * confirm against register docs).  Brackets the packet with the batch's
 * wait-for-idle tracking.
 */
static inline void
fd5_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	fd_reset_wfi(batch);
	OUT_PKT4(ring, REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO, 5);
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MIN_LO */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MIN_HI */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MAX_LO */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MAX_HI */
	OUT_RING(ring, 0x00000012);   /* UCHE_CACHE_INVALIDATE */
	fd_wfi(batch, ring);
}
106
/* Emit a CP_SET_RENDER_MODE packet switching the CP between rendering
 * modes (eg. BYPASS/GMEM/BLIT).  The GMEM_ENABLE bit is set only for
 * GMEM mode; the addr dwords are left zero.  emit_marker5() calls
 * bracket the packet for cmdstream debugging.
 */
static inline void
fd5_set_render_mode(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum render_mode_cmd mode)
{
	/* TODO add preemption support, gmem bypass, etc */
	emit_marker5(ring, 7);
	OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
	OUT_RING(ring, CP_SET_RENDER_MODE_0_MODE(mode));
	OUT_RING(ring, 0x00000000);   /* ADDR_LO */
	OUT_RING(ring, 0x00000000);   /* ADDR_HI */
	OUT_RING(ring, COND(mode == GMEM, CP_SET_RENDER_MODE_3_GMEM_ENABLE));
	OUT_RING(ring, 0x00000000);
	emit_marker5(ring, 7);
}
121
/* Emit a BLIT event via CP_EVENT_WRITE, with the event's write-back
 * address pointing at the context's blit_mem buffer.  Bracketed with
 * emit_marker5() for cmdstream debugging.
 */
static inline void
fd5_emit_blit(struct fd_context *ctx, struct fd_ringbuffer *ring)
{
	struct fd5_context *fd5_ctx = fd5_context(ctx);

	emit_marker5(ring, 7);

	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(BLIT));
	OUT_RELOCW(ring, fd5_ctx->blit_mem, 0, 0, 0);  /* ADDR_LO/HI */
	OUT_RING(ring, 0x00000000);

	emit_marker5(ring, 7);
}
136
/* Program RB_RENDER_CNTL into the current batch's draw ring.  Bit 0x8 is
 * set for normal draws and cleared for blits (exact meaning of the bit
 * is not documented here — presumably a blit/bypass enable; confirm
 * against the a5xx register database).
 */
static inline void
fd5_emit_render_cntl(struct fd_context *ctx, bool blit)
{
	struct fd_ringbuffer *ring = ctx->batch->draw;

	/* TODO eventually this partially depends on the pfb state, ie.
	 * which of the cbuf(s)/zsbuf has an UBWC flag buffer.. that part
	 * we could probably cache and just regenerate if framebuffer
	 * state is dirty (or something like that)..
	 *
	 * Other bits seem to depend on query state, like if samples-passed
	 * query is active.
	 */
	OUT_PKT4(ring, REG_A5XX_RB_RENDER_CNTL, 1);
	OUT_RING(ring, 0x00000000 |   /* RB_RENDER_CNTL */
			COND(!blit, 0x8));
}
154
155void fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit);
156
157void fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
158		struct fd5_emit *emit);
159
160void fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);
161
162void fd5_emit_init(struct pipe_context *pctx);
163
164#endif /* FD5_EMIT_H */
165