fd5_emit.c revision 4dc6ed53c1a8431a818d2f13e60f340d60f80127
1/*
2 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 *    Rob Clark <robclark@freedesktop.org>
25 */
26
27#include "pipe/p_state.h"
28#include "util/u_string.h"
29#include "util/u_memory.h"
30#include "util/u_helpers.h"
31#include "util/u_format.h"
32#include "util/u_viewport.h"
33
34#include "freedreno_resource.h"
35#include "freedreno_query_hw.h"
36
37#include "fd5_emit.h"
38#include "fd5_blend.h"
39#include "fd5_context.h"
40#include "fd5_program.h"
41#include "fd5_rasterizer.h"
42#include "fd5_texture.h"
43#include "fd5_format.h"
44#include "fd5_zsa.h"
45
/* Map gallium shader stage (enum shader_t) to the adreno state-block id
 * used in CP_LOAD_STATE packets when uploading that stage's constants.
 */
static const enum adreno_state_block sb[] = {
	[SHADER_VERTEX]   = SB_VERT_SHADER,
	[SHADER_FRAGMENT] = SB_FRAG_SHADER,
};
50
/* Emit a CP_LOAD_STATE packet to upload shader constants.
 *
 * regid:          base const register (must be vec4 aligned, ie. multiple of 4)
 * offset:         byte offset into the source buffer (applies to either
 *                 prsc or dwords)
 * sizedwords:     size of const value buffer, in dwords (multiple of 4)
 * prsc or dwords: source of the constant values -- exactly one of the two
 *                 is used: if prsc is non-NULL the values are read by the
 *                 GPU from the resource's bo (indirect), otherwise the
 *                 dwords array is copied inline into the cmdstream.
 */
static void
fd5_emit_const(struct fd_ringbuffer *ring, enum shader_t type,
		uint32_t regid, uint32_t offset, uint32_t sizedwords,
		const uint32_t *dwords, struct pipe_resource *prsc)
{
	uint32_t i, sz;
	enum adreno_state_src src;

	debug_assert((regid % 4) == 0);
	debug_assert((sizedwords % 4) == 0);

	if (prsc) {
		/* indirect: no inline payload follows the packet header */
		sz = 0;
		src = 0x2;  // TODO ??
	} else {
		sz = sizedwords;
		src = SS_DIRECT;
	}

	OUT_PKT7(ring, CP_LOAD_STATE, 3 + sz);
	/* DST_OFF and NUM_UNIT are in units of vec4 (4 dwords) */
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/4) |
			CP_LOAD_STATE_0_STATE_SRC(src) |
			CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
			CP_LOAD_STATE_0_NUM_UNIT(sizedwords/4));
	if (prsc) {
		/* dwords 1+2 are the 64b gpu address of the const buffer */
		struct fd_bo *bo = fd_resource(prsc)->bo;
		OUT_RELOC(ring, bo, offset,
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS), 0);
	} else {
		OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
				CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
		OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));
		/* advance source pointer by the byte offset before inlining: */
		dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
	}
	/* sz is 0 in the indirect case, so this loop only runs for SS_DIRECT */
	for (i = 0; i < sz; i++) {
		OUT_RING(ring, dwords[i]);
	}
}
93
/* Emit a CP_LOAD_STATE packet loading an array of buffer gpu-addresses
 * (one 64b address per entry) into consts starting at regid.
 *
 * write:   whether the shader writes the buffers (affects reloc flags)
 * num:     number of pointer entries; padded up to an even count since
 *          NUM_UNIT is in vec4 units (one vec4 == two 64b pointers)
 * prscs:   per-entry resource, NULL entries get a poison marker value
 * offsets: per-entry byte offset into the resource
 */
static void
fd5_emit_const_bo(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
		uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets)
{
	uint32_t anum = align(num, 2);
	uint32_t i;

	debug_assert((regid % 4) == 0);

	OUT_PKT7(ring, CP_LOAD_STATE, 3 + (2 * anum));
	OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/4) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
			CP_LOAD_STATE_0_NUM_UNIT(anum/2));
	OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
			CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
	OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));

	for (i = 0; i < num; i++) {
		if (prscs[i]) {
			/* each reloc emits two dwords (lo/hi of 64b address) */
			if (write) {
				OUT_RELOCW(ring, fd_resource(prscs[i])->bo, offsets[i], 0, 0);
			} else {
				OUT_RELOC(ring, fd_resource(prscs[i])->bo, offsets[i], 0, 0);
			}
		} else {
			/* recognizable poison value (encodes slot index) for
			 * unbound entries, to aid debugging:
			 */
			OUT_RING(ring, 0xbad00000 | (i << 16));
			OUT_RING(ring, 0xbad00000 | (i << 16));
		}
	}

	/* pad out to the aligned count: */
	for (; i < anum; i++) {
		OUT_RING(ring, 0xffffffff);
		OUT_RING(ring, 0xffffffff);
	}
}
130
131/* Border color layout is diff from a4xx/a5xx.. if it turns out to be
132 * the same as a6xx then move this somewhere common ;-)
133 *
134 * Entry layout looks like (total size, 0x60 bytes):
135 *
136 *   offset | description
137 *   -------+-------------
138 *     0x00 | fp32[0]
139 *          | fp32[1]
140 *          | fp32[2]
141 *          | fp32[3]
142 *     0x10 | uint16[0]
143 *          | uint16[1]
144 *          | uint16[2]
145 *          | uint16[3]
146 *     0x18 | int16[0]
147 *          | int16[1]
148 *          | int16[2]
149 *          | int16[3]
150 *     0x20 | fp16[0]
151 *          | fp16[1]
152 *          | fp16[2]
153 *          | fp16[3]
154 *     0x28 | ?? maybe padding ??
155 *     0x30 | uint8[0]
156 *          | uint8[1]
157 *          | uint8[2]
158 *          | uint8[3]
159 *     0x34 | int8[0]
160 *          | int8[1]
161 *          | int8[2]
162 *          | int8[3]
163 *     0x38 | ?? maybe padding ??
164 *
165 * Some uncertainty, because not clear that this actually works properly
166 * with blob, so who knows..
167 */
168
/* One border-color table entry; layout must match the hw format described
 * above (total size 0x60 bytes -- see STATIC_ASSERT in emit_border_color).
 */
struct PACKED bcolor_entry {
	uint32_t fp32[4];      /* 0x00: full-precision float per channel */
	uint16_t ui16[4];      /* 0x10: unorm16 / uint16 */
	int16_t  si16[4];      /* 0x18: snorm16 / sint16 */
	uint16_t fp16[4];      /* 0x20: half-float per channel */
	uint8_t  __pad0[8];    /* 0x28: hw layout gap (contents unknown) */
	uint8_t  ui8[4];       /* 0x30: unorm8 / uint8 */
	int8_t   si8[4];       /* 0x34: snorm8 / sint8 */
	uint8_t  __pad1[40];   /* 0x38: pad entry out to 0x60 bytes */
};
179
180#define FD5_BORDER_COLOR_SIZE        0x60
181#define FD5_BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * FD5_BORDER_COLOR_SIZE)
182#define FD5_BORDER_COLOR_OFFSET      8   /* TODO probably should be dynamic */
183
184static void
185setup_border_colors(struct fd_texture_stateobj *tex, struct bcolor_entry *entries)
186{
187	unsigned i, j;
188
189	debug_assert(tex->num_samplers < FD5_BORDER_COLOR_OFFSET);  // TODO
190
191	for (i = 0; i < tex->num_samplers; i++) {
192		struct bcolor_entry *e = &entries[i];
193		struct pipe_sampler_state *sampler = tex->samplers[i];
194		union pipe_color_union *bc;
195
196		if (!sampler)
197			continue;
198
199		bc = &sampler->border_color;
200
201		/*
202		 * XXX HACK ALERT XXX
203		 *
204		 * The border colors need to be swizzled in a particular
205		 * format-dependent order. Even though samplers don't know about
206		 * formats, we can assume that with a GL state tracker, there's a
207		 * 1:1 correspondence between sampler and texture. Take advantage
208		 * of that knowledge.
209		 */
210		if ((i >= tex->num_textures) || !tex->textures[i])
211			continue;
212
213		const struct util_format_description *desc =
214				util_format_description(tex->textures[i]->format);
215
216		for (j = 0; j < 4; j++) {
217			int c = desc->swizzle[j];
218
219			if (c >= 4)
220				continue;
221
222			if (desc->channel[c].pure_integer) {
223				float f = bc->i[c];
224
225				e->fp32[j] = fui(f);
226				e->fp16[j] = util_float_to_half(f);
227				e->ui16[j] = bc->ui[c];
228				e->si16[j] = bc->i[c];
229				e->ui8[j]  = bc->ui[c];
230				e->si8[j]  = bc->i[c];
231			} else {
232				float f = bc->f[c];
233
234				e->fp32[j] = fui(f);
235				e->fp16[j] = util_float_to_half(f);
236				e->ui16[j] = f * 65535.0;
237				e->si16[j] = f * 32767.5;
238				e->ui8[j]  = f * 255.0;
239				e->si8[j]  = f * 128.0;
240			}
241		}
242
243#ifdef DEBUG
244		memset(&e->__pad0, 0, sizeof(e->__pad0));
245		memset(&e->__pad1, 0, sizeof(e->__pad1));
246#endif
247	}
248}
249
/* Upload the combined vert+frag border color tables and point the hw
 * at them.  Called once per state emit when any bound sampler uses a
 * border-color wrap mode.
 */
static void
emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
{
	struct fd5_context *fd5_ctx = fd5_context(ctx);
	struct bcolor_entry *entries;
	unsigned off;
	void *ptr;

	STATIC_ASSERT(sizeof(struct bcolor_entry) == FD5_BORDER_COLOR_SIZE);

	/* transient upload buffer -- a fresh table is built each time: */
	u_upload_alloc(fd5_ctx->border_color_uploader,
			0, FD5_BORDER_COLOR_UPLOAD_SIZE,
			FD5_BORDER_COLOR_UPLOAD_SIZE, &off,
			&fd5_ctx->border_color_buf,
			&ptr);

	entries = ptr;

	/* vertex samplers first, fragment samplers appended after them --
	 * this matches the bcolor_offset computed in emit_textures():
	 */
	setup_border_colors(&ctx->verttex, &entries[0]);
	setup_border_colors(&ctx->fragtex, &entries[ctx->verttex.num_samplers]);

	OUT_PKT4(ring, REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
	OUT_RELOC(ring, fd_resource(fd5_ctx->border_color_buf)->bo, off, 0, 0);

	u_upload_unmap(fd5_ctx->border_color_uploader);
}
276
/* Emit sampler and texture state for one shader stage via CP_LOAD_STATE.
 *
 * sb:  which state block (SB_VERT_TEX or SB_FRAG_TEX)
 * tex: the stage's bound samplers/views
 *
 * Returns true if any sampler uses a border color, so the caller knows
 * to emit the border color table.
 */
static bool
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum adreno_state_block sb, struct fd_texture_stateobj *tex)
{
	bool needs_border = false;
	/* frag samplers' border colors live after the vert samplers' in the
	 * shared table (see emit_border_color()):
	 */
	unsigned bcolor_offset = (sb == SB_FRAG_TEX) ? ctx->verttex.num_samplers : 0;
	unsigned i;

	if (tex->num_samplers > 0) {
		/* output sampler state: 4 dwords per sampler */
		OUT_PKT7(ring, CP_LOAD_STATE, 3 + (4 * tex->num_samplers));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(tex->num_samplers));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));
		for (i = 0; i < tex->num_samplers; i++) {
			/* unbound slots get zeroed dummy state: */
			static const struct fd5_sampler_stateobj dummy_sampler = {};
			const struct fd5_sampler_stateobj *sampler = tex->samplers[i] ?
					fd5_sampler_stateobj(tex->samplers[i]) :
					&dummy_sampler;
			OUT_RING(ring, sampler->texsamp0);
			OUT_RING(ring, sampler->texsamp1);
			OUT_RING(ring, sampler->texsamp2 |
					A5XX_TEX_SAMP_2_BCOLOR_OFFSET(bcolor_offset));
			OUT_RING(ring, sampler->texsamp3);

			needs_border |= sampler->needs_border;
		}
	}

	if (tex->num_textures > 0) {
		unsigned num_textures = tex->num_textures;

		/* emit texture state: 12 dwords per texture descriptor */
		OUT_PKT7(ring, CP_LOAD_STATE, 3 + (12 * num_textures));
		OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
				CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
				CP_LOAD_STATE_0_STATE_BLOCK(sb) |
				CP_LOAD_STATE_0_NUM_UNIT(num_textures));
		OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
				CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
		OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));
		for (i = 0; i < tex->num_textures; i++) {
			static const struct fd5_pipe_sampler_view dummy_view = {};
			const struct fd5_pipe_sampler_view *view = tex->textures[i] ?
					fd5_pipe_sampler_view(tex->textures[i]) :
					&dummy_view;

			OUT_RING(ring, view->texconst0);
			OUT_RING(ring, view->texconst1);
			OUT_RING(ring, view->texconst2);
			OUT_RING(ring, view->texconst3);
			if (view->base.texture) {
				/* texconst4/5 hold the 64b address; texconst5's
				 * bits are OR'd into the reloc's high dword:
				 */
				struct fd_resource *rsc = fd_resource(view->base.texture);
				OUT_RELOC(ring, rsc->bo, view->offset,
						(uint64_t)view->texconst5 << 32, 0);
			} else {
				OUT_RING(ring, 0x00000000);
				OUT_RING(ring, view->texconst5);
			}
			OUT_RING(ring, view->texconst6);
			OUT_RING(ring, view->texconst7);
			OUT_RING(ring, view->texconst8);
			OUT_RING(ring, view->texconst9);
			OUT_RING(ring, view->texconst10);
			OUT_RING(ring, view->texconst11);
		}
	}

	return needs_border;
}
351
/* Emit vertex fetch (VFD) state: one FETCH/DECODE/DEST_CNTL group per
 * active (non-sysval) vertex shader input, in packed order (j), sourced
 * from the bound vertex elements/buffers.
 */
void
fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit)
{
	int32_t i, j;
	const struct fd_vertex_state *vtx = emit->vtx;
	const struct ir3_shader_variant *vp = fd5_emit_get_vp(emit);

	/* NOTE(review): '<=' (rather than '<') looks intentional -- presumably
	 * inputs[] has a valid entry at inputs_count -- but confirm against
	 * the ir3 shader-variant layout.
	 */
	for (i = 0, j = 0; i <= vp->inputs_count; i++) {
		if (vp->inputs[i].sysval)
			continue;
		if (vp->inputs[i].compmask) {
			struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
			const struct pipe_vertex_buffer *vb =
					&vtx->vertexbuf.vb[elem->vertex_buffer_index];
			struct fd_resource *rsc = fd_resource(vb->buffer);
			enum pipe_format pfmt = elem->src_format;
			enum a5xx_vtx_fmt fmt = fd5_pipe2vtx(pfmt);
			bool isint = util_format_is_pure_integer(pfmt);
			uint32_t off = vb->buffer_offset + elem->src_offset;
			/* fetchable size is whatever remains of the bo past off: */
			uint32_t size = fd_bo_size(rsc->bo) - off;
			debug_assert(fmt != ~0);

			OUT_PKT4(ring, REG_A5XX_VFD_FETCH(j), 4);
			OUT_RELOC(ring, rsc->bo, off, 0, 0);
			OUT_RING(ring, size);           /* VFD_FETCH[j].SIZE */
			OUT_RING(ring, vb->stride);     /* VFD_FETCH[j].STRIDE */

			OUT_PKT4(ring, REG_A5XX_VFD_DECODE(j), 2);
			OUT_RING(ring, A5XX_VFD_DECODE_INSTR_IDX(j) |
					A5XX_VFD_DECODE_INSTR_FORMAT(fmt) |
					COND(elem->instance_divisor, A5XX_VFD_DECODE_INSTR_INSTANCED) |
					A5XX_VFD_DECODE_INSTR_UNK30 |
					COND(!isint, A5XX_VFD_DECODE_INSTR_FLOAT));
			OUT_RING(ring, MAX2(1, elem->instance_divisor)); /* VFD_DECODE[j].STEP_RATE */

			OUT_PKT4(ring, REG_A5XX_VFD_DEST_CNTL(j), 1);
			OUT_RING(ring, A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(vp->inputs[i].compmask) |
					A5XX_VFD_DEST_CNTL_INSTR_REGID(vp->inputs[i].regid));

			j++;
		}
	}

	/* j is now the number of fetches actually emitted: */
	OUT_PKT4(ring, REG_A5XX_VFD_CONTROL_0, 1);
	OUT_RING(ring, A5XX_VFD_CONTROL_0_VTXCNT(j));
}
398
399void
400fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
401		struct fd5_emit *emit)
402{
403	const struct ir3_shader_variant *vp = fd5_emit_get_vp(emit);
404	const struct ir3_shader_variant *fp = fd5_emit_get_fp(emit);
405	uint32_t dirty = emit->dirty;
406	bool needs_border = false;
407
408	emit_marker5(ring, 5);
409
410	if ((dirty & FD_DIRTY_FRAMEBUFFER) && !emit->key.binning_pass) {
411		struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
412		unsigned char mrt_comp[A5XX_MAX_RENDER_TARGETS] = {0};
413
414		for (unsigned i = 0; i < A5XX_MAX_RENDER_TARGETS; i++) {
415			mrt_comp[i] = ((i < pfb->nr_cbufs) && pfb->cbufs[i]) ? 0xf : 0;
416		}
417
418		OUT_PKT4(ring, REG_A5XX_RB_RENDER_COMPONENTS, 1);
419		OUT_RING(ring, A5XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
420				A5XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
421				A5XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
422				A5XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
423				A5XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
424				A5XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
425				A5XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
426				A5XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));
427	}
428
429	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_FRAMEBUFFER)) {
430		struct fd5_zsa_stateobj *zsa = fd5_zsa_stateobj(ctx->zsa);
431		struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
432		uint32_t rb_alpha_control = zsa->rb_alpha_control;
433
434		if (util_format_is_pure_integer(pipe_surface_format(pfb->cbufs[0])))
435			rb_alpha_control &= ~A5XX_RB_ALPHA_CONTROL_ALPHA_TEST;
436
437		OUT_PKT4(ring, REG_A5XX_RB_ALPHA_CONTROL, 1);
438		OUT_RING(ring, rb_alpha_control);
439
440		OUT_PKT4(ring, REG_A5XX_RB_STENCIL_CONTROL, 1);
441		OUT_RING(ring, zsa->rb_stencil_control);
442	}
443
444	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
445		struct fd5_zsa_stateobj *zsa = fd5_zsa_stateobj(ctx->zsa);
446		struct pipe_stencil_ref *sr = &ctx->stencil_ref;
447
448		OUT_PKT4(ring, REG_A5XX_RB_STENCILREFMASK, 1);
449		OUT_RING(ring, zsa->rb_stencilrefmask |
450				A5XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[0]));
451	}
452
453	if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
454		struct fd5_zsa_stateobj *zsa = fd5_zsa_stateobj(ctx->zsa);
455		bool fragz = fp->has_kill | fp->writes_pos;
456
457		OUT_PKT4(ring, REG_A5XX_RB_DEPTH_CNTL, 1);
458		OUT_RING(ring, zsa->rb_depth_cntl);
459
460		OUT_PKT4(ring, REG_A5XX_RB_DEPTH_PLANE_CNTL, 1);
461		OUT_RING(ring, COND(fragz, A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z) |
462				COND(fragz && fp->frag_coord, A5XX_RB_DEPTH_PLANE_CNTL_UNK1));
463
464		OUT_PKT4(ring, REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
465		OUT_RING(ring, COND(fragz, A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z) |
466				COND(fragz && fp->frag_coord, A5XX_GRAS_SU_DEPTH_PLANE_CNTL_UNK1));
467	}
468
469	if (dirty & FD_DIRTY_RASTERIZER) {
470		struct fd5_rasterizer_stateobj *rasterizer =
471				fd5_rasterizer_stateobj(ctx->rasterizer);
472
473		OUT_PKT4(ring, REG_A5XX_GRAS_SU_CNTL, 1);
474		OUT_RING(ring, rasterizer->gras_su_cntl);
475
476		OUT_PKT4(ring, REG_A5XX_GRAS_SU_POINT_MINMAX, 2);
477		OUT_RING(ring, rasterizer->gras_su_point_minmax);
478		OUT_RING(ring, rasterizer->gras_su_point_size);
479
480		OUT_PKT4(ring, REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
481		OUT_RING(ring, rasterizer->gras_su_poly_offset_scale);
482		OUT_RING(ring, rasterizer->gras_su_poly_offset_offset);
483		OUT_RING(ring, rasterizer->gras_su_poly_offset_clamp);
484	}
485
486	/* NOTE: since primitive_restart is not actually part of any
487	 * state object, we need to make sure that we always emit
488	 * PRIM_VTX_CNTL.. either that or be more clever and detect
489	 * when it changes.
490	 */
491	if (emit->info) {
492		struct fd5_rasterizer_stateobj *rast =
493			fd5_rasterizer_stateobj(ctx->rasterizer);
494		uint32_t val = rast->pc_prim_vtx_cntl;
495
496		val |= COND(vp->writes_psize, A5XX_PC_PRIM_VTX_CNTL_PSIZE);
497
498		OUT_PKT4(ring, REG_A5XX_PC_PRIM_VTX_CNTL, 1);
499		OUT_RING(ring, val);
500	}
501
502	if (dirty & FD_DIRTY_SCISSOR) {
503		struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
504
505		OUT_PKT4(ring, REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0, 2);
506		OUT_RING(ring, A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(scissor->minx) |
507				A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(scissor->miny));
508		OUT_RING(ring, A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(scissor->maxx - 1) |
509				A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(scissor->maxy - 1));
510
511		OUT_PKT4(ring, REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0, 2);
512		OUT_RING(ring, A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(scissor->minx) |
513				A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(scissor->miny));
514		OUT_RING(ring, A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(scissor->maxx - 1) |
515				A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(scissor->maxy - 1));
516
517		ctx->batch->max_scissor.minx = MIN2(ctx->batch->max_scissor.minx, scissor->minx);
518		ctx->batch->max_scissor.miny = MIN2(ctx->batch->max_scissor.miny, scissor->miny);
519		ctx->batch->max_scissor.maxx = MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
520		ctx->batch->max_scissor.maxy = MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
521	}
522
523	if (dirty & FD_DIRTY_VIEWPORT) {
524		fd_wfi(ctx->batch, ring);
525		OUT_PKT4(ring, REG_A5XX_GRAS_CL_VPORT_XOFFSET_0, 6);
526		OUT_RING(ring, A5XX_GRAS_CL_VPORT_XOFFSET_0(ctx->viewport.translate[0]));
527		OUT_RING(ring, A5XX_GRAS_CL_VPORT_XSCALE_0(ctx->viewport.scale[0]));
528		OUT_RING(ring, A5XX_GRAS_CL_VPORT_YOFFSET_0(ctx->viewport.translate[1]));
529		OUT_RING(ring, A5XX_GRAS_CL_VPORT_YSCALE_0(ctx->viewport.scale[1]));
530		OUT_RING(ring, A5XX_GRAS_CL_VPORT_ZOFFSET_0(ctx->viewport.translate[2]));
531		OUT_RING(ring, A5XX_GRAS_CL_VPORT_ZSCALE_0(ctx->viewport.scale[2]));
532	}
533
534	if (dirty & FD_DIRTY_PROG)
535		fd5_program_emit(ring, emit);
536
537	if (dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_RASTERIZER)) {
538		struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
539		uint32_t posz_regid = ir3_find_output_regid(fp, FRAG_RESULT_DEPTH);
540		unsigned nr = pfb->nr_cbufs;
541
542		if (emit->key.binning_pass)
543			nr = 0;
544		else if (ctx->rasterizer->rasterizer_discard)
545			nr = 0;
546
547		OUT_PKT4(ring, REG_A5XX_RB_FS_OUTPUT_CNTL, 1);
548		OUT_RING(ring, A5XX_RB_FS_OUTPUT_CNTL_MRT(nr) |
549				COND(fp->writes_pos, A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z));
550
551		OUT_PKT4(ring, REG_A5XX_SP_FS_OUTPUT_CNTL, 1);
552		OUT_RING(ring, A5XX_SP_FS_OUTPUT_CNTL_MRT(nr) |
553				A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(posz_regid) |
554				A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(regid(63, 0)));
555	}
556
557	if (emit->prog == &ctx->prog) { /* evil hack to deal sanely with clear path */
558		ir3_emit_consts(vp, ring, ctx, emit->info, dirty);
559		if (!emit->key.binning_pass)
560			ir3_emit_consts(fp, ring, ctx, emit->info, dirty);
561
562		struct pipe_stream_output_info *info = &vp->shader->stream_output;
563		if (info->num_outputs) {
564			struct fd_streamout_stateobj *so = &ctx->streamout;
565
566			for (unsigned i = 0; i < so->num_targets; i++) {
567				struct pipe_stream_output_target *target = so->targets[i];
568
569				if (!target)
570					continue;
571
572				unsigned offset = (so->offsets[i] * info->stride[i] * 4) +
573						target->buffer_offset;
574
575				OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_BASE_LO(i), 3);
576				/* VPC_SO[i].BUFFER_BASE_LO: */
577				OUT_RELOCW(ring, fd_resource(target->buffer)->bo, 0, 0, 0);
578				OUT_RING(ring, target->buffer_size + offset);
579
580				OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(i), 3);
581				OUT_RING(ring, offset);
582				/* VPC_SO[i].FLUSH_BASE_LO/HI: */
583				// TODO just give hw a dummy addr for now.. we should
584				// be using this an then CP_MEM_TO_REG to set the
585				// VPC_SO[i].BUFFER_OFFSET for the next draw..
586				OUT_RELOCW(ring, fd5_context(ctx)->blit_mem, 0x100, 0, 0);
587
588				emit->streamout_mask |= (1 << i);
589			}
590		}
591	}
592
593	if ((dirty & FD_DIRTY_BLEND)) {
594		struct fd5_blend_stateobj *blend = fd5_blend_stateobj(ctx->blend);
595		uint32_t i;
596
597		for (i = 0; i < A5XX_MAX_RENDER_TARGETS; i++) {
598			enum pipe_format format = pipe_surface_format(
599					ctx->batch->framebuffer.cbufs[i]);
600			bool is_int = util_format_is_pure_integer(format);
601			bool has_alpha = util_format_has_alpha(format);
602			uint32_t control = blend->rb_mrt[i].control;
603			uint32_t blend_control = blend->rb_mrt[i].blend_control_alpha;
604
605			if (is_int) {
606				control &= A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
607//				control |= A5XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
608			}
609
610			if (has_alpha) {
611				blend_control |= blend->rb_mrt[i].blend_control_rgb;
612			} else {
613				blend_control |= blend->rb_mrt[i].blend_control_no_alpha_rgb;
614				control &= ~A5XX_RB_MRT_CONTROL_BLEND2;
615			}
616
617			OUT_PKT4(ring, REG_A5XX_RB_MRT_CONTROL(i), 1);
618			OUT_RING(ring, control);
619
620			OUT_PKT4(ring, REG_A5XX_RB_MRT_BLEND_CONTROL(i), 1);
621			OUT_RING(ring, blend_control);
622		}
623
624		OUT_PKT4(ring, REG_A5XX_RB_BLEND_CNTL, 1);
625		OUT_RING(ring, blend->rb_blend_cntl |
626				A5XX_RB_BLEND_CNTL_SAMPLE_MASK(0xffff));
627
628		OUT_PKT4(ring, REG_A5XX_SP_BLEND_CNTL, 1);
629		OUT_RING(ring, 0x00000100);
630	}
631
632	if (dirty & FD_DIRTY_BLEND_COLOR) {
633		struct pipe_blend_color *bcolor = &ctx->blend_color;
634
635		OUT_PKT4(ring, REG_A5XX_RB_BLEND_RED, 8);
636		OUT_RING(ring, A5XX_RB_BLEND_RED_FLOAT(bcolor->color[0]) |
637				A5XX_RB_BLEND_RED_UINT(bcolor->color[0] * 0xff) |
638				A5XX_RB_BLEND_RED_SINT(bcolor->color[0] * 0x7f));
639		OUT_RING(ring, A5XX_RB_BLEND_RED_F32(bcolor->color[0]));
640		OUT_RING(ring, A5XX_RB_BLEND_GREEN_FLOAT(bcolor->color[1]) |
641				A5XX_RB_BLEND_GREEN_UINT(bcolor->color[1] * 0xff) |
642				A5XX_RB_BLEND_GREEN_SINT(bcolor->color[1] * 0x7f));
643		OUT_RING(ring, A5XX_RB_BLEND_RED_F32(bcolor->color[1]));
644		OUT_RING(ring, A5XX_RB_BLEND_BLUE_FLOAT(bcolor->color[2]) |
645				A5XX_RB_BLEND_BLUE_UINT(bcolor->color[2] * 0xff) |
646				A5XX_RB_BLEND_BLUE_SINT(bcolor->color[2] * 0x7f));
647		OUT_RING(ring, A5XX_RB_BLEND_BLUE_F32(bcolor->color[2]));
648		OUT_RING(ring, A5XX_RB_BLEND_ALPHA_FLOAT(bcolor->color[3]) |
649				A5XX_RB_BLEND_ALPHA_UINT(bcolor->color[3] * 0xff) |
650				A5XX_RB_BLEND_ALPHA_SINT(bcolor->color[3] * 0x7f));
651		OUT_RING(ring, A5XX_RB_BLEND_ALPHA_F32(bcolor->color[3]));
652	}
653
654	if (dirty & FD_DIRTY_VERTTEX) {
655		if (vp->has_samp) {
656			needs_border |= emit_textures(ctx, ring, SB_VERT_TEX, &ctx->verttex);
657			OUT_PKT4(ring, REG_A5XX_TPL1_VS_TEX_COUNT, 1);
658			OUT_RING(ring, ctx->verttex.num_textures);
659		} else {
660			dirty &= ~FD_DIRTY_VERTTEX;
661		}
662	}
663
664	if (dirty & FD_DIRTY_FRAGTEX) {
665		if (fp->has_samp) {
666			needs_border |= emit_textures(ctx, ring, SB_FRAG_TEX, &ctx->fragtex);
667			OUT_PKT4(ring, REG_A5XX_TPL1_FS_TEX_COUNT, 1);
668			OUT_RING(ring, ctx->fragtex.num_textures);
669		} else {
670			dirty &= ~FD_DIRTY_FRAGTEX;
671		}
672	}
673
674	if (needs_border)
675		emit_border_color(ctx, ring);
676
677	ctx->dirty &= ~dirty;
678}
679
/* emit setup at begin of new cmdstream buffer (don't rely on previous
 * state, there could have been a context switch between ioctls):
 */
void
fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	struct fd_context *ctx = batch->ctx;

	fd5_set_render_mode(ctx, ring, BYPASS);
	fd5_cache_flush(batch, ring);

	OUT_PKT4(ring, REG_A5XX_HLSQ_UPDATE_CNTL, 1);
	OUT_RING(ring, 0xfffff);

/* captured from blob cmdstream; not (yet) replicated here:
t7              opcode: CP_PERFCOUNTER_ACTION (50) (4 dwords)
0000000500024048:               70d08003 00000000 001c5000 00000005
t7              opcode: CP_PERFCOUNTER_ACTION (50) (4 dwords)
0000000500024058:               70d08003 00000010 001c7000 00000005

t7              opcode: CP_WAIT_FOR_IDLE (26) (1 dwords)
0000000500024068:               70268000
*/

	OUT_PKT4(ring, REG_A5XX_PC_RESTART_INDEX, 1);
	OUT_RING(ring, 0xffffffff);

	OUT_PKT4(ring, REG_A5XX_PC_RASTER_CNTL, 1);
	OUT_RING(ring, 0x00000012);

	OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_GRAS_SU_POINT_MINMAX, 2);
	OUT_RING(ring, A5XX_GRAS_SU_POINT_MINMAX_MIN(1.0) |
			A5XX_GRAS_SU_POINT_MINMAX_MAX(4092.0));
	OUT_RING(ring, A5XX_GRAS_SU_POINT_SIZE(0.5));

	OUT_PKT4(ring, REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* GRAS_SU_CONSERVATIVE_RAS_CNTL */

	OUT_PKT4(ring, REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* GRAS_SC_SCREEN_SCISSOR_CNTL */

	OUT_PKT4(ring, REG_A5XX_SP_VS_CONFIG_MAX_CONST, 1);
	OUT_RING(ring, 0);            /* SP_VS_CONFIG_MAX_CONST */

	OUT_PKT4(ring, REG_A5XX_SP_FS_CONFIG_MAX_CONST, 1);
	OUT_RING(ring, 0);            /* SP_FS_CONFIG_MAX_CONST */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E292, 2);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E292 */
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E293 */

	/* mode/eco control values below match what the blob driver programs;
	 * meanings are largely unknown:
	 */
	OUT_PKT4(ring, REG_A5XX_RB_MODE_CNTL, 1);
	OUT_RING(ring, 0x00000044);   /* RB_MODE_CNTL */

	OUT_PKT4(ring, REG_A5XX_RB_DBG_ECO_CNTL, 1);
	OUT_RING(ring, 0x00100000);   /* RB_DBG_ECO_CNTL */

	OUT_PKT4(ring, REG_A5XX_VFD_MODE_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* VFD_MODE_CNTL */

	OUT_PKT4(ring, REG_A5XX_PC_MODE_CNTL, 1);
	OUT_RING(ring, 0x0000001f);   /* PC_MODE_CNTL */

	OUT_PKT4(ring, REG_A5XX_SP_MODE_CNTL, 1);
	OUT_RING(ring, 0x0000001e);   /* SP_MODE_CNTL */

	OUT_PKT4(ring, REG_A5XX_SP_DBG_ECO_CNTL, 1);
	OUT_RING(ring, 0x40000800);   /* SP_DBG_ECO_CNTL */

	OUT_PKT4(ring, REG_A5XX_TPL1_MODE_CNTL, 1);
	OUT_RING(ring, 0x00000544);   /* TPL1_MODE_CNTL */

	OUT_PKT4(ring, REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0, 2);
	OUT_RING(ring, 0x00000080);   /* HLSQ_TIMEOUT_THRESHOLD_0 */
	OUT_RING(ring, 0x00000000);   /* HLSQ_TIMEOUT_THRESHOLD_1 */

	OUT_PKT4(ring, REG_A5XX_VPC_DBG_ECO_CNTL, 1);
	OUT_RING(ring, 0x00000400);   /* VPC_DBG_ECO_CNTL */

	OUT_PKT4(ring, REG_A5XX_HLSQ_MODE_CNTL, 1);
	OUT_RING(ring, 0x00000001);   /* HLSQ_MODE_CNTL */

	OUT_PKT4(ring, REG_A5XX_VPC_MODE_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* VPC_MODE_CNTL */

	/* we don't use this yet.. probably best to disable.. */
	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
	OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
			CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
			CP_SET_DRAW_STATE__0_GROUP_ID(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
	OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

	/* other regs not used (yet?) and always seem to have same value: */
	OUT_PKT4(ring, REG_A5XX_GRAS_CL_CNTL, 1);
	OUT_RING(ring, 0x00000080);   /* GRAS_CL_CNTL */

	OUT_PKT4(ring, REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* GRAS_SU_CONSERVATIVE_RAS_CNTL */

	/* NOTE(review): GRAS_SC_BIN_CNTL is written twice back-to-back --
	 * looks like copy-paste duplication; harmless but could be dropped.
	 */
	OUT_PKT4(ring, REG_A5XX_GRAS_SC_BIN_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* GRAS_SC_BIN_CNTL */

	OUT_PKT4(ring, REG_A5XX_GRAS_SC_BIN_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* GRAS_SC_BIN_CNTL */

	OUT_PKT4(ring, REG_A5XX_VPC_FS_PRIMITIVEID_CNTL, 1);
	OUT_RING(ring, 0x000000ff);   /* VPC_FS_PRIMITIVEID_CNTL */

	/* reset/disable all streamout state: */
	OUT_PKT4(ring, REG_A5XX_VPC_SO_OVERRIDE, 1);
	OUT_RING(ring, A5XX_VPC_SO_OVERRIDE_SO_DISABLE);

	OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_BASE_LO(0), 3);
	OUT_RING(ring, 0x00000000);   /* VPC_SO_BUFFER_BASE_LO_0 */
	OUT_RING(ring, 0x00000000);   /* VPC_SO_BUFFER_BASE_HI_0 */
	OUT_RING(ring, 0x00000000);   /* VPC_SO_BUFFER_SIZE_0 */

	OUT_PKT4(ring, REG_A5XX_VPC_SO_FLUSH_BASE_LO(0), 2);
	OUT_RING(ring, 0x00000000);   /* VPC_SO_FLUSH_BASE_LO_0 */
	OUT_RING(ring, 0x00000000);   /* VPC_SO_FLUSH_BASE_HI_0 */

	OUT_PKT4(ring, REG_A5XX_PC_GS_PARAM, 1);
	OUT_RING(ring, 0x00000000);   /* PC_GS_PARAM */

	OUT_PKT4(ring, REG_A5XX_PC_HS_PARAM, 1);
	OUT_RING(ring, 0x00000000);   /* PC_HS_PARAM */

	OUT_PKT4(ring, REG_A5XX_TPL1_TP_FS_ROTATION_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* TPL1_TP_FS_ROTATION_CNTL */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E001, 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E001 */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E004, 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E004 */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E093, 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E093 */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E1C7, 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E1C7 */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E29A, 1);
	OUT_RING(ring, 0x00ffff00);   /* UNKNOWN_E29A */

	OUT_PKT4(ring, REG_A5XX_VPC_SO_BUF_CNTL, 1);
	OUT_RING(ring, 0x00000000);   /* VPC_SO_BUF_CNTL */

	OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(0), 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E2AB */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E389, 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E389 */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E38D, 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E38D */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E5AB, 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E5AB */

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E5C2, 1);
	OUT_RING(ring, 0x00000000);   /* UNKNOWN_E5C2 */

	/* zero streamout buffer/offset state for streams 1..3: */
	OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_BASE_LO(1), 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(1), 6);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(2), 6);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(3), 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E5DB, 1);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E600, 1);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E640, 1);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_TPL1_VS_TEX_COUNT, 4);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_TPL1_FS_TEX_COUNT, 2);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7C0, 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7C5, 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7CA, 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7CF, 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7D4, 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7D9, 3);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	// TODO hacks.. these should not be hardcoded:
	OUT_PKT4(ring, REG_A5XX_GRAS_SC_CNTL, 1);
	OUT_RING(ring, 0x00000008);   /* GRAS_SC_CNTL */

	fd_hw_query_enable(batch, ring);
}
927
/* ctx->emit_ib hook: emit an indirect-branch to a secondary ringbuffer
 * (a5xx uses the CP_INDIRECT_BUFFER flavor wrapped by __OUT_IB5).
 */
static void
fd5_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
	__OUT_IB5(ring, target);
}
933
934void
935fd5_emit_init(struct pipe_context *pctx)
936{
937	struct fd_context *ctx = fd_context(pctx);
938	ctx->emit_const = fd5_emit_const;
939	ctx->emit_const_bo = fd5_emit_const_bo;
940	ctx->emit_ib = fd5_emit_ib;
941}
942