r600_state_common.c revision de0adb691feb2ae7f64dd74ed6bc5a9e0f493631
/*
 * Copyright 2010 Red Hat Inc.
 *           2010 Jerome Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include <util/u_memory.h>
#include <util/u_format.h>
#include <pipebuffer/pb_buffer.h>
#include "pipe/p_shader_tokens.h"
#include "r600_formats.h"
#include "r600_pipe.h"
#include "r600d.h"

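/* Translate a PIPE_PRIM_* value into the matching VGT_PRIMITIVE_TYPE value.
 * Entries of -1 mark primitive types this driver does not handle; those
 * return an error instead of a register value.
 */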
static int r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
{
	static const int prim_conv[] = {
		V_008958_DI_PT_POINTLIST,
		V_008958_DI_PT_LINELIST,
		V_008958_DI_PT_LINELOOP,
		V_008958_DI_PT_LINESTRIP,
		V_008958_DI_PT_TRILIST,
		V_008958_DI_PT_TRISTRIP,
		V_008958_DI_PT_TRIFAN,
		V_008958_DI_PT_QUADLIST,
		V_008958_DI_PT_QUADSTRIP,
		V_008958_DI_PT_POLYGON,
		-1,
		-1,
		-1,
		-1
	};

	*prim = prim_conv[pprim];
	if (*prim == -1) {
		fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
		return -1;
	}
	return 0;
}

/* common state between evergreen and r600 */
void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
	struct r600_pipe_state *rstate;

	if (state == NULL)
		return;
	rstate = &blend->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->cb_target_mask = blend->cb_target_mask;
	r600_context_pipe_state_set(&rctx->ctx, rstate);
}

void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_dsa *dsa = state;
	struct r600_pipe_state *rstate;

	if (state == NULL)
		return;
	rstate = &dsa->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->alpha_ref = dsa->alpha_ref;
	rctx->alpha_ref_dirty = true;
	r600_context_pipe_state_set(&rctx->ctx, rstate);
}

void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (state == NULL)
		return;

	rctx->flatshade = rs->flatshade;
	rctx->sprite_coord_enable = rs->sprite_coord_enable;
	rctx->rasterizer = rs;

	rctx->states[rs->rstate.id] = &rs->rstate;
	r600_context_pipe_state_set(&rctx->ctx, &rs->rstate);

	if (rctx->family >= CHIP_CEDAR) {
		evergreen_polygon_offset_update(rctx);
	} else {
		r600_polygon_offset_update(rctx);
	}
}

void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;

	if (rctx->rasterizer == rs) {
		rctx->rasterizer = NULL;
	}
	if (rctx->states[rs->rstate.id] == &rs->rstate) {
		rctx->states[rs->rstate.id] = NULL;
	}
	free(rs);
}

void r600_sampler_view_destroy(struct pipe_context *ctx,
			       struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;

	pipe_resource_reference(&state->texture, NULL);
	FREE(resource);
}

void r600_delete_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;

	if (rctx->states[rstate->id] == rstate) {
		rctx->states[rstate->id] = NULL;
	}
	for (int i = 0; i < rstate->nregs; i++) {
		r600_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
	}
	free(rstate);
}

void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	rctx->vertex_elements = v;
	if (v) {
		u_vbuf_mgr_bind_vertex_elements(rctx->vbuf_mgr, state,
						v->vmgr_elements);

		rctx->states[v->rstate.id] = &v->rstate;
		r600_context_pipe_state_set(&rctx->ctx, &v->rstate);
	}
}

void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	if (rctx->states[v->rstate.id] == &v->rstate) {
		rctx->states[v->rstate.id] = NULL;
	}
	if (rctx->vertex_elements == state)
		rctx->vertex_elements = NULL;

	r600_bo_reference(rctx->radeon, &v->fetch_shader, NULL);
	u_vbuf_mgr_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
	FREE(state);
}

void r600_set_index_buffer(struct pipe_context *ctx,
			   const struct pipe_index_buffer *ib)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(rctx->index_buffer));
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
		memset(&rctx->index_buffer, 0, sizeof(rctx->index_buffer));
	}

	/* TODO make this more like a state */
}

void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *buffers)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	int i;

	/* Zero states. */
	for (i = 0; i < count; i++) {
		if (!buffers[i].buffer) {
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			} else {
				r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			}
		}
	}
	for (; i < rctx->vbuf_mgr->nr_real_vertex_buffers; i++) {
		if (rctx->family >= CHIP_CEDAR) {
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		} else {
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		}
	}

	u_vbuf_mgr_set_vertex_buffers(rctx->vbuf_mgr, count, buffers);
}

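/* Create a vertex-elements CSO: mirror the element list into the u_vbuf
 * manager and build the vertex fetch shader matching this layout.
 */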
void *r600_create_vertex_elements(struct pipe_context *ctx,
				  unsigned count,
				  const struct pipe_vertex_element *elements)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);

	assert(count < 32);
	if (!v)
		return NULL;

	v->count = count;
	v->vmgr_elements =
		u_vbuf_mgr_create_vertex_elements(rctx->vbuf_mgr, count,
						  elements, v->elements);

	if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
		FREE(v);
		return NULL;
	}

	return v;
}

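/* Shared VS/PS create hook: translate the TGSI tokens into an r600 shader. */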
void *r600_create_shader_state(struct pipe_context *ctx,
			       const struct pipe_shader_state *state)
{
	struct r600_pipe_shader *shader = CALLOC_STRUCT(r600_pipe_shader);
	int r;

	if (shader == NULL)
		return NULL;

	r = r600_pipe_shader_create(ctx, shader, state->tokens);
	if (r) {
		/* don't leak the shader struct on compile failure */
		FREE(shader);
		return NULL;
	}
	return shader;
}

void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	/* TODO delete old shader */
	rctx->ps_shader = (struct r600_pipe_shader *)state;
	if (state) {
		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);
	}
}

void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	/* TODO delete old shader */
	rctx->vs_shader = (struct r600_pipe_shader *)state;
	if (state) {
		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_shader->rstate);
	}
}

void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->ps_shader == shader) {
		rctx->ps_shader = NULL;
	}

	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->vs_shader == shader) {
		rctx->vs_shader = NULL;
	}

	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

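/* Re-emit SX_ALPHA_REF when the bound DSA state changed the alpha reference.
 * With 16-bits-per-channel exports the low bits of the value are cleared
 * before the register is written.
 */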
static void r600_update_alpha_ref(struct r600_pipe_context *rctx)
{
	unsigned alpha_ref = rctx->alpha_ref;
	struct r600_pipe_state rstate;

	if (!rctx->alpha_ref_dirty)
		return;

	rstate.nregs = 0;
	if (rctx->export_16bpc)
		alpha_ref &= ~0x1FFF;
	r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref, 0xFFFFFFFF, NULL);

	r600_context_pipe_state_set(&rctx->ctx, &rstate);
	rctx->alpha_ref_dirty = false;
}

/* FIXME optimize away spi update when it's not needed */
static void r600_spi_block_init(struct r600_pipe_context *rctx, struct r600_pipe_state *rstate)
{
	int i;
	rstate->nregs = 0;
	rstate->id = R600_PIPE_STATE_SPI;
	for (i = 0; i < 32; i++) {
		r600_pipe_state_add_reg(rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, 0, 0xFFFFFFFF, NULL);
	}
}

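/* Fill one SPI_PS_INPUT_CNTL_n register per pixel shader input: the semantic
 * index is looked up from the vertex shader outputs, the flat-shade bit
 * follows the rasterizer setting for color/back-color/position inputs,
 * point-sprite texturing is enabled for generics selected by
 * sprite_coord_enable, and pre-evergreen chips also get their
 * centroid/linear interpolation selects set here.
 */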
static void r600_spi_update(struct r600_pipe_context *rctx, unsigned prim)
{
	struct r600_pipe_shader *shader = rctx->ps_shader;
	struct r600_pipe_state *rstate = &rctx->spi;
	struct r600_shader *rshader = &shader->shader;
	unsigned i, tmp;

	if (rctx->spi.id == 0)
		r600_spi_block_init(rctx, &rctx->spi);

	rstate->nregs = 0;
	for (i = 0; i < rshader->ninput; i++) {
		tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i));

		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
		    rshader->input[i].name == TGSI_SEMANTIC_BCOLOR ||
		    rshader->input[i].name == TGSI_SEMANTIC_POSITION) {
			tmp |= S_028644_FLAT_SHADE(rctx->flatshade);
		}

		if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
		    rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		if (rctx->family < CHIP_CEDAR) {
			if (rshader->input[i].centroid)
				tmp |= S_028644_SEL_CENTROID(1);

			if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR)
				tmp |= S_028644_SEL_LINEAR(1);
		}

		r600_pipe_state_mod_reg(rstate, tmp);
	}

	r600_context_pipe_state_set(&rctx->ctx, rstate);
}

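/* Upload the constant buffer and point the selected shader stage at it:
 * ALU_CONST_BUFFER_SIZE/ALU_CONST_CACHE are programmed for constant buffer 0
 * and the buffer is also bound as a fetch resource at the given index.
 */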
void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
			      struct pipe_resource *buffer)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_resource_buffer *rbuffer = r600_buffer(buffer);
	struct r600_pipe_state *rstate;
	uint32_t offset;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (buffer == NULL) {
		return;
	}

	r600_upload_const_buffer(rctx, &rbuffer, &offset);
	offset += r600_bo_offset(rbuffer->r.bo);

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		rctx->vs_const_buffer.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
					R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
					ALIGN_DIVUP(buffer->width0 >> 4, 16),
					0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
					R_028980_ALU_CONST_CACHE_VS_0,
					offset >> 8, 0xFFFFFFFF, rbuffer->r.bo);
		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);

		rstate = &rctx->vs_const_buffer_resource[index];
		if (!rstate->id) {
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_pipe_init_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16);
			}
		}

		if (rctx->family >= CHIP_CEDAR) {
			evergreen_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16);
			evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
		} else {
			r600_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16);
			r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
		}
		break;
	case PIPE_SHADER_FRAGMENT:
		rctx->ps_const_buffer.nregs = 0;
		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
					R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
					ALIGN_DIVUP(buffer->width0 >> 4, 16),
					0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
					R_028940_ALU_CONST_CACHE_PS_0,
					offset >> 8, 0xFFFFFFFF, rbuffer->r.bo);
		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);

		rstate = &rctx->ps_const_buffer_resource[index];
		if (!rstate->id) {
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_pipe_init_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16);
			}
		}
		if (rctx->family >= CHIP_CEDAR) {
			evergreen_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16);
			evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
		} else {
			r600_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16);
			r600_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
		}
		break;
	default:
		R600_ERR("unsupported %d\n", shader);
		return;
	}

	if (buffer != &rbuffer->r.b.b.b)
		pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL);
}

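/* Rebuild the vertex-fetch (FS) resources before a draw.  With per-element
 * offsets one resource is emitted per vertex element, otherwise one per real
 * vertex buffer; each gets its buffer offset plus the BO offset applied.
 */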
static void r600_vertex_buffer_update(struct r600_pipe_context *rctx)
{
	struct r600_pipe_state *rstate;
	struct r600_resource *rbuffer;
	struct pipe_vertex_buffer *vertex_buffer;
	unsigned i, count, offset;

	if (rctx->vertex_elements->vbuffer_need_offset) {
		/* one resource per vertex element */
		count = rctx->vertex_elements->count;
	} else {
		/* bind each vertex buffer once */
		count = rctx->vbuf_mgr->nr_real_vertex_buffers;
	}

	for (i = 0; i < count; i++) {
		rstate = &rctx->fs_resource[i];

		if (rctx->vertex_elements->vbuffer_need_offset) {
			/* one resource per vertex element */
			unsigned vbuffer_index;
			vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
			vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[vbuffer_index];
			rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
			offset = rctx->vertex_elements->vbuffer_offset[i];
		} else {
			/* bind each vertex buffer once */
			vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[i];
			rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[i];
			offset = 0;
		}
		if (vertex_buffer == NULL || rbuffer == NULL)
			continue;
		offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo);

		if (!rstate->id) {
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_pipe_init_buffer_resource(rctx, rstate, rbuffer, offset, vertex_buffer->stride);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate, rbuffer, offset, vertex_buffer->stride);
			}
		}

		if (rctx->family >= CHIP_CEDAR) {
			evergreen_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride);
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
		} else {
			r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride);
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
		}
	}
}

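/* Main draw entry point: translate/upload the index buffer if needed,
 * refresh derived state (alpha ref, SPI inputs, VGT registers) and hand the
 * draw off to the chip-family specific context_draw().
 */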
void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_resource *rbuffer;
	u32 vgt_dma_index_type, vgt_dma_swap_mode, vgt_draw_initiator, mask;
	struct r600_draw rdraw;
	struct r600_drawl draw = {};
	unsigned prim;

	r600_flush_depth_textures(rctx);
	u_vbuf_mgr_draw_begin(rctx->vbuf_mgr, info, NULL, NULL);
	r600_vertex_buffer_update(rctx);

	draw.info = *info;
	draw.ctx = ctx;
	if (info->indexed && rctx->index_buffer.buffer) {
		draw.info.start += rctx->index_buffer.offset / rctx->index_buffer.index_size;
		pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer);

		r600_translate_index_buffer(rctx, &draw.index_buffer,
					    &rctx->index_buffer.index_size,
					    &draw.info.start,
					    info->count);

		draw.index_size = rctx->index_buffer.index_size;
		draw.index_buffer_offset = draw.info.start * draw.index_size;
		draw.info.start = 0;

		if (u_vbuf_resource(draw.index_buffer)->user_ptr) {
			r600_upload_index_buffer(rctx, &draw);
		}
	} else {
		draw.info.index_bias = info->start;
	}

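	/* Pick the VGT DMA index type and draw initiator from the index size:
	 * 2 -> 16-bit indices, 4 -> 32-bit indices, 0 -> auto-generated
	 * indices (non-indexed draw).  Big-endian hosts also request the
	 * matching index swap mode.
	 */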
	vgt_dma_swap_mode = 0;
	switch (draw.index_size) {
	case 2:
		vgt_draw_initiator = 0;
		vgt_dma_index_type = 0;
		if (R600_BIG_ENDIAN) {
			vgt_dma_swap_mode = ENDIAN_8IN16;
		}
		break;
	case 4:
		vgt_draw_initiator = 0;
		vgt_dma_index_type = 1;
		if (R600_BIG_ENDIAN) {
			vgt_dma_swap_mode = ENDIAN_8IN32;
		}
		break;
	case 0:
		vgt_draw_initiator = 2;
		vgt_dma_index_type = 0;
		break;
	default:
		R600_ERR("unsupported index size %d\n", draw.index_size);
		return;
	}
	if (r600_conv_pipe_prim(draw.info.mode, &prim))
		return;
	if (unlikely(rctx->ps_shader == NULL)) {
		R600_ERR("missing pixel shader\n");
		return;
	}
	if (unlikely(rctx->vs_shader == NULL)) {
		R600_ERR("missing vertex shader\n");
		return;
	}
	/* there should be enough inputs */
	if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) {
		R600_ERR("%d resources provided, expecting %d\n",
			rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource);
		return;
	}

	r600_update_alpha_ref(rctx);
	r600_spi_update(rctx, draw.info.mode);

	mask = 0;
	for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
		mask |= (0xF << (i * 4));
	}

	if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
		rctx->vgt.id = R600_PIPE_STATE_VGT;
		rctx->vgt.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028400_VGT_MAX_VTX_INDX, draw.info.max_index, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028404_VGT_MIN_VTX_INDX, draw.info.min_index, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, draw.info.index_bias, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, draw.info.start_instance, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028814_PA_SU_SC_MODE_CNTL,
					0,
					S_028814_PROVOKING_VTX_LAST(1), NULL);
	}

	rctx->vgt.nregs = 0;
	r600_pipe_state_mod_reg(&rctx->vgt, prim);
	r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask);
	r600_pipe_state_mod_reg(&rctx->vgt, draw.info.max_index);
	r600_pipe_state_mod_reg(&rctx->vgt, draw.info.min_index);
	r600_pipe_state_mod_reg(&rctx->vgt, draw.info.index_bias);
	r600_pipe_state_mod_reg(&rctx->vgt, 0);
	r600_pipe_state_mod_reg(&rctx->vgt, draw.info.start_instance);
	if (draw.info.mode == PIPE_PRIM_QUADS || draw.info.mode == PIPE_PRIM_QUAD_STRIP || draw.info.mode == PIPE_PRIM_POLYGON) {
		r600_pipe_state_mod_reg(&rctx->vgt, S_028814_PROVOKING_VTX_LAST(1));
	}

	r600_context_pipe_state_set(&rctx->ctx, &rctx->vgt);

	rdraw.vgt_num_indices = draw.info.count;
	rdraw.vgt_num_instances = draw.info.instance_count;
	rdraw.vgt_index_type = vgt_dma_index_type | (vgt_dma_swap_mode << 2);
	rdraw.vgt_draw_initiator = vgt_draw_initiator;
	rdraw.indices = NULL;
	if (draw.index_buffer) {
		rbuffer = (struct r600_resource*)draw.index_buffer;
		rdraw.indices = rbuffer->bo;
		rdraw.indices_bo_offset = draw.index_buffer_offset;
	}

	if (rctx->family >= CHIP_CEDAR) {
		evergreen_context_draw(&rctx->ctx, &rdraw);
	} else {
		r600_context_draw(&rctx->ctx, &rdraw);
	}

	if (rctx->framebuffer.zsbuf) {
		struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
		((struct r600_resource_texture *)tex)->dirty_db = TRUE;
	}

	pipe_resource_reference(&draw.index_buffer, NULL);

	u_vbuf_mgr_draw_end(rctx->vbuf_mgr);
}

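/* Append a register write to a pipe state.  The offset is resolved through
 * the context's range/block tables so the value is stored relative to its
 * owning register block; the _noblock variant keeps the raw offset and skips
 * that lookup.
 */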
void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      u32 offset, u32 value, u32 mask,
			      u32 range_id, u32 block_id,
			      struct r600_bo *bo)
{
	struct r600_range *range;
	struct r600_block *block;

	range = &ctx->range[range_id];
	block = range->blocks[block_id];
	state->regs[state->nregs].block = block;
	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;

	state->regs[state->nregs].value = value;
	state->regs[state->nregs].mask = mask;
	state->regs[state->nregs].bo = bo;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     u32 offset, u32 value, u32 mask,
				     struct r600_bo *bo)
{
	state->regs[state->nregs].id = offset;
	state->regs[state->nregs].block = NULL;
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].mask = mask;
	state->regs[state->nregs].bo = bo;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}