/* r600_state_common.c revision b13b7b86b2e1165b24a2df20cb67f9f3baa17b13 */
1/*
2 * Copyright 2010 Red Hat Inc.
3 *           2010 Jerome Glisse
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie <airlied@redhat.com>
25 *          Jerome Glisse <jglisse@redhat.com>
26 */
27#include <util/u_memory.h>
28#include <util/u_format.h>
29#include <pipebuffer/pb_buffer.h>
30#include "pipe/p_shader_tokens.h"
31#include "r600_pipe.h"
32#include "r600d.h"
33
34/* common state between evergreen and r600 */
35void r600_bind_blend_state(struct pipe_context *ctx, void *state)
36{
37	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
38	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
39	struct r600_pipe_state *rstate;
40
41	if (state == NULL)
42		return;
43	rstate = &blend->rstate;
44	rctx->states[rstate->id] = rstate;
45	rctx->cb_target_mask = blend->cb_target_mask;
46	r600_context_pipe_state_set(&rctx->ctx, rstate);
47}
48
49void r600_bind_rs_state(struct pipe_context *ctx, void *state)
50{
51	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
52	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
53
54	if (state == NULL)
55		return;
56
57	rctx->flatshade = rs->flatshade;
58	rctx->sprite_coord_enable = rs->sprite_coord_enable;
59	rctx->rasterizer = rs;
60
61	rctx->states[rs->rstate.id] = &rs->rstate;
62	r600_context_pipe_state_set(&rctx->ctx, &rs->rstate);
63
64	if (rctx->family >= CHIP_CEDAR) {
65		evergreen_polygon_offset_update(rctx);
66	} else {
67		r600_polygon_offset_update(rctx);
68	}
69}
70
71void r600_delete_rs_state(struct pipe_context *ctx, void *state)
72{
73	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
74	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
75
76	if (rctx->rasterizer == rs) {
77		rctx->rasterizer = NULL;
78	}
79	if (rctx->states[rs->rstate.id] == &rs->rstate) {
80		rctx->states[rs->rstate.id] = NULL;
81	}
82	free(rs);
83}
84
85void r600_sampler_view_destroy(struct pipe_context *ctx,
86			       struct pipe_sampler_view *state)
87{
88	struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;
89
90	pipe_resource_reference(&state->texture, NULL);
91	FREE(resource);
92}
93
94void r600_bind_state(struct pipe_context *ctx, void *state)
95{
96	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
97	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;
98
99	if (state == NULL)
100		return;
101	rctx->states[rstate->id] = rstate;
102	r600_context_pipe_state_set(&rctx->ctx, rstate);
103}
104
105void r600_delete_state(struct pipe_context *ctx, void *state)
106{
107	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
108	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;
109
110	if (rctx->states[rstate->id] == rstate) {
111		rctx->states[rstate->id] = NULL;
112	}
113	for (int i = 0; i < rstate->nregs; i++) {
114		r600_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
115	}
116	free(rstate);
117}
118
119void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
120{
121	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
122	struct r600_vertex_element *v = (struct r600_vertex_element*)state;
123
124	rctx->vertex_elements = v;
125	if (v) {
126		rctx->states[v->rstate.id] = &v->rstate;
127		r600_context_pipe_state_set(&rctx->ctx, &v->rstate);
128	}
129}
130
131void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
132{
133	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
134	struct r600_vertex_element *v = (struct r600_vertex_element*)state;
135
136	if (rctx->states[v->rstate.id] == &v->rstate) {
137		rctx->states[v->rstate.id] = NULL;
138	}
139	if (rctx->vertex_elements == state)
140		rctx->vertex_elements = NULL;
141
142	r600_bo_reference(rctx->radeon, &v->fetch_shader, NULL);
143	FREE(state);
144}
145
146
147void r600_set_index_buffer(struct pipe_context *ctx,
148			   const struct pipe_index_buffer *ib)
149{
150	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
151
152	if (ib) {
153		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
154		memcpy(&rctx->index_buffer, ib, sizeof(rctx->index_buffer));
155	} else {
156		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
157		memset(&rctx->index_buffer, 0, sizeof(rctx->index_buffer));
158	}
159
160	/* TODO make this more like a state */
161}
162
/* Bind the application's vertex buffers.
 *
 * Two parallel arrays are maintained: vertex_buffer[] mirrors what the
 * state tracker passed in, while real_vertex_buffer[] holds the
 * GPU-visible resource actually fetched from (user buffers are uploaded
 * later at draw time, flagged via any_user_vbs). Also computes
 * vb_max_index, the smallest per-buffer vertex capacity across all
 * bound, strided buffers. */
void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *buffers)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct pipe_vertex_buffer *vbo;
	unsigned max_index = ~0;
	int i;

	for (i = 0; i < count; i++) {
		vbo = (struct pipe_vertex_buffer*)&buffers[i];

		/* Take a reference on the incoming buffer and drop any stale
		 * GPU-visible buffer from a previous bind. */
		pipe_resource_reference(&rctx->vertex_buffer[i].buffer, vbo->buffer);
		pipe_resource_reference(&rctx->real_vertex_buffer[i], NULL);

		if (!vbo->buffer) {
			/* Zero states. */
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			} else {
				r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			}
			continue;
		}

		if (r600_is_user_buffer(vbo->buffer)) {
			/* User buffers get uploaded at draw time; no real
			 * (GPU-visible) buffer to reference yet. */
			rctx->any_user_vbs = TRUE;
			continue;
		}

		pipe_resource_reference(&rctx->real_vertex_buffer[i], vbo->buffer);

		/* The stride of zero means we will be fetching only the first
		 * vertex, so don't care about max_index. */
		if (!vbo->stride) {
			continue;
		}

		/* Update the maximum index. */
		{
		    unsigned vbo_max_index =
			  (vbo->buffer->width0 - vbo->buffer_offset) / vbo->stride;
		    max_index = MIN2(max_index, vbo_max_index);
		}
	}

	/* Unbind any slots left over from a previous, larger bind. */
	for (; i < rctx->nreal_vertex_buffers; i++) {
		pipe_resource_reference(&rctx->vertex_buffer[i].buffer, NULL);
		pipe_resource_reference(&rctx->real_vertex_buffer[i], NULL);

		/* Zero states. */
		if (rctx->family >= CHIP_CEDAR) {
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		} else {
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		}
	}

	/* Copy stride/offset/etc. wholesale; the buffer pointers were
	 * already reference-counted above and carry the same values. */
	memcpy(rctx->vertex_buffer, buffers, sizeof(struct pipe_vertex_buffer) * count);

	rctx->nvertex_buffers = count;
	rctx->nreal_vertex_buffers = count;
	rctx->vb_max_index = max_index;
}
226
227
/* Expands to a case label that substitutes one pipe format for another
 * inside the switch below. */
#define FORMAT_REPLACE(what, withwhat) \
	case PIPE_FORMAT_##what: *format = PIPE_FORMAT_##withwhat; break

/* Create a vertex-elements CSO: copy the element array, substitute
 * hardware-fetchable formats for ones the fetch unit cannot handle,
 * and build the fetch shader.
 * Returns NULL on allocation or fetch-shader-build failure. */
void *r600_create_vertex_elements(struct pipe_context *ctx,
				  unsigned count,
				  const struct pipe_vertex_element *elements)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);
	enum pipe_format *format;
	int i;

	/* NOTE(review): assumes v->elements has room for at least 32
	 * entries -- confirm against the struct definition. */
	assert(count < 32);
	if (!v)
		return NULL;

	v->count = count;
	memcpy(v->elements, elements, count * sizeof(struct pipe_vertex_element));

	for (i = 0; i < count; i++) {
		v->hw_format[i] = v->elements[i].src_format;
		format = &v->hw_format[i];

		switch (*format) {
		/* 64-bit float formats are fetched as 32-bit floats. */
		FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
		FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
		FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
		FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);

		/* r600 doesn't seem to support 32_*SCALED, these formats
		 * aren't in D3D10 either. */
		FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
		FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
		FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
		FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);

		FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
		FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
		FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
		FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);

		FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
		FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
		FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
		FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);

		FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
		FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
		FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
		FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
		default:;
		}
		/* Remember whether any element needed a substitute format;
		 * if so, the draw path must run vertex translation. */
		v->incompatible_layout =
			v->incompatible_layout ||
			v->elements[i].src_format != v->hw_format[i];

		v->hw_format_size[i] = align(util_format_get_blocksize(v->hw_format[i]), 4);
	}

	if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
		FREE(v);
		return NULL;
	}

	return v;
}
294
295void *r600_create_shader_state(struct pipe_context *ctx,
296			       const struct pipe_shader_state *state)
297{
298	struct r600_pipe_shader *shader =  CALLOC_STRUCT(r600_pipe_shader);
299	int r;
300
301	r =  r600_pipe_shader_create(ctx, shader, state->tokens);
302	if (r) {
303		return NULL;
304	}
305	return shader;
306}
307
308void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
309{
310	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
311
312	/* TODO delete old shader */
313	rctx->ps_shader = (struct r600_pipe_shader *)state;
314	if (state) {
315		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);
316	}
317}
318
319void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
320{
321	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
322
323	/* TODO delete old shader */
324	rctx->vs_shader = (struct r600_pipe_shader *)state;
325	if (state) {
326		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_shader->rstate);
327	}
328}
329
330void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
331{
332	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
333	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;
334
335	if (rctx->ps_shader == shader) {
336		rctx->ps_shader = NULL;
337	}
338
339	r600_pipe_shader_destroy(ctx, shader);
340	free(shader);
341}
342
343void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
344{
345	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
346	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;
347
348	if (rctx->vs_shader == shader) {
349		rctx->vs_shader = NULL;
350	}
351
352	r600_pipe_shader_destroy(ctx, shader);
353	free(shader);
354}
355
/* FIXME optimize away spi update when it's not needed */
/* Rebuild the SPI_PS_INPUT_CNTL_n registers that route vertex-shader
 * outputs to pixel-shader inputs: semantic mapping, flat shading,
 * point-sprite texture coordinates, and (pre-evergreen only)
 * centroid/linear interpolation selects.
 * NOTE(review): rstate.id is never initialized before
 * r600_context_pipe_state_set() is called with &rstate -- confirm
 * whether a dedicated state id should be assigned here, as the other
 * on-stack r600_pipe_state users (e.g. r600_draw_vbo's vgt) do. */
void r600_spi_update(struct r600_pipe_context *rctx)
{
	struct r600_pipe_shader *shader = rctx->ps_shader;
	struct r600_pipe_state rstate;
	struct r600_shader *rshader = &shader->shader;
	unsigned i, tmp;

	rstate.nregs = 0;
	for (i = 0; i < rshader->ninput; i++) {
		/* Map this PS input to the matching VS output slot. */
		tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i));

		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
		    rshader->input[i].name == TGSI_SEMANTIC_BCOLOR ||
		    rshader->input[i].name == TGSI_SEMANTIC_POSITION) {
			tmp |= S_028644_FLAT_SHADE(rctx->flatshade);
		}

		/* Replace generic inputs with point-sprite texcoords when
		 * the rasterizer enables sprite coords for this slot. */
		if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
		    rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

                if (rctx->family < CHIP_CEDAR) {
                    if (rshader->input[i].centroid)
                            tmp |= S_028644_SEL_CENTROID(1);

                    if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR)
                            tmp |= S_028644_SEL_LINEAR(1);
                }

		r600_pipe_state_add_reg(&rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, tmp, 0xFFFFFFFF, NULL);
	}
	r600_context_pipe_state_set(&rctx->ctx, &rstate);
}
391
392void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
393			      struct pipe_resource *buffer)
394{
395	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
396	struct r600_resource_buffer *rbuffer = r600_buffer(buffer);
397	uint32_t offset;
398
399	/* Note that the state tracker can unbind constant buffers by
400	 * passing NULL here.
401	 */
402	if (buffer == NULL) {
403		return;
404	}
405
406	r600_upload_const_buffer(rctx, &rbuffer, &offset);
407
408	switch (shader) {
409	case PIPE_SHADER_VERTEX:
410		rctx->vs_const_buffer.nregs = 0;
411		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
412					R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
413					ALIGN_DIVUP(buffer->width0 >> 4, 16),
414					0xFFFFFFFF, NULL);
415		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
416					R_028980_ALU_CONST_CACHE_VS_0,
417					(r600_bo_offset(rbuffer->r.bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->r.bo);
418		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);
419		break;
420	case PIPE_SHADER_FRAGMENT:
421		rctx->ps_const_buffer.nregs = 0;
422		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
423					R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
424					ALIGN_DIVUP(buffer->width0 >> 4, 16),
425					0xFFFFFFFF, NULL);
426		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
427					R_028940_ALU_CONST_CACHE_PS_0,
428					(r600_bo_offset(rbuffer->r.bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->r.bo);
429		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);
430		break;
431	default:
432		R600_ERR("unsupported %d\n", shader);
433		return;
434	}
435
436	if (buffer != &rbuffer->r.base.b)
437		pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL);
438}
439
/* Emit the per-slot vertex fetch resources for the current draw.
 *
 * Two layouts are possible: when the vertex-elements CSO needs
 * per-element offsets (vbuffer_need_offset), one fetch resource is
 * emitted per vertex element; otherwise one resource is emitted per
 * real (GPU-visible) vertex buffer. */
static void r600_vertex_buffer_update(struct r600_pipe_context *rctx)
{
	struct r600_pipe_state *rstate;
	struct r600_resource *rbuffer;
	struct pipe_vertex_buffer *vertex_buffer;
	unsigned i, offset;

	if (rctx->vertex_elements->vbuffer_need_offset) {
		/* one resource per vertex elements */
		rctx->nvs_resource = rctx->vertex_elements->count;
	} else {
		/* bind vertex buffer once */
		rctx->nvs_resource = rctx->nreal_vertex_buffers;
	}

	for (i = 0 ; i < rctx->nvs_resource; i++) {
		rstate = &rctx->vs_resource[i];
		rstate->id = R600_PIPE_STATE_RESOURCE;
		rstate->nregs = 0;

		if (rctx->vertex_elements->vbuffer_need_offset) {
			/* one resource per vertex elements */
			unsigned vbuffer_index;
			vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
			vertex_buffer = &rctx->vertex_buffer[vbuffer_index];
			rbuffer = (struct r600_resource*)rctx->real_vertex_buffer[vbuffer_index];
			offset = rctx->vertex_elements->vbuffer_offset[i];
		} else {
			/* bind vertex buffer once */
			vertex_buffer = &rctx->vertex_buffer[i];
			rbuffer = (struct r600_resource*)rctx->real_vertex_buffer[i];
			offset = 0;
		}
		/* Slot may be unbound (e.g. user buffer not yet uploaded). */
		if (vertex_buffer == NULL || rbuffer == NULL)
			continue;
		/* Final fetch address = element offset + binding offset +
		 * buffer-object base offset. */
		offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo);

		if (rctx->family >= CHIP_CEDAR) {
			evergreen_pipe_add_vertex_attrib(rctx, rstate, i,
							 rbuffer, offset,
							 vertex_buffer->stride);
		} else {
			r600_pipe_add_vertex_attrib(rctx, rstate, i,
						    rbuffer, offset,
						    vertex_buffer->stride);
		}
	}
}
488
489void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
490{
491	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
492	struct r600_resource *rbuffer;
493	u32 vgt_dma_index_type, vgt_draw_initiator, mask;
494	struct r600_draw rdraw;
495	struct r600_pipe_state vgt;
496	struct r600_drawl draw = {};
497	unsigned prim;
498
499	r600_flush_depth_textures(rctx);
500
501	if (rctx->vertex_elements->incompatible_layout) {
502		r600_begin_vertex_translate(rctx, info->min_index, info->max_index);
503	}
504
505	if (rctx->any_user_vbs) {
506		r600_upload_user_buffers(rctx, info->min_index, info->max_index);
507	}
508
509	r600_vertex_buffer_update(rctx);
510
511	draw.info = *info;
512	draw.ctx = ctx;
513	if (info->indexed && rctx->index_buffer.buffer) {
514		draw.info.start += rctx->index_buffer.offset / rctx->index_buffer.index_size;
515
516		r600_translate_index_buffer(rctx, &rctx->index_buffer.buffer,
517					    &rctx->index_buffer.index_size,
518					    &draw.info.start,
519					    info->count);
520
521		draw.index_size = rctx->index_buffer.index_size;
522		pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer);
523		draw.index_buffer_offset = draw.info.start * draw.index_size;
524		draw.info.start = 0;
525
526		if (r600_is_user_buffer(draw.index_buffer)) {
527			r600_upload_index_buffer(rctx, &draw);
528		}
529	} else {
530		draw.info.index_bias = info->start;
531	}
532
533	switch (draw.index_size) {
534	case 2:
535		vgt_draw_initiator = 0;
536		vgt_dma_index_type = 0;
537		break;
538	case 4:
539		vgt_draw_initiator = 0;
540		vgt_dma_index_type = 1;
541		break;
542	case 0:
543		vgt_draw_initiator = 2;
544		vgt_dma_index_type = 0;
545		break;
546	default:
547		R600_ERR("unsupported index size %d\n", draw.index_size);
548		return;
549	}
550	if (r600_conv_pipe_prim(draw.info.mode, &prim))
551		return;
552	if (unlikely(rctx->ps_shader == NULL)) {
553		R600_ERR("missing vertex shader\n");
554		return;
555	}
556	if (unlikely(rctx->vs_shader == NULL)) {
557		R600_ERR("missing vertex shader\n");
558		return;
559	}
560	/* there should be enough input */
561	if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) {
562		R600_ERR("%d resources provided, expecting %d\n",
563			rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource);
564		return;
565	}
566
567	r600_spi_update(rctx);
568
569	mask = 0;
570	for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
571		mask |= (0xF << (i * 4));
572	}
573
574	vgt.id = R600_PIPE_STATE_VGT;
575	vgt.nregs = 0;
576	r600_pipe_state_add_reg(&vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL);
577	r600_pipe_state_add_reg(&vgt, R_028408_VGT_INDX_OFFSET, draw.info.index_bias, 0xFFFFFFFF, NULL);
578	r600_pipe_state_add_reg(&vgt, R_028400_VGT_MAX_VTX_INDX, draw.info.max_index, 0xFFFFFFFF, NULL);
579	r600_pipe_state_add_reg(&vgt, R_028404_VGT_MIN_VTX_INDX, draw.info.min_index, 0xFFFFFFFF, NULL);
580	r600_pipe_state_add_reg(&vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL);
581	r600_pipe_state_add_reg(&vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL);
582	r600_pipe_state_add_reg(&vgt, R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0xFFFFFFFF, NULL);
583	r600_context_pipe_state_set(&rctx->ctx, &vgt);
584
585	rdraw.vgt_num_indices = draw.info.count;
586	rdraw.vgt_num_instances = 1;
587	rdraw.vgt_index_type = vgt_dma_index_type;
588	rdraw.vgt_draw_initiator = vgt_draw_initiator;
589	rdraw.indices = NULL;
590	if (draw.index_buffer) {
591		rbuffer = (struct r600_resource*)draw.index_buffer;
592		rdraw.indices = rbuffer->bo;
593		rdraw.indices_bo_offset = draw.index_buffer_offset;
594	}
595
596	if (rctx->family >= CHIP_CEDAR) {
597		evergreen_context_draw(&rctx->ctx, &rdraw);
598	} else {
599		r600_context_draw(&rctx->ctx, &rdraw);
600	}
601
602	if (rctx->framebuffer.zsbuf)
603	{
604		struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
605		((struct r600_resource_texture *)tex)->dirty_db = TRUE;
606	}
607
608	pipe_resource_reference(&draw.index_buffer, NULL);
609
610	/* delete previous translated vertex elements */
611	if (rctx->tran.new_velems) {
612		r600_end_vertex_translate(rctx);
613	}
614}
615