r600_state_common.c revision bb1f0cf3508630a9a93512c79badf8c493c46743
/*
 * Copyright 2010 Red Hat Inc.
 *           2010 Jerome Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "util/u_blitter.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "pipebuffer/pb_buffer.h"
#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "r600_formats.h"
#include "r600_pipe.h"
#include "r600d.h"

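/* Map a PIPE_PRIM_* primitive onto the hardware VGT primitive type.
 * Entries of -1 mark primitive types this driver does not support. */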
static bool r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
{
	static const int prim_conv[] = {
		V_008958_DI_PT_POINTLIST,
		V_008958_DI_PT_LINELIST,
		V_008958_DI_PT_LINELOOP,
		V_008958_DI_PT_LINESTRIP,
		V_008958_DI_PT_TRILIST,
		V_008958_DI_PT_TRISTRIP,
		V_008958_DI_PT_TRIFAN,
		V_008958_DI_PT_QUADLIST,
		V_008958_DI_PT_QUADSTRIP,
		V_008958_DI_PT_POLYGON,
		-1,
		-1,
		-1,
		-1
	};

	*prim = prim_conv[pprim];
	if (*prim == -1) {
		fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
		return false;
	}
	return true;
}

/* common state between evergreen and r600 */
void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
	struct r600_pipe_state *rstate;

	if (state == NULL)
		return;
	rstate = &blend->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->cb_target_mask = blend->cb_target_mask;
	r600_context_pipe_state_set(&rctx->ctx, rstate);
}

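/* Binding a DSA state also latches its alpha reference value; the actual
 * SX_ALPHA_REF register write is deferred to draw time, see
 * r600_update_alpha_ref(). */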
void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_dsa *dsa = state;
	struct r600_pipe_state *rstate;

	if (state == NULL)
		return;
	rstate = &dsa->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->alpha_ref = dsa->alpha_ref;
	rctx->alpha_ref_dirty = true;
	r600_context_pipe_state_set(&rctx->ctx, rstate);
}

void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (state == NULL)
		return;

	rctx->clamp_vertex_color = rs->clamp_vertex_color;
	rctx->clamp_fragment_color = rs->clamp_fragment_color;

	rctx->sprite_coord_enable = rs->sprite_coord_enable;

	rctx->rasterizer = rs;

	rctx->states[rs->rstate.id] = &rs->rstate;
	r600_context_pipe_state_set(&rctx->ctx, &rs->rstate);

	if (rctx->chip_class >= EVERGREEN) {
		evergreen_polygon_offset_update(rctx);
	} else {
		r600_polygon_offset_update(rctx);
	}
}

void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;

	if (rctx->rasterizer == rs) {
		rctx->rasterizer = NULL;
	}
	if (rctx->states[rs->rstate.id] == &rs->rstate) {
		rctx->states[rs->rstate.id] = NULL;
	}
	free(rs);
}

void r600_sampler_view_destroy(struct pipe_context *ctx,
			       struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;

	pipe_resource_reference(&state->texture, NULL);
	FREE(resource);
}

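/* Generic destructor for register-based states: drop the buffer references
 * held by individual register writes before freeing the state itself. */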
void r600_delete_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;

	if (rctx->states[rstate->id] == rstate) {
		rctx->states[rstate->id] = NULL;
	}
	for (int i = 0; i < rstate->nregs; i++) {
		pipe_resource_reference((struct pipe_resource**)&rstate->regs[i].bo, NULL);
	}
	free(rstate);
}

void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	rctx->vertex_elements = v;
	if (v) {
		u_vbuf_bind_vertex_elements(rctx->vbuf_mgr, state,
						v->vmgr_elements);

		rctx->states[v->rstate.id] = &v->rstate;
		r600_context_pipe_state_set(&rctx->ctx, &v->rstate);
	}
}

void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	if (rctx->states[v->rstate.id] == &v->rstate) {
		rctx->states[v->rstate.id] = NULL;
	}
	if (rctx->vertex_elements == state)
		rctx->vertex_elements = NULL;

	pipe_resource_reference((struct pipe_resource**)&v->fetch_shader, NULL);
	u_vbuf_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
	FREE(state);
}

void r600_set_index_buffer(struct pipe_context *ctx,
			   const struct pipe_index_buffer *ib)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	u_vbuf_set_index_buffer(rctx->vbuf_mgr, ib);
}

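/* Bind the new vertex buffer set.  Slots whose buffer is NULL, and
 * previously bound slots beyond the new set, have their hardware fetch
 * resources cleared before the set is handed to the vbuf manager. */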
void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *buffers)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	int i;

	/* Zero states. */
	for (i = 0; i < count; i++) {
		if (!buffers[i].buffer) {
			if (rctx->chip_class >= EVERGREEN) {
				evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			} else {
				r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			}
		}
	}
	for (; i < rctx->vbuf_mgr->nr_real_vertex_buffers; i++) {
		if (rctx->chip_class >= EVERGREEN) {
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		} else {
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		}
	}

	u_vbuf_set_vertex_buffers(rctx->vbuf_mgr, count, buffers);
}

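/* Create the CSO for a vertex element layout.  The matching fetch shader is
 * generated up front so draws only need to bind it. */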
void *r600_create_vertex_elements(struct pipe_context *ctx,
				  unsigned count,
				  const struct pipe_vertex_element *elements)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);

	assert(count < 32);
	if (!v)
		return NULL;

	v->count = count;
	v->vmgr_elements =
		u_vbuf_create_vertex_elements(rctx->vbuf_mgr, count,
						  elements, v->elements);

	if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
		/* don't leak the vbuf-manager elements on failure */
		u_vbuf_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
		FREE(v);
		return NULL;
	}

	return v;
}

void *r600_create_shader_state(struct pipe_context *ctx,
			       const struct pipe_shader_state *state)
{
	struct r600_pipe_shader *shader = CALLOC_STRUCT(r600_pipe_shader);
	int r;

	if (!shader)
		return NULL;

	shader->tokens = tgsi_dup_tokens(state->tokens);
	shader->so = state->stream_output;

	r = r600_pipe_shader_create(ctx, shader);
	if (r) {
		/* don't leak the duplicated tokens or the shader struct */
		free(shader->tokens);
		FREE(shader);
		return NULL;
	}
	return shader;
}

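/* The GPR file is split between the bound VS and PS, so whenever both
 * shaders are known the split is recomputed via r600_adjust_gprs(). */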
void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	/* TODO delete old shader */
	rctx->ps_shader = (struct r600_pipe_shader *)state;
	if (state) {
		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);
	}
	if (rctx->ps_shader && rctx->vs_shader) {
		r600_adjust_gprs(rctx);
	}
}

void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	/* TODO delete old shader */
	rctx->vs_shader = (struct r600_pipe_shader *)state;
	if (state) {
		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_shader->rstate);
	}
	if (rctx->ps_shader && rctx->vs_shader) {
		r600_adjust_gprs(rctx);
	}
}

void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->ps_shader == shader) {
		rctx->ps_shader = NULL;
	}

	free(shader->tokens);
	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->vs_shader == shader) {
		rctx->vs_shader = NULL;
	}

	free(shader->tokens);
	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

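/* Emit the cached alpha reference.  With 16-bits-per-channel export the low
 * bits of the 32-bit float reference are masked off, presumably to match the
 * reduced precision the hardware compares against in that mode. */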
static void r600_update_alpha_ref(struct r600_pipe_context *rctx)
{
	unsigned alpha_ref;
	struct r600_pipe_state rstate;

	alpha_ref = rctx->alpha_ref;
	rstate.nregs = 0;
	if (rctx->export_16bpc)
		alpha_ref &= ~0x1FFF;
	r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref, 0xFFFFFFFF, NULL, 0);

	r600_context_pipe_state_set(&rctx->ctx, &rstate);
	rctx->alpha_ref_dirty = false;
}

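/* Bind a constant buffer to a shader stage.  User buffers are uploaded
 * first; the buffer is then programmed both into the stage's ALU
 * constant-cache registers (size and 256-byte-aligned GPU address) and as a
 * fetch resource. */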
void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
			      struct pipe_resource *buffer)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_resource *rbuffer = r600_resource(buffer);
	struct r600_pipe_resource_state *rstate;
	uint64_t va_offset;
	uint32_t offset;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (buffer == NULL) {
		return;
	}

	r600_upload_const_buffer(rctx, &rbuffer, &offset);
	va_offset = r600_resource_va(ctx->screen, (void*)rbuffer);
	va_offset += offset;
	va_offset >>= 8;

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		rctx->vs_const_buffer.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
					R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
					ALIGN_DIVUP(buffer->width0 >> 4, 16),
					0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
					R_028980_ALU_CONST_CACHE_VS_0,
					va_offset, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);

		rstate = &rctx->vs_const_buffer_resource[index];
		if (!rstate->id) {
			if (rctx->chip_class >= EVERGREEN) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}

		if (rctx->chip_class >= EVERGREEN) {
			evergreen_pipe_mod_buffer_resource(ctx, rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
			evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
		} else {
			r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
			r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
		}
		break;
	case PIPE_SHADER_FRAGMENT:
		rctx->ps_const_buffer.nregs = 0;
		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
					R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
					ALIGN_DIVUP(buffer->width0 >> 4, 16),
					0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
					R_028940_ALU_CONST_CACHE_PS_0,
					va_offset, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);

		rstate = &rctx->ps_const_buffer_resource[index];
		if (!rstate->id) {
			if (rctx->chip_class >= EVERGREEN) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}
		if (rctx->chip_class >= EVERGREEN) {
			evergreen_pipe_mod_buffer_resource(ctx, rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
			evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
		} else {
			r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, 16, RADEON_USAGE_READ);
			r600_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
		}
		break;
	default:
		R600_ERR("unsupported %d\n", shader);
		return;
	}

	if (buffer != &rbuffer->b.b.b)
		pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL);
}

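/* Create a stream-output target.  filled_size is a 4-byte GPU buffer in
 * which the hardware tracks how many bytes have been streamed out; it is
 * zeroed here so appending starts at the beginning of the buffer. */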
struct pipe_stream_output_target *
r600_create_so_target(struct pipe_context *ctx,
		      struct pipe_resource *buffer,
		      unsigned buffer_offset,
		      unsigned buffer_size)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_so_target *t;
	void *ptr;

	t = CALLOC_STRUCT(r600_so_target);
	if (!t) {
		return NULL;
	}

	t->b.reference.count = 1;
	t->b.context = ctx;
	pipe_resource_reference(&t->b.buffer, buffer);
	t->b.buffer_offset = buffer_offset;
	t->b.buffer_size = buffer_size;

	t->filled_size = (struct r600_resource*)
		pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
	ptr = rctx->ws->buffer_map(t->filled_size->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
	memset(ptr, 0, t->filled_size->buf->size);
	rctx->ws->buffer_unmap(t->filled_size->buf);

	return &t->b;
}

void r600_so_target_destroy(struct pipe_context *ctx,
			    struct pipe_stream_output_target *target)
{
	struct r600_so_target *t = (struct r600_so_target*)target;
	pipe_resource_reference(&t->b.buffer, NULL);
	pipe_resource_reference((struct pipe_resource**)&t->filled_size, NULL);
	FREE(t);
}

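/* Rebind the set of stream-output targets.  Any streamout in progress is
 * stopped first; the new targets take effect at the next draw. */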
void r600_set_so_targets(struct pipe_context *ctx,
			 unsigned num_targets,
			 struct pipe_stream_output_target **targets,
			 unsigned append_bitmask)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	unsigned i;

	/* Stop streamout. */
	if (rctx->ctx.num_so_targets) {
		r600_context_streamout_end(&rctx->ctx);
	}

	/* Set the new targets. */
	for (i = 0; i < num_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->ctx.so_targets[i], targets[i]);
	}
	for (; i < rctx->ctx.num_so_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->ctx.so_targets[i], NULL);
	}

	rctx->ctx.num_so_targets = num_targets;
	rctx->ctx.streamout_start = num_targets != 0;
	rctx->ctx.streamout_append_bitmask = append_bitmask;
}

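/* (Re)bind the fetch resources for the current vertex buffers.  When the
 * element layout needs per-element offsets, one resource is set up per
 * vertex element; otherwise each real vertex buffer is bound exactly once. */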
static void r600_vertex_buffer_update(struct r600_pipe_context *rctx)
{
	struct r600_pipe_resource_state *rstate;
	struct r600_resource *rbuffer;
	struct pipe_vertex_buffer *vertex_buffer;
	unsigned i, count, offset;

	if (rctx->vertex_elements->vbuffer_need_offset) {
		/* one resource per vertex element */
		count = rctx->vertex_elements->count;
	} else {
		/* bind each vertex buffer once */
		count = rctx->vbuf_mgr->nr_real_vertex_buffers;
	}

	for (i = 0; i < count; i++) {
		rstate = &rctx->fs_resource[i];

		if (rctx->vertex_elements->vbuffer_need_offset) {
			/* one resource per vertex element */
			unsigned vbuffer_index;
			vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
			vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
			rbuffer = (struct r600_resource*)vertex_buffer->buffer;
			offset = rctx->vertex_elements->vbuffer_offset[i];
		} else {
			/* bind each vertex buffer once */
			vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[i];
			rbuffer = (struct r600_resource*)vertex_buffer->buffer;
			offset = 0;
		}
		if (vertex_buffer == NULL || rbuffer == NULL)
			continue;
		offset += vertex_buffer->buffer_offset;

		if (!rstate->id) {
			if (rctx->chip_class >= EVERGREEN) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}

		if (rctx->chip_class >= EVERGREEN) {
			evergreen_pipe_mod_buffer_resource(&rctx->context, rstate, rbuffer, offset, vertex_buffer->stride, RADEON_USAGE_READ);
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
		} else {
			r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride, RADEON_USAGE_READ);
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
		}
	}
}

static int r600_shader_rebuild(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	int r;

	r600_pipe_shader_destroy(ctx, shader);
	r = r600_pipe_shader_create(ctx, shader);
	if (r) {
		return r;
	}
	r600_context_pipe_state_set(&rctx->ctx, &shader->rstate);

	return 0;
}

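/* Resolve state that depends on combinations of bound objects: flush depth
 * textures that will be sampled, rebuild shaders whose compiled form depends
 * on rasterizer or framebuffer state, and re-emit the deferred alpha
 * reference and sprite-coordinate configuration. */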
static void r600_update_derived_state(struct r600_pipe_context *rctx)
{
	struct pipe_context *ctx = (struct pipe_context*)rctx;

	if (!rctx->blitter->running) {
		if (rctx->have_depth_fb || rctx->have_depth_texture)
			r600_flush_depth_textures(rctx);
	}

	if (rctx->chip_class < EVERGREEN) {
		r600_update_sampler_states(rctx);
	}

	if (rctx->vs_shader->shader.clamp_color != rctx->clamp_vertex_color) {
		r600_shader_rebuild(&rctx->context, rctx->vs_shader);
	}

	if ((rctx->ps_shader->shader.clamp_color != rctx->clamp_fragment_color) ||
	    ((rctx->chip_class >= EVERGREEN) && rctx->ps_shader->shader.fs_write_all &&
	     (rctx->ps_shader->shader.nr_cbufs != rctx->nr_cbufs))) {
		r600_shader_rebuild(&rctx->context, rctx->ps_shader);
	}

	if (rctx->alpha_ref_dirty) {
		r600_update_alpha_ref(rctx);
	}

	if (rctx->ps_shader && rctx->sprite_coord_enable &&
	    (rctx->ps_shader->sprite_coord_enable != rctx->sprite_coord_enable)) {

		if (rctx->chip_class >= EVERGREEN)
			evergreen_pipe_shader_ps(ctx, rctx->ps_shader);
		else
			r600_pipe_shader_ps(ctx, rctx->ps_shader);

		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);
	}
}

void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct pipe_draw_info info = *dinfo;
	struct r600_draw rdraw = {};
	struct pipe_index_buffer ib = {};
	unsigned prim, mask, ls_mask = 0;

	if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
	    (info.indexed && !rctx->vbuf_mgr->index_buffer.buffer) ||
	    !r600_conv_pipe_prim(info.mode, &prim)) {
		return;
	}

	if (!rctx->ps_shader || !rctx->vs_shader)
		return;

	r600_update_derived_state(rctx);

	u_vbuf_draw_begin(rctx->vbuf_mgr, &info);
	r600_vertex_buffer_update(rctx);

	rdraw.vgt_num_indices = info.count;
	rdraw.vgt_num_instances = info.instance_count;

	if (info.indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->vbuf_mgr->index_buffer.buffer);
		ib.index_size = rctx->vbuf_mgr->index_buffer.index_size;
		ib.offset = rctx->vbuf_mgr->index_buffer.offset + info.start * ib.index_size;

		/* Translate or upload, if needed. */
		r600_translate_index_buffer(rctx, &ib, info.count);

		if (u_vbuf_resource(ib.buffer)->user_ptr) {
			r600_upload_index_buffer(rctx, &ib, info.count);
		}

		/* Initialize the r600_draw struct with index buffer info. */
		if (ib.index_size == 4) {
			rdraw.vgt_index_type = VGT_INDEX_32 |
				(R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0);
		} else {
			rdraw.vgt_index_type = VGT_INDEX_16 |
				(R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0);
		}
		rdraw.indices = (struct r600_resource*)ib.buffer;
		rdraw.indices_bo_offset = ib.offset;
		rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_DMA;
	} else {
		info.index_bias = info.start;
		rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
		if (info.count_from_stream_output) {
			rdraw.vgt_draw_initiator |= S_0287F0_USE_OPAQUE(1);

			r600_context_draw_opaque_count(&rctx->ctx, (struct r600_so_target*)info.count_from_stream_output);
		}
	}

	rctx->ctx.vs_shader_so_strides = rctx->vs_shader->so_strides;

	mask = (1ULL << ((unsigned)rctx->framebuffer.nr_cbufs * 4)) - 1;

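	/* Build the VGT state block once; on later draws only the values are
	 * patched in place, so the r600_pipe_state_mod_reg() calls below must
	 * stay in exactly the same order as the add_reg() calls here. */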
	if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
		rctx->vgt.id = R600_PIPE_STATE_VGT;
		rctx->vgt.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028400_VGT_MAX_VTX_INDX, ~0, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028404_VGT_MIN_VTX_INDX, 0, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, info.index_bias, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info.restart_index, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info.primitive_restart, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance, 0xFFFFFFFF, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A0C_PA_SC_LINE_STIPPLE,
					0,
					S_028A0C_AUTO_RESET_CNTL(3), NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028814_PA_SU_SC_MODE_CNTL,
					0,
					S_028814_PROVOKING_VTX_LAST(1), NULL, 0);
	}

	rctx->vgt.nregs = 0;
	r600_pipe_state_mod_reg(&rctx->vgt, prim);
	r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask);
	r600_pipe_state_mod_reg(&rctx->vgt, ~0);
	r600_pipe_state_mod_reg(&rctx->vgt, 0);
	r600_pipe_state_mod_reg(&rctx->vgt, info.index_bias);
	r600_pipe_state_mod_reg(&rctx->vgt, info.restart_index);
	r600_pipe_state_mod_reg(&rctx->vgt, info.primitive_restart);
	r600_pipe_state_mod_reg(&rctx->vgt, 0);
	r600_pipe_state_mod_reg(&rctx->vgt, info.start_instance);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	r600_pipe_state_mod_reg(&rctx->vgt, S_028A0C_AUTO_RESET_CNTL(ls_mask));

	if (info.mode == PIPE_PRIM_QUADS || info.mode == PIPE_PRIM_QUAD_STRIP || info.mode == PIPE_PRIM_POLYGON) {
		r600_pipe_state_mod_reg(&rctx->vgt, S_028814_PROVOKING_VTX_LAST(1));
	}

	r600_context_pipe_state_set(&rctx->ctx, &rctx->vgt);

	if (rctx->chip_class >= EVERGREEN) {
		evergreen_context_draw(&rctx->ctx, &rdraw);
	} else {
		r600_context_draw(&rctx->ctx, &rdraw);
	}

	if (rctx->framebuffer.zsbuf) {
		struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
		((struct r600_resource_texture *)tex)->dirty_db = TRUE;
	}

	pipe_resource_reference(&ib.buffer, NULL);
	u_vbuf_draw_end(rctx->vbuf_mgr);
}

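/* Append one register write to a pipe state.  The offset is resolved to its
 * r600_block up front so the emit path does not have to search for it; the
 * r600_pipe_state_add_reg() wrapper used throughout this file is expected to
 * supply range_id and block_id for a given offset. */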
void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      u32 offset, u32 value, u32 mask,
			      u32 range_id, u32 block_id,
			      struct r600_resource *bo,
			      enum radeon_bo_usage usage)
{
	struct r600_range *range;
	struct r600_block *block;

	if (bo) assert(usage);

	range = &ctx->range[range_id];
	block = range->blocks[block_id];
	state->regs[state->nregs].block = block;
	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;

	state->regs[state->nregs].value = value;
	state->regs[state->nregs].mask = mask;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

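/* Variant for registers that live outside any tracked block: the raw
 * register offset is stored and block is left NULL. */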
void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     u32 offset, u32 value, u32 mask,
				     struct r600_resource *bo,
				     enum radeon_bo_usage usage)
{
	if (bo) assert(usage);

	state->regs[state->nregs].id = offset;
	state->regs[state->nregs].block = NULL;
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].mask = mask;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

758