/* nv50_vbo.c — revision 0dcf0f9dfaa23b08d2bc20f8cbd02550c2632e52 */
/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_split_prim.h"

#include "nv50_context.h"
#include "nv50_resource.h"

32static INLINE uint32_t
33nv50_vbo_type_to_hw(enum pipe_format format)
34{
35	const struct util_format_description *desc;
36
37	desc = util_format_description(format);
38	assert(desc);
39
40	switch (desc->channel[0].type) {
41	case UTIL_FORMAT_TYPE_FLOAT:
42		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_FLOAT;
43	case UTIL_FORMAT_TYPE_UNSIGNED:
44		if (desc->channel[0].normalized) {
45			return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UNORM;
46		}
47		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_USCALED;
48	case UTIL_FORMAT_TYPE_SIGNED:
49		if (desc->channel[0].normalized) {
50			return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SNORM;
51		}
52		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SSCALED;
53	/*
54	case PIPE_FORMAT_TYPE_UINT:
55		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UINT;
56	case PIPE_FORMAT_TYPE_SINT:
57		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SINT; */
58	default:
59		return 0;
60	}
61}
62
63static INLINE uint32_t
64nv50_vbo_size_to_hw(unsigned size, unsigned nr_c)
65{
66	static const uint32_t hw_values[] = {
67		0, 0, 0, 0,
68		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8,
69		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8,
70		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8,
71		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8_8,
72		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16,
73		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16,
74		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16,
75		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16_16,
76		0, 0, 0, 0,
77		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32,
78		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32,
79		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32,
80		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32_32 };
81
82	/* we'd also have R11G11B10 and R10G10B10A2 */
83
84	assert(nr_c > 0 && nr_c <= 4);
85
86	if (size > 32)
87		return 0;
88	size >>= (3 - 2);
89
90	return hw_values[size + (nr_c - 1)];
91}
92
93static INLINE uint32_t
94nv50_vbo_vtxelt_to_hw(struct pipe_vertex_element *ve)
95{
96	uint32_t hw_type, hw_size;
97	enum pipe_format pf = ve->src_format;
98	const struct util_format_description *desc;
99	unsigned size, nr_components;
100
101	desc = util_format_description(pf);
102	assert(desc);
103
104	size = util_format_get_component_bits(pf, UTIL_FORMAT_COLORSPACE_RGB, 0);
105	nr_components = util_format_get_nr_components(pf);
106
107	hw_type = nv50_vbo_type_to_hw(pf);
108	hw_size = nv50_vbo_size_to_hw(size, nr_components);
109
110	if (!hw_type || !hw_size) {
111		NOUVEAU_ERR("unsupported vbo format: %s\n", util_format_name(pf));
112		abort();
113		return 0x24e80000;
114	}
115
116	if (desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z) /* BGRA */
117		hw_size |= (1 << 31); /* no real swizzle bits :-( */
118
119	return (hw_type | hw_size);
120}
121
/* Per-vertex-element bookkeeping used to step instanced vertex arrays
 * (those with a non-zero divisor) manually between instances.
 */
struct instance {
	struct nouveau_bo *bo;	/* backing buffer object of the array */
	unsigned delta;		/* current byte offset into the buffer */
	unsigned stride;	/* byte distance between array elements */
	unsigned step;		/* instances drawn since the last advance */
	unsigned divisor;	/* instance divisor; 0 = not instanced */
};

130static void
131instance_init(struct nv50_context *nv50, struct instance *a, unsigned first)
132{
133	int i;
134
135	for (i = 0; i < nv50->vtxelt->num_elements; i++) {
136		struct pipe_vertex_element *ve = &nv50->vtxelt->pipe[i];
137		struct pipe_vertex_buffer *vb;
138
139		a[i].divisor = ve->instance_divisor;
140		if (a[i].divisor) {
141			vb = &nv50->vtxbuf[ve->vertex_buffer_index];
142
143			a[i].bo = nv50_resource(vb->buffer)->bo;
144			a[i].stride = vb->stride;
145			a[i].step = first % a[i].divisor;
146			a[i].delta = vb->buffer_offset + ve->src_offset +
147				     (first * a[i].stride);
148		}
149	}
150}
151
152static void
153instance_step(struct nv50_context *nv50, struct instance *a)
154{
155	struct nouveau_channel *chan = nv50->screen->tesla->channel;
156	struct nouveau_grobj *tesla = nv50->screen->tesla;
157	int i;
158
159	for (i = 0; i < nv50->vtxelt->num_elements; i++) {
160		if (!a[i].divisor)
161			continue;
162
163		BEGIN_RING(chan, tesla,
164			   NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
165		OUT_RELOCh(chan, a[i].bo, a[i].delta, NOUVEAU_BO_RD |
166			   NOUVEAU_BO_VRAM | NOUVEAU_BO_GART);
167		OUT_RELOCl(chan, a[i].bo, a[i].delta, NOUVEAU_BO_RD |
168			   NOUVEAU_BO_VRAM | NOUVEAU_BO_GART);
169		if (++a[i].step == a[i].divisor) {
170			a[i].step = 0;
171			a[i].delta += a[i].stride;
172		}
173	}
174}
175
/* Draw non-indexed primitives, one hardware Begin/End per instance.
 *
 * Instanced arrays are stepped in software (instance_step); the current
 * instance index is made available to the vertex shader through the
 * NV50_CB_AUX constant buffer.
 */
static void
nv50_draw_arrays_instanced(struct pipe_context *pipe,
			   unsigned mode, unsigned start, unsigned count,
			   unsigned startInstance, unsigned instanceCount)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct instance a[16];
	unsigned prim = nv50_prim(mode);

	instance_init(nv50, a, startInstance);
	/* reserve worst-case ring space: CB setup plus up to 16 elements
	 * times 3 dwords of instance start relocations */
	if (!nv50_state_validate(nv50, 10 + 16*3))
		return;

	/* vbo_fifo is set when some vertex buffer can't be read by the
	 * GPU; fall back to pushing vertex data through the FIFO */
	if (nv50->vbo_fifo) {
		nv50_push_elements_instanced(pipe, NULL, 0, 0, mode, start,
					     count, startInstance,
					     instanceCount);
		return;
	}

	/* store the starting instance index in the auxiliary constbuf */
	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
	OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
	OUT_RING  (chan, startInstance);
	while (instanceCount--) {
		/* make sure one full instance fits before emitting it */
		if (AVAIL_RING(chan) < (7 + 16*3)) {
			FIRE_RING(chan);
			if (!nv50_state_validate(nv50, 7 + 16*3)) {
				assert(0);
				return;
			}
		}
		instance_step(nv50, a);

		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
		OUT_RING  (chan, prim);
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
		OUT_RING  (chan, start);
		OUT_RING  (chan, count);
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
		OUT_RING  (chan, 0);

		/* NOTE(review): bit 28 of the begin mode appears to mark
		 * subsequent instances (instance counter increment) —
		 * confirm against the nv50 3D class documentation */
		prim |= (1 << 28);
	}
}

/* Context passed through u_split_prim to the inline index emitters. */
struct inline_ctx {
	struct nv50_context *nv50;
	void *map;	/* CPU mapping of the index buffer */
};

228static void
229inline_elt08(void *priv, unsigned start, unsigned count)
230{
231	struct inline_ctx *ctx = priv;
232	struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
233	struct nouveau_channel *chan = tesla->channel;
234	uint8_t *map = (uint8_t *)ctx->map + start;
235
236	if (count & 1) {
237		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
238		OUT_RING  (chan, map[0]);
239		map++;
240		count &= ~1;
241	}
242
243	count >>= 1;
244	if (!count)
245		return;
246
247	BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, count);
248	while (count--) {
249		OUT_RING(chan, (map[1] << 16) | map[0]);
250		map += 2;
251	}
252}
253
254static void
255inline_elt16(void *priv, unsigned start, unsigned count)
256{
257	struct inline_ctx *ctx = priv;
258	struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
259	struct nouveau_channel *chan = tesla->channel;
260	uint16_t *map = (uint16_t *)ctx->map + start;
261
262	if (count & 1) {
263		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
264		OUT_RING  (chan, map[0]);
265		count &= ~1;
266		map++;
267	}
268
269	count >>= 1;
270	if (!count)
271		return;
272
273	BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, count);
274	while (count--) {
275		OUT_RING(chan, (map[1] << 16) | map[0]);
276		map += 2;
277	}
278}
279
280static void
281inline_elt32(void *priv, unsigned start, unsigned count)
282{
283	struct inline_ctx *ctx = priv;
284	struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
285	struct nouveau_channel *chan = tesla->channel;
286
287	BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U32, count);
288	OUT_RINGp    (chan, (uint32_t *)ctx->map + start, count);
289}
290
291static void
292inline_edgeflag(void *priv, boolean enabled)
293{
294	struct inline_ctx *ctx = priv;
295	struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
296	struct nouveau_channel *chan = tesla->channel;
297
298	BEGIN_RING(chan, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
299	OUT_RING  (chan, enabled ? 1 : 0);
300}
301
302static void
303nv50_draw_elements_inline(struct pipe_context *pipe,
304			  struct pipe_resource *indexBuffer, unsigned indexSize,
305			  unsigned mode, unsigned start, unsigned count,
306			  unsigned startInstance, unsigned instanceCount)
307{
308	struct nv50_context *nv50 = nv50_context(pipe);
309	struct nouveau_channel *chan = nv50->screen->tesla->channel;
310	struct nouveau_grobj *tesla = nv50->screen->tesla;
311	struct pipe_transfer *transfer;
312	struct instance a[16];
313	struct inline_ctx ctx;
314	struct u_split_prim s;
315	boolean nzi = FALSE;
316	unsigned overhead;
317
318	overhead = 16*3; /* potential instance adjustments */
319	overhead += 4; /* Begin()/End() */
320	overhead += 4; /* potential edgeflag disable/reenable */
321	overhead += 3; /* potentially 3 VTX_ELT_U16/U32 packet headers */
322
323	s.priv = &ctx;
324	if (indexSize == 1)
325		s.emit = inline_elt08;
326	else
327	if (indexSize == 2)
328		s.emit = inline_elt16;
329	else
330		s.emit = inline_elt32;
331	s.edge = inline_edgeflag;
332
333	ctx.nv50 = nv50;
334	ctx.map = pipe_buffer_map(pipe, indexBuffer, PIPE_TRANSFER_READ, &transfer);
335	assert(ctx.map);
336	if (!ctx.map)
337		return;
338
339	instance_init(nv50, a, startInstance);
340	if (!nv50_state_validate(nv50, overhead + 6 + 3))
341		return;
342
343	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
344	OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
345	OUT_RING  (chan, startInstance);
346	while (instanceCount--) {
347		unsigned max_verts;
348		boolean done;
349
350		u_split_prim_init(&s, mode, start, count);
351		do {
352			if (AVAIL_RING(chan) < (overhead + 6)) {
353				FIRE_RING(chan);
354				if (!nv50_state_validate(nv50, (overhead + 6))) {
355					assert(0);
356					return;
357				}
358			}
359
360			max_verts = AVAIL_RING(chan) - overhead;
361			if (max_verts > 2047)
362				max_verts = 2047;
363			if (indexSize != 4)
364				max_verts <<= 1;
365			instance_step(nv50, a);
366
367			BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
368			OUT_RING  (chan, nv50_prim(s.mode) | (nzi ? (1<<28) : 0));
369			done = u_split_prim_next(&s, max_verts);
370			BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
371			OUT_RING  (chan, 0);
372		} while (!done);
373
374		nzi = TRUE;
375	}
376
377	pipe_buffer_unmap(pipe, indexBuffer, transfer);
378}
379
/* Draw indexed primitives, one hardware Begin/End per instance.
 *
 * If the index buffer is GPU-accessible (and indices are 16 or 32 bit)
 * it is submitted directly via nouveau_pushbuf_submit; otherwise the
 * indices are pushed inline through the FIFO.
 */
static void
nv50_draw_elements_instanced(struct pipe_context *pipe,
			     struct pipe_resource *indexBuffer,
			     unsigned indexSize, int indexBias,
			     unsigned mode, unsigned start, unsigned count,
			     unsigned startInstance, unsigned instanceCount)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct instance a[16];
	unsigned prim = nv50_prim(mode);

	instance_init(nv50, a, startInstance);
	if (!nv50_state_validate(nv50, 13 + 16*3))
		return;

	/* vertex data must be pushed through the FIFO instead */
	if (nv50->vbo_fifo) {
		nv50_push_elements_instanced(pipe, indexBuffer, indexSize,
					     indexBias, mode, start, count,
					     startInstance, instanceCount);
		return;
	}

	/* indices are uint32 internally, so large indexBias means negative */
	BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_BASE, 1);
	OUT_RING  (chan, indexBias);

	/* hardware cannot fetch 8 bit or CPU-only index buffers directly */
	if (!nv50_resource_mapped_by_gpu(indexBuffer) || indexSize == 1) {
		nv50_draw_elements_inline(pipe, indexBuffer, indexSize,
					  mode, start, count, startInstance,
					  instanceCount);
		return;
	}

	/* store the starting instance index in the auxiliary constbuf */
	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
	OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
	OUT_RING  (chan, startInstance);
	while (instanceCount--) {
		/* make sure one full instance fits before emitting it */
		if (AVAIL_RING(chan) < (7 + 16*3)) {
			FIRE_RING(chan);
			if (!nv50_state_validate(nv50, 10 + 16*3)) {
				assert(0);
				return;
			}
		}
		instance_step(nv50, a);

		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
		OUT_RING  (chan, prim);
		if (indexSize == 4) {
			/* NOTE(review): the 0x30000 modifier with length 0
			 * appears to start a non-incrementing transfer whose
			 * payload follows via pushbuf_submit — confirm
			 * against libdrm_nouveau documentation */
			BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32 | 0x30000, 0);
			OUT_RING  (chan, count);
			nouveau_pushbuf_submit(chan,
					       nv50_resource(indexBuffer)->bo,
					       start << 2, count << 2);
		} else
		if (indexSize == 2) {
			/* 16 bit indices are packed two per dword; align the
			 * window to even indices and tell the hardware where
			 * the real first index sits (bit 31) */
			unsigned vb_start = (start & ~1);
			unsigned vb_end = (start + count + 1) & ~1;
			unsigned dwords = (vb_end - vb_start) >> 1;

			BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
			OUT_RING  (chan, ((start & 1) << 31) | count);
			BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16 | 0x30000, 0);
			OUT_RING  (chan, dwords);
			nouveau_pushbuf_submit(chan,
					       nv50_resource(indexBuffer)->bo,
					       vb_start << 1, dwords << 2);
			BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
			OUT_RING  (chan, 0);
		}
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
		OUT_RING  (chan, 0);

		/* NOTE(review): bit 28 of the begin mode appears to mark
		 * subsequent instances — confirm against nv50 class docs */
		prim |= (1 << 28);
	}
}

459void
460nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
461{
462	struct nv50_context *nv50 = nv50_context(pipe);
463
464	if (info->indexed && nv50->idxbuf.buffer) {
465		unsigned offset;
466
467		assert(nv50->idxbuf.offset % nv50->idxbuf.index_size == 0);
468		offset = nv50->idxbuf.offset / nv50->idxbuf.index_size;
469
470		nv50_draw_elements_instanced(pipe,
471					     nv50->idxbuf.buffer,
472					     nv50->idxbuf.index_size,
473					     info->index_bias,
474					     info->mode,
475					     info->start + offset,
476					     info->count,
477					     info->start_instance,
478					     info->instance_count);
479	}
480	else {
481		nv50_draw_arrays_instanced(pipe,
482					   info->mode,
483					   info->start,
484					   info->count,
485					   info->start_instance,
486					   info->instance_count);
487	}
488}
489
/* Bake a zero-stride ("static") vertex element into constant vertex
 * attribute methods instead of a vertex array.
 *
 * Reads one vertex from the buffer on the CPU, converts it to floats
 * and emits VTX_ATTR_nF methods into *pso (allocating the stateobj on
 * first use).  Returns FALSE if the buffer cannot be mapped or the
 * component count is unsupported; TRUE on success.
 */
static INLINE boolean
nv50_vbo_static_attrib(struct nv50_context *nv50, unsigned attrib,
		       struct nouveau_stateobj **pso,
		       struct pipe_vertex_element *ve,
		       struct pipe_vertex_buffer *vb)

{
	struct nouveau_stateobj *so;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_bo *bo = nv50_resource(vb->buffer)->bo;
	float v[4];
	int ret;
	unsigned nr_components = util_format_get_nr_components(ve->src_format);

	ret = nouveau_bo_map(bo, NOUVEAU_BO_RD);
	if (ret)
		return FALSE;

	/* decode the single vertex at the element's offset into v[] */
	util_format_read_4f(ve->src_format, v, 0, (uint8_t *)bo->map +
			    (vb->buffer_offset + ve->src_offset), 0,
			    0, 0, 1, 1);
	so = *pso;
	if (!so)
		*pso = so = so_new(nv50->vtxelt->num_elements,
				   nv50->vtxelt->num_elements * 4, 0);

	switch (nr_components) {
	case 4:
		so_method(so, tesla, NV50TCL_VTX_ATTR_4F_X(attrib), 4);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		so_data  (so, fui(v[2]));
		so_data  (so, fui(v[3]));
		break;
	case 3:
		so_method(so, tesla, NV50TCL_VTX_ATTR_3F_X(attrib), 3);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		so_data  (so, fui(v[2]));
		break;
	case 2:
		so_method(so, tesla, NV50TCL_VTX_ATTR_2F_X(attrib), 2);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		break;
	case 1:
		/* a static edge flag also toggles EDGEFLAG_ENABLE, in
		 * addition to being emitted as a 1F attribute below */
		if (attrib == nv50->vertprog->cfg.edgeflag_in) {
			so_method(so, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
			so_data  (so, v[0] ? 1 : 0);
		}
		so_method(so, tesla, NV50TCL_VTX_ATTR_1F(attrib), 1);
		so_data  (so, fui(v[0]));
		break;
	default:
		nouveau_bo_unmap(bo);
		return FALSE;
	}

	nouveau_bo_unmap(bo);
	return TRUE;
}

552void
553nv50_vtxelt_construct(struct nv50_vtxelt_stateobj *cso)
554{
555	unsigned i;
556
557	for (i = 0; i < cso->num_elements; ++i) {
558		struct pipe_vertex_element *ve = &cso->pipe[i];
559
560		cso->hw[i] = nv50_vbo_vtxelt_to_hw(ve);
561	}
562}
563
/* Build the vertex array state objects for the current vertex element
 * and vertex buffer bindings.
 *
 * Returns the vertex format stateobj (caller owns the reference); the
 * vertex buffer and static attribute stateobjs are stored in
 * nv50->state.  Returns NULL if no vertex buffers are bound.
 * Also decides per-element whether vertex data must be pushed through
 * the FIFO (nv50->vbo_fifo bitmask).
 */
struct nouveau_stateobj *
nv50_vbo_validate(struct nv50_context *nv50)
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_stateobj *vtxbuf, *vtxfmt, *vtxattr;
	unsigned i, n_ve;

	/* don't validate if Gallium took away our buffers */
	if (nv50->vtxbuf_nr == 0)
		return NULL;

	/* push everything through the FIFO when forced, or when the
	 * vertex program consumes an edge flag input */
	nv50->vbo_fifo = 0;
	if (nv50->screen->force_push ||
	    nv50->vertprog->cfg.edgeflag_in < 16)
		nv50->vbo_fifo = 0xffff;

	/* any non-static buffer the GPU can't read forces the FIFO path */
	for (i = 0; i < nv50->vtxbuf_nr; i++) {
		if (nv50->vtxbuf[i].stride &&
		    !nv50_resource_mapped_by_gpu(nv50->vtxbuf[i].buffer))
			nv50->vbo_fifo = 0xffff;
	}

	/* program at least as many slots as were previously active so
	 * stale arrays get disabled below */
	n_ve = MAX2(nv50->vtxelt->num_elements, nv50->state.vtxelt_nr);

	vtxattr = NULL;
	vtxbuf = so_new(n_ve * 2, n_ve * 5, nv50->vtxelt->num_elements * 4);
	vtxfmt = so_new(1, n_ve, 0);
	so_method(vtxfmt, tesla, NV50TCL_VERTEX_ARRAY_ATTRIB(0), n_ve);

	for (i = 0; i < nv50->vtxelt->num_elements; i++) {
		struct pipe_vertex_element *ve = &nv50->vtxelt->pipe[i];
		struct pipe_vertex_buffer *vb =
			&nv50->vtxbuf[ve->vertex_buffer_index];
		struct nouveau_bo *bo = nv50_resource(vb->buffer)->bo;
		uint32_t hw = nv50->vtxelt->hw[i];

		/* zero-stride elements become constant attributes and
		 * their array slot is disabled */
		if (!vb->stride &&
		    nv50_vbo_static_attrib(nv50, i, &vtxattr, ve, vb)) {
			so_data(vtxfmt, hw | (1 << 4));

			so_method(vtxbuf, tesla,
				  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
			so_data  (vtxbuf, 0);

			nv50->vbo_fifo &= ~(1 << i);
			continue;
		}

		/* FIFO path: disable the hardware array for this slot */
		if (nv50->vbo_fifo) {
			so_data  (vtxfmt, hw | (ve->instance_divisor ? (1 << 4) : i));
			so_method(vtxbuf, tesla,
				  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
			so_data  (vtxbuf, 0);
			continue;
		}

		so_data(vtxfmt, hw | i);

		/* enable the array and point it at the element's data */
		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 3);
		so_data  (vtxbuf, 0x20000000 |
			  (ve->instance_divisor ? 0 : vb->stride));
		so_reloc (vtxbuf, bo, vb->buffer_offset +
			  ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
			  NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (vtxbuf, bo, vb->buffer_offset +
			  ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
			  NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);

		/* vertex array limits */
		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_LIMIT_HIGH(i), 2);
		so_reloc (vtxbuf, bo, vb->buffer->width0 - 1,
			  NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
			  NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (vtxbuf, bo, vb->buffer->width0 - 1,
			  NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
			  NOUVEAU_BO_LOW, 0, 0);
	}
	/* disable any remaining slots left over from the previous state */
	for (; i < n_ve; ++i) {
		so_data  (vtxfmt, 0x7e080010);

		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
		so_data  (vtxbuf, 0);
	}
	nv50->state.vtxelt_nr = nv50->vtxelt->num_elements;

	so_ref (vtxbuf, &nv50->state.vtxbuf);
	so_ref (vtxattr, &nv50->state.vtxattr);
	so_ref (NULL, &vtxbuf);
	so_ref (NULL, &vtxattr);
	return vtxfmt;
}