/* nv50_vbo.c, revision 3c9df0bda67cdcbc340a4f20997f7a3345cbe9cb */
/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "nouveau/nouveau_util.h"
#include "nv50_context.h"
#include "nv50_resource.h"

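/* The helpers below translate a Gallium vertex element format into the
 * hardware's VERTEX_ARRAY_ATTRIB encoding: the channel type (float, (u)norm,
 * (u)scaled) and the component size/count are looked up separately and OR'd
 * together by nv50_vbo_vtxelt_to_hw().
 */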
static INLINE uint32_t
nv50_vbo_type_to_hw(enum pipe_format format)
{
	const struct util_format_description *desc;

	desc = util_format_description(format);
	assert(desc);

	switch (desc->channel[0].type) {
	case UTIL_FORMAT_TYPE_FLOAT:
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_FLOAT;
	case UTIL_FORMAT_TYPE_UNSIGNED:
		if (desc->channel[0].normalized) {
			return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UNORM;
		}
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_USCALED;
	case UTIL_FORMAT_TYPE_SIGNED:
		if (desc->channel[0].normalized) {
			return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SNORM;
		}
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SSCALED;
	/*
	case PIPE_FORMAT_TYPE_UINT:
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UINT;
	case PIPE_FORMAT_TYPE_SINT:
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SINT; */
	default:
		return 0;
	}
}

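/* hw_values[] is indexed by (size >> 1) + (nr_c - 1): the 8-bit formats
 * occupy slots 4-7, the 16-bit formats slots 8-11 and the 32-bit formats
 * slots 16-19. For example, three 32-bit components give (32 >> 1) + 2 = 18,
 * i.e. VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32.
 */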
static INLINE uint32_t
nv50_vbo_size_to_hw(unsigned size, unsigned nr_c)
{
	static const uint32_t hw_values[] = {
		0, 0, 0, 0,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8_8,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16_16,
		0, 0, 0, 0,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32_32 };

	/* we'd also have R11G11B10 and R10G10B10A2 */

	assert(nr_c > 0 && nr_c <= 4);

	if (size > 32)
		return 0;
	size >>= (3 - 2);

	return hw_values[size + (nr_c - 1)];
}

static INLINE uint32_t
nv50_vbo_vtxelt_to_hw(struct pipe_vertex_element *ve)
{
	uint32_t hw_type, hw_size;
	enum pipe_format pf = ve->src_format;
	const struct util_format_description *desc;
	unsigned size, nr_components;

	desc = util_format_description(pf);
	assert(desc);

	size = util_format_get_component_bits(pf, UTIL_FORMAT_COLORSPACE_RGB, 0);
	nr_components = util_format_get_nr_components(pf);

	hw_type = nv50_vbo_type_to_hw(pf);
	hw_size = nv50_vbo_size_to_hw(size, nr_components);

	if (!hw_type || !hw_size) {
		NOUVEAU_ERR("unsupported vbo format: %s\n", util_format_name(pf));
		abort();
		return 0x24e80000;
	}

	if (desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z) /* BGRA */
		hw_size |= (1 << 31); /* no real swizzle bits :-( */

	return (hw_type | hw_size);
}

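/* Instanced arrays are emulated: for every vertex element with a non-zero
 * instance_divisor we track the backing BO, the stride and the byte offset
 * (delta) of the value belonging to the current instance, and re-point the
 * hardware vertex array at it before each instance is drawn.
 */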
struct instance {
	struct nouveau_bo *bo;
	unsigned delta;
	unsigned stride;
	unsigned step;
	unsigned divisor;
};

static void
instance_init(struct nv50_context *nv50, struct instance *a, unsigned first)
{
	int i;

	for (i = 0; i < nv50->vtxelt->num_elements; i++) {
		struct pipe_vertex_element *ve = &nv50->vtxelt->pipe[i];
		struct pipe_vertex_buffer *vb;

		a[i].divisor = ve->instance_divisor;
		if (a[i].divisor) {
			vb = &nv50->vtxbuf[ve->vertex_buffer_index];

			a[i].bo = nv50_resource(vb->buffer)->bo;
			a[i].stride = vb->stride;
			a[i].step = first % a[i].divisor;
			a[i].delta = vb->buffer_offset + ve->src_offset +
				     (first * a[i].stride);
		}
	}
}

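/* Called once per instance: re-emits VERTEX_ARRAY_START for each instanced
 * attribute and advances delta by one stride every `divisor` instances.
 */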
static void
instance_step(struct nv50_context *nv50, struct instance *a)
{
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	int i;

	for (i = 0; i < nv50->vtxelt->num_elements; i++) {
		if (!a[i].divisor)
			continue;

		BEGIN_RING(chan, tesla,
			   NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
		OUT_RELOCh(chan, a[i].bo, a[i].delta, NOUVEAU_BO_RD |
			   NOUVEAU_BO_VRAM | NOUVEAU_BO_GART);
		OUT_RELOCl(chan, a[i].bo, a[i].delta, NOUVEAU_BO_RD |
			   NOUVEAU_BO_VRAM | NOUVEAU_BO_GART);
		if (++a[i].step == a[i].divisor) {
			a[i].step = 0;
			a[i].delta += a[i].stride;
		}
	}
}

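/* Note: startInstance is written into the auxiliary constant buffer
 * (NV50_CB_AUX) so the vertex shader can pick up the instance offset; after
 * the first instance the primitive type is OR'd with (1 << 28), which
 * appears to mark VERTEX_BEGIN as a continuation of an instanced draw rather
 * than a fresh one.
 */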
void
nv50_draw_arrays_instanced(struct pipe_context *pipe,
			   unsigned mode, unsigned start, unsigned count,
			   unsigned startInstance, unsigned instanceCount)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct instance a[16];
	unsigned prim = nv50_prim(mode);

	instance_init(nv50, a, startInstance);
	if (!nv50_state_validate(nv50, 10 + 16*3))
		return;

	if (nv50->vbo_fifo) {
		nv50_push_elements_instanced(pipe, NULL, 0, 0, mode, start,
					     count, startInstance,
					     instanceCount);
		return;
	}

	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
	OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
	OUT_RING  (chan, startInstance);
	while (instanceCount--) {
		if (AVAIL_RING(chan) < (7 + 16*3)) {
			FIRE_RING(chan);
			if (!nv50_state_validate(nv50, 7 + 16*3)) {
				assert(0);
				return;
			}
		}
		instance_step(nv50, a);

		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
		OUT_RING  (chan, prim);
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
		OUT_RING  (chan, start);
		OUT_RING  (chan, count);
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
		OUT_RING  (chan, 0);

		prim |= (1 << 28);
	}
}

void
nv50_draw_arrays(struct pipe_context *pipe, unsigned mode, unsigned start,
		 unsigned count)
{
	nv50_draw_arrays_instanced(pipe, mode, start, count, 0, 1);
}

struct inline_ctx {
	struct nv50_context *nv50;
	void *map;
};

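/* The inline_elt* callbacks feed index data straight into the command stream
 * for u_split_prim: 8- and 16-bit indices are packed two per 32-bit word via
 * VB_ELEMENT_U16 (with a single leading VB_ELEMENT_U32 when the count is
 * odd), while 32-bit indices go out verbatim through VB_ELEMENT_U32.
 */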
static void
inline_elt08(void *priv, unsigned start, unsigned count)
{
	struct inline_ctx *ctx = priv;
	struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	uint8_t *map = (uint8_t *)ctx->map + start;

	if (count & 1) {
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
		OUT_RING  (chan, map[0]);
		map++;
		count &= ~1;
	}

	count >>= 1;
	if (!count)
		return;

	BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, count);
	while (count--) {
		OUT_RING(chan, (map[1] << 16) | map[0]);
		map += 2;
	}
}

static void
inline_elt16(void *priv, unsigned start, unsigned count)
{
	struct inline_ctx *ctx = priv;
	struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	uint16_t *map = (uint16_t *)ctx->map + start;

	if (count & 1) {
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
		OUT_RING  (chan, map[0]);
		count &= ~1;
		map++;
	}

	count >>= 1;
	if (!count)
		return;

	BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, count);
	while (count--) {
		OUT_RING(chan, (map[1] << 16) | map[0]);
		map += 2;
	}
}

static void
inline_elt32(void *priv, unsigned start, unsigned count)
{
	struct inline_ctx *ctx = priv;
	struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;

	BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U32, count);
	OUT_RINGp    (chan, (uint32_t *)ctx->map + start, count);
}

static void
inline_edgeflag(void *priv, boolean enabled)
{
	struct inline_ctx *ctx = priv;
	struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;

	BEGIN_RING(chan, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
	OUT_RING  (chan, enabled ? 1 : 0);
}

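/* Draws from a CPU-mapped index buffer by pushing the indices inline.
 * u_split_prim splits the primitive so that each chunk fits into the space
 * left in the pushbuf; `overhead` reserves room for the per-instance array
 * relocations, Begin/End, edgeflag toggles and the index packet headers.
 */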
static void
nv50_draw_elements_inline(struct pipe_context *pipe,
			  struct pipe_resource *indexBuffer, unsigned indexSize,
			  unsigned mode, unsigned start, unsigned count,
			  unsigned startInstance, unsigned instanceCount)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct pipe_transfer *transfer;
	struct instance a[16];
	struct inline_ctx ctx;
	struct u_split_prim s;
	boolean nzi = FALSE;
	unsigned overhead;

	overhead = 16*3; /* potential instance adjustments */
	overhead += 4; /* Begin()/End() */
	overhead += 4; /* potential edgeflag disable/reenable */
	overhead += 3; /* potentially 3 VTX_ELT_U16/U32 packet headers */

	s.priv = &ctx;
	if (indexSize == 1)
		s.emit = inline_elt08;
	else
	if (indexSize == 2)
		s.emit = inline_elt16;
	else
		s.emit = inline_elt32;
	s.edge = inline_edgeflag;

	ctx.nv50 = nv50;
	ctx.map = pipe_buffer_map(pipe, indexBuffer, PIPE_TRANSFER_READ, &transfer);
	assert(ctx.map);
	if (!ctx.map)
		return;

	instance_init(nv50, a, startInstance);
	if (!nv50_state_validate(nv50, overhead + 6 + 3))
		return;

	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
	OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
	OUT_RING  (chan, startInstance);
	while (instanceCount--) {
		unsigned max_verts;
		boolean done;

		u_split_prim_init(&s, mode, start, count);
		do {
			if (AVAIL_RING(chan) < (overhead + 6)) {
				FIRE_RING(chan);
				if (!nv50_state_validate(nv50, (overhead + 6))) {
					assert(0);
					return;
				}
			}

			max_verts = AVAIL_RING(chan) - overhead;
			if (max_verts > 2047)
				max_verts = 2047;
			if (indexSize != 4)
				max_verts <<= 1;
			instance_step(nv50, a);

			BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
			OUT_RING  (chan, nv50_prim(s.mode) | (nzi ? (1<<28) : 0));
			done = u_split_prim_next(&s, max_verts);
			BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
			OUT_RING  (chan, 0);
		} while (!done);

		nzi = TRUE;
	}

	pipe_buffer_unmap(pipe, indexBuffer, transfer);
}

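/* Indexed drawing: VB_ELEMENT_BASE applies the index bias, then the index
 * buffer is submitted in place via nouveau_pushbuf_submit (U32 indices
 * directly, U16 indices as dword pairs, with VB_ELEMENT_U16_SETUP carrying
 * the count and an odd-start flag). Buffers not bound for index use, and
 * 8-bit indices, take the inline path above instead.
 */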
void
nv50_draw_elements_instanced(struct pipe_context *pipe,
			     struct pipe_resource *indexBuffer,
			     unsigned indexSize, int indexBias,
			     unsigned mode, unsigned start, unsigned count,
			     unsigned startInstance, unsigned instanceCount)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct instance a[16];
	unsigned prim = nv50_prim(mode);

	instance_init(nv50, a, startInstance);
	if (!nv50_state_validate(nv50, 13 + 16*3))
		return;

	if (nv50->vbo_fifo) {
		nv50_push_elements_instanced(pipe, indexBuffer, indexSize,
					     indexBias, mode, start, count,
					     startInstance, instanceCount);
		return;
	}

	/* indices are uint32 internally, so large indexBias means negative */
	BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_BASE, 1);
	OUT_RING  (chan, indexBias);

	if (!(indexBuffer->bind & PIPE_BIND_INDEX_BUFFER) || indexSize == 1) {
		nv50_draw_elements_inline(pipe, indexBuffer, indexSize,
					  mode, start, count, startInstance,
					  instanceCount);
		return;
	}

	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
	OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
	OUT_RING  (chan, startInstance);
	while (instanceCount--) {
		if (AVAIL_RING(chan) < (7 + 16*3)) {
			FIRE_RING(chan);
			if (!nv50_state_validate(nv50, 10 + 16*3)) {
				assert(0);
				return;
			}
		}
		instance_step(nv50, a);

		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
		OUT_RING  (chan, prim);
		if (indexSize == 4) {
			BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32 | 0x30000, 0);
			OUT_RING  (chan, count);
			nouveau_pushbuf_submit(chan,
					       nv50_resource(indexBuffer)->bo,
					       start << 2, count << 2);
		} else
		if (indexSize == 2) {
			unsigned vb_start = (start & ~1);
			unsigned vb_end = (start + count + 1) & ~1;
			unsigned dwords = (vb_end - vb_start) >> 1;

			BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
			OUT_RING  (chan, ((start & 1) << 31) | count);
			BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16 | 0x30000, 0);
			OUT_RING  (chan, dwords);
			nouveau_pushbuf_submit(chan,
					       nv50_resource(indexBuffer)->bo,
					       vb_start << 1, dwords << 2);
			BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
			OUT_RING  (chan, 0);
		}
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
		OUT_RING  (chan, 0);

		prim |= (1 << 28);
	}
}

void
nv50_draw_elements(struct pipe_context *pipe,
		   struct pipe_resource *indexBuffer,
		   unsigned indexSize, int indexBias,
		   unsigned mode, unsigned start, unsigned count)
{
	nv50_draw_elements_instanced(pipe, indexBuffer, indexSize, indexBias,
				     mode, start, count, 0, 1);
}

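/* A vertex buffer with stride 0 supplies the same value for every vertex, so
 * instead of setting up a hardware array the value is read back on the CPU
 * and baked into a state object as constant VTX_ATTR_nF methods (and, for
 * the edgeflag input, an EDGEFLAG_ENABLE toggle).
 */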
static INLINE boolean
nv50_vbo_static_attrib(struct nv50_context *nv50, unsigned attrib,
		       struct nouveau_stateobj **pso,
		       struct pipe_vertex_element *ve,
		       struct pipe_vertex_buffer *vb)
{
	struct nouveau_stateobj *so;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_bo *bo = nv50_resource(vb->buffer)->bo;
	float v[4];
	int ret;
	unsigned nr_components = util_format_get_nr_components(ve->src_format);

	ret = nouveau_bo_map(bo, NOUVEAU_BO_RD);
	if (ret)
		return FALSE;

	util_format_read_4f(ve->src_format, v, 0, (uint8_t *)bo->map +
			    (vb->buffer_offset + ve->src_offset), 0,
			    0, 0, 1, 1);
	so = *pso;
	if (!so)
		*pso = so = so_new(nv50->vtxelt->num_elements,
				   nv50->vtxelt->num_elements * 4, 0);

	switch (nr_components) {
	case 4:
		so_method(so, tesla, NV50TCL_VTX_ATTR_4F_X(attrib), 4);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		so_data  (so, fui(v[2]));
		so_data  (so, fui(v[3]));
		break;
	case 3:
		so_method(so, tesla, NV50TCL_VTX_ATTR_3F_X(attrib), 3);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		so_data  (so, fui(v[2]));
		break;
	case 2:
		so_method(so, tesla, NV50TCL_VTX_ATTR_2F_X(attrib), 2);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		break;
	case 1:
		if (attrib == nv50->vertprog->cfg.edgeflag_in) {
			so_method(so, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
			so_data  (so, v[0] ? 1 : 0);
		}
		so_method(so, tesla, NV50TCL_VTX_ATTR_1F(attrib), 1);
		so_data  (so, fui(v[0]));
		break;
	default:
		nouveau_bo_unmap(bo);
		return FALSE;
	}

	nouveau_bo_unmap(bo);
	return TRUE;
}

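/* Precompute the hardware attribute formats when the vertex element CSO is
 * created, so validation only needs to OR in per-element state.
 */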
void
nv50_vtxelt_construct(struct nv50_vtxelt_stateobj *cso)
{
	unsigned i;

	for (i = 0; i < cso->num_elements; ++i) {
		struct pipe_vertex_element *ve = &cso->pipe[i];

		cso->hw[i] = nv50_vbo_vtxelt_to_hw(ve);
	}
}

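/* Builds the vertex format and vertex buffer state objects. vbo_fifo is a
 * per-element bitmask selecting the push path (vertices written through the
 * FIFO instead of fetched from hardware arrays); it is enabled wholesale
 * when pushing is forced, when the vertex program consumes edge flags
 * (edgeflag_in < 16), or when a buffer with non-zero stride is not bound
 * for vertex use. Unused element slots get the dummy format 0x7e080010.
 */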
struct nouveau_stateobj *
nv50_vbo_validate(struct nv50_context *nv50)
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_stateobj *vtxbuf, *vtxfmt, *vtxattr;
	unsigned i, n_ve;

	/* don't validate if Gallium took away our buffers */
	if (nv50->vtxbuf_nr == 0)
		return NULL;

	nv50->vbo_fifo = 0;
	if (nv50->screen->force_push ||
	    nv50->vertprog->cfg.edgeflag_in < 16)
		nv50->vbo_fifo = 0xffff;

	for (i = 0; i < nv50->vtxbuf_nr; i++) {
		if (nv50->vtxbuf[i].stride &&
		    !(nv50->vtxbuf[i].buffer->bind & PIPE_BIND_VERTEX_BUFFER))
			nv50->vbo_fifo = 0xffff;
	}

	n_ve = MAX2(nv50->vtxelt->num_elements, nv50->state.vtxelt_nr);

	vtxattr = NULL;
	vtxbuf = so_new(n_ve * 2, n_ve * 5, nv50->vtxelt->num_elements * 4);
	vtxfmt = so_new(1, n_ve, 0);
	so_method(vtxfmt, tesla, NV50TCL_VERTEX_ARRAY_ATTRIB(0), n_ve);

	for (i = 0; i < nv50->vtxelt->num_elements; i++) {
		struct pipe_vertex_element *ve = &nv50->vtxelt->pipe[i];
		struct pipe_vertex_buffer *vb =
			&nv50->vtxbuf[ve->vertex_buffer_index];
		struct nouveau_bo *bo = nv50_resource(vb->buffer)->bo;
		uint32_t hw = nv50->vtxelt->hw[i];

		if (!vb->stride &&
		    nv50_vbo_static_attrib(nv50, i, &vtxattr, ve, vb)) {
			so_data(vtxfmt, hw | (1 << 4));

			so_method(vtxbuf, tesla,
				  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
			so_data  (vtxbuf, 0);

			nv50->vbo_fifo &= ~(1 << i);
			continue;
		}

		if (nv50->vbo_fifo) {
			so_data  (vtxfmt, hw | (ve->instance_divisor ? (1 << 4) : i));
			so_method(vtxbuf, tesla,
				  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
			so_data  (vtxbuf, 0);
			continue;
		}

		so_data(vtxfmt, hw | i);

		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 3);
		so_data  (vtxbuf, 0x20000000 |
			  (ve->instance_divisor ? 0 : vb->stride));
		so_reloc (vtxbuf, bo, vb->buffer_offset +
			  ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
			  NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (vtxbuf, bo, vb->buffer_offset +
			  ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
			  NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);

		/* vertex array limits */
		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_LIMIT_HIGH(i), 2);
		so_reloc (vtxbuf, bo, vb->buffer->width0 - 1,
			  NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
			  NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (vtxbuf, bo, vb->buffer->width0 - 1,
			  NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
			  NOUVEAU_BO_LOW, 0, 0);
	}
	for (; i < n_ve; ++i) {
		so_data  (vtxfmt, 0x7e080010);

		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
		so_data  (vtxbuf, 0);
	}
	nv50->state.vtxelt_nr = nv50->vtxelt->num_elements;

	so_ref (vtxbuf, &nv50->state.vtxbuf);
	so_ref (vtxattr, &nv50->state.vtxattr);
	so_ref (NULL, &vtxbuf);
	so_ref (NULL, &vtxattr);
	return vtxfmt;
}