nv50_vbo.c revision 62ab89785b55e60b978dc2b32995676859299c80
/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"

#include "nv50_context.h"

static boolean
nv50_push_elements_u08(struct nv50_context *, uint8_t *, unsigned);

static boolean
nv50_push_elements_u16(struct nv50_context *, uint16_t *, unsigned);

static boolean
nv50_push_elements_u32(struct nv50_context *, uint32_t *, unsigned);

static boolean
nv50_push_arrays(struct nv50_context *, unsigned, unsigned);

#define NV50_USING_LOATHED_EDGEFLAG(ctx) ((ctx)->vertprog->cfg.edgeflag_in < 16)

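/* Translate a Gallium PIPE_PRIM_* mode into the hardware's VERTEX_BEGIN
 * primitive value; unknown modes log an error and fall back to POINTS.
 */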
static INLINE unsigned
nv50_prim(unsigned mode)
{
	switch (mode) {
	case PIPE_PRIM_POINTS: return NV50TCL_VERTEX_BEGIN_POINTS;
	case PIPE_PRIM_LINES: return NV50TCL_VERTEX_BEGIN_LINES;
	case PIPE_PRIM_LINE_LOOP: return NV50TCL_VERTEX_BEGIN_LINE_LOOP;
	case PIPE_PRIM_LINE_STRIP: return NV50TCL_VERTEX_BEGIN_LINE_STRIP;
	case PIPE_PRIM_TRIANGLES: return NV50TCL_VERTEX_BEGIN_TRIANGLES;
	case PIPE_PRIM_TRIANGLE_STRIP:
		return NV50TCL_VERTEX_BEGIN_TRIANGLE_STRIP;
	case PIPE_PRIM_TRIANGLE_FAN: return NV50TCL_VERTEX_BEGIN_TRIANGLE_FAN;
	case PIPE_PRIM_QUADS: return NV50TCL_VERTEX_BEGIN_QUADS;
	case PIPE_PRIM_QUAD_STRIP: return NV50TCL_VERTEX_BEGIN_QUAD_STRIP;
	case PIPE_PRIM_POLYGON: return NV50TCL_VERTEX_BEGIN_POLYGON;
	case PIPE_PRIM_LINES_ADJACENCY:
		return NV50TCL_VERTEX_BEGIN_LINES_ADJACENCY;
	case PIPE_PRIM_LINE_STRIP_ADJACENCY:
		return NV50TCL_VERTEX_BEGIN_LINE_STRIP_ADJACENCY;
	case PIPE_PRIM_TRIANGLES_ADJACENCY:
		return NV50TCL_VERTEX_BEGIN_TRIANGLES_ADJACENCY;
	case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY:
		return NV50TCL_VERTEX_BEGIN_TRIANGLE_STRIP_ADJACENCY;
	default:
		break;
	}

	NOUVEAU_ERR("invalid primitive type %d\n", mode);
	return NV50TCL_VERTEX_BEGIN_POINTS;
}

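/* Pick the VERTEX_ARRAY_ATTRIB_TYPE_* value from the format's first channel
 * (float, (u)norm or (u)scaled); returns 0 for unsupported channel types.
 */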
static INLINE uint32_t
nv50_vbo_type_to_hw(enum pipe_format format)
{
	const struct util_format_description *desc;

	desc = util_format_description(format);
	assert(desc);

	switch (desc->channel[0].type) {
	case UTIL_FORMAT_TYPE_FLOAT:
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_FLOAT;
	case UTIL_FORMAT_TYPE_UNSIGNED:
		if (desc->channel[0].normalized) {
			return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UNORM;
		}
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_USCALED;
	case UTIL_FORMAT_TYPE_SIGNED:
		if (desc->channel[0].normalized) {
			return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SNORM;
		}
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SSCALED;
	/*
	case PIPE_FORMAT_TYPE_UINT:
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UINT;
	case PIPE_FORMAT_TYPE_SINT:
		return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SINT; */
	default:
		return 0;
	}
}

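/* Look up the VERTEX_ARRAY_ATTRIB_FORMAT_* value for a component width in
 * bits (8, 16 or 32) and a component count; the table is indexed by
 * (size / 2) + (nr_c - 1), and 0 marks unsupported combinations.
 */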
static INLINE uint32_t
nv50_vbo_size_to_hw(unsigned size, unsigned nr_c)
{
	static const uint32_t hw_values[] = {
		0, 0, 0, 0,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8_8,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16_16,
		0, 0, 0, 0,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32,
		NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32_32 };

	/* we'd also have R11G11B10 and R10G10B10A2 */

	assert(nr_c > 0 && nr_c <= 4);

	if (size > 32)
		return 0;
	size >>= (3 - 2);

	return hw_values[size + (nr_c - 1)];
}

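/* Combine the type and size halves into the hardware attribute format word;
 * bit 31 requests the BGRA swizzle since there are no real swizzle bits.
 */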
static INLINE uint32_t
nv50_vbo_vtxelt_to_hw(struct pipe_vertex_element *ve)
{
	uint32_t hw_type, hw_size;
	enum pipe_format pf = ve->src_format;
	const struct util_format_description *desc;
	unsigned size, nr_components;

	desc = util_format_description(pf);
	assert(desc);

	size = util_format_get_component_bits(pf, UTIL_FORMAT_COLORSPACE_RGB, 0);
	nr_components = util_format_get_nr_components(pf);

	hw_type = nv50_vbo_type_to_hw(pf);
	hw_size = nv50_vbo_size_to_hw(size, nr_components);

	if (!hw_type || !hw_size) {
		NOUVEAU_ERR("unsupported vbo format: %s\n", util_format_name(pf));
		abort();
		return 0x24e80000;
	}

	if (desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z) /* BGRA */
		hw_size |= (1 << 31); /* no real swizzle bits :-( */

	return (hw_type | hw_size);
}

/* For instanced drawing from user buffers, hitting the FIFO repeatedly
 * with the same vertex data is probably worse than uploading all data.
 */
static boolean
nv50_upload_vtxbuf(struct nv50_context *nv50, unsigned i)
{
	struct nv50_screen *nscreen = nv50->screen;
	struct pipe_screen *pscreen = &nscreen->base.base;
	struct pipe_buffer *buf = nscreen->strm_vbuf[i];
	struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
	uint8_t *src;
	unsigned size = align(vb->buffer->size, 4096);

	if (buf && buf->size < size)
		pipe_buffer_reference(&nscreen->strm_vbuf[i], NULL);

	if (!nscreen->strm_vbuf[i]) {
		nscreen->strm_vbuf[i] = pipe_buffer_create(
			pscreen, 0, PIPE_BUFFER_USAGE_VERTEX, size);
		buf = nscreen->strm_vbuf[i];
	}

	src = pipe_buffer_map(pscreen, vb->buffer, PIPE_BUFFER_USAGE_CPU_READ);
	if (!src)
		return FALSE;
	src += vb->buffer_offset;

	size = (vb->max_index + 1) * vb->stride + 16; /* + 16 is for stride 0 */
	if (vb->buffer_offset + size > vb->buffer->size)
		size = vb->buffer->size - vb->buffer_offset;

	pipe_buffer_write(pscreen, buf, vb->buffer_offset, size, src);
	pipe_buffer_unmap(pscreen, vb->buffer);

	vb->buffer = buf; /* don't pipe_reference, this is a private copy */
	return TRUE;
}

static void
nv50_upload_user_vbufs(struct nv50_context *nv50)
{
	unsigned i;

	if (nv50->vbo_fifo)
		nv50->dirty |= NV50_NEW_ARRAYS;
	if (!(nv50->dirty & NV50_NEW_ARRAYS))
		return;

	for (i = 0; i < nv50->vtxbuf_nr; ++i) {
		if (nv50->vtxbuf[i].buffer->usage & PIPE_BUFFER_USAGE_VERTEX)
			continue;
		nv50_upload_vtxbuf(nv50, i);
	}
}

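/* Read one attribute from mapped vertex data and emit it as an immediate
 * VTX_ATTR_nF value (used for instanced attributes on the immediate path).
 */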
static void
nv50_set_static_vtxattr(struct nv50_context *nv50, unsigned i, void *data)
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	float v[4];
	enum pipe_format pf = nv50->vtxelt->pipe[i].src_format;
	unsigned nr_components = util_format_get_nr_components(pf);

	util_format_read_4f(pf, v, 0, data, 0, 0, 0, 1, 1);

	switch (nr_components) {
	case 4:
		BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_4F_X(i), 4);
		OUT_RINGf (chan, v[0]);
		OUT_RINGf (chan, v[1]);
		OUT_RINGf (chan, v[2]);
		OUT_RINGf (chan, v[3]);
		break;
	case 3:
		BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_3F_X(i), 3);
		OUT_RINGf (chan, v[0]);
		OUT_RINGf (chan, v[1]);
		OUT_RINGf (chan, v[2]);
		break;
	case 2:
		BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_2F_X(i), 2);
		OUT_RINGf (chan, v[0]);
		OUT_RINGf (chan, v[1]);
		break;
	case 1:
		BEGIN_RING(chan, tesla, NV50TCL_VTX_ATTR_1F(i), 1);
		OUT_RINGf (chan, v[0]);
		break;
	default:
		assert(0);
		break;
	}
}

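/* Immediate-mode variant: instead of pointing the vertex fetcher at the
 * arrays, set the starting value of each instanced attribute directly.
 */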
static unsigned
init_per_instance_arrays_immd(struct nv50_context *nv50,
			      unsigned startInstance,
			      unsigned pos[16], unsigned step[16])
{
	struct nouveau_bo *bo;
	unsigned i, b, count = 0;

	for (i = 0; i < nv50->vtxelt->num_elements; ++i) {
		if (!nv50->vtxelt->pipe[i].instance_divisor)
			continue;
		++count;
		b = nv50->vtxelt->pipe[i].vertex_buffer_index;

		pos[i] = nv50->vtxelt->pipe[i].src_offset +
			nv50->vtxbuf[b].buffer_offset +
			startInstance * nv50->vtxbuf[b].stride;
		step[i] = startInstance %
			nv50->vtxelt->pipe[i].instance_divisor;

		bo = nouveau_bo(nv50->vtxbuf[b].buffer);
		if (!bo->map)
			nouveau_bo_map(bo, NOUVEAU_BO_RD);

		nv50_set_static_vtxattr(nv50, i, (uint8_t *)bo->map + pos[i]);
	}

	return count;
}

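/* Point arrays that have a non-zero instance divisor at the data for
 * startInstance (when it is non-zero) and return how many such arrays exist.
 */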
static unsigned
init_per_instance_arrays(struct nv50_context *nv50,
			 unsigned startInstance,
			 unsigned pos[16], unsigned step[16])
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	struct nouveau_bo *bo;
	struct nouveau_stateobj *so;
	unsigned i, b, count = 0, num_elements = nv50->vtxelt->num_elements;
	const uint32_t rl = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;

	if (nv50->vbo_fifo)
		return init_per_instance_arrays_immd(nv50, startInstance,
						     pos, step);

	so = so_new(num_elements, num_elements * 2, num_elements * 2);

	for (i = 0; i < nv50->vtxelt->num_elements; ++i) {
		if (!nv50->vtxelt->pipe[i].instance_divisor)
			continue;
		++count;
		b = nv50->vtxelt->pipe[i].vertex_buffer_index;

		pos[i] = nv50->vtxelt->pipe[i].src_offset +
			nv50->vtxbuf[b].buffer_offset +
			startInstance * nv50->vtxbuf[b].stride;

		if (!startInstance) {
			step[i] = 0;
			continue;
		}
		step[i] = startInstance %
			nv50->vtxelt->pipe[i].instance_divisor;

		bo = nouveau_bo(nv50->vtxbuf[b].buffer);

		so_method(so, tesla, NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
		so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_LOW, 0, 0);
	}

	if (count && startInstance) {
		so_ref (so, &nv50->state.instbuf); /* for flush notify */
		so_emit(chan, nv50->state.instbuf);
	}
	so_ref (NULL, &so);

	return count;
}

static void
step_per_instance_arrays_immd(struct nv50_context *nv50,
			      unsigned pos[16], unsigned step[16])
{
	struct nouveau_bo *bo;
	unsigned i, b;

	for (i = 0; i < nv50->vtxelt->num_elements; ++i) {
		if (!nv50->vtxelt->pipe[i].instance_divisor)
			continue;
		if (++step[i] != nv50->vtxelt->pipe[i].instance_divisor)
			continue;
		b = nv50->vtxelt->pipe[i].vertex_buffer_index;
		bo = nouveau_bo(nv50->vtxbuf[b].buffer);

		step[i] = 0;
		pos[i] += nv50->vtxbuf[b].stride;

		nv50_set_static_vtxattr(nv50, i, (uint8_t *)bo->map + pos[i]);
	}
}

static void
step_per_instance_arrays(struct nv50_context *nv50,
			 unsigned pos[16], unsigned step[16])
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	struct nouveau_bo *bo;
	struct nouveau_stateobj *so;
	unsigned i, b, num_elements = nv50->vtxelt->num_elements;
	const uint32_t rl = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;

	if (nv50->vbo_fifo) {
		step_per_instance_arrays_immd(nv50, pos, step);
		return;
	}

	so = so_new(num_elements, num_elements * 2, num_elements * 2);

	for (i = 0; i < nv50->vtxelt->num_elements; ++i) {
		if (!nv50->vtxelt->pipe[i].instance_divisor)
			continue;
		b = nv50->vtxelt->pipe[i].vertex_buffer_index;

		if (++step[i] == nv50->vtxelt->pipe[i].instance_divisor) {
			step[i] = 0;
			pos[i] += nv50->vtxbuf[b].stride;
		}

		bo = nouveau_bo(nv50->vtxbuf[b].buffer);

		so_method(so, tesla, NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
		so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (so, bo, pos[i], rl | NOUVEAU_BO_LOW, 0, 0);
	}

	so_ref (so, &nv50->state.instbuf); /* for flush notify */
	so_ref (NULL, &so);

	so_emit(chan, nv50->state.instbuf);
}

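/* Unmap any vertex buffers that were mapped for CPU access. */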
static INLINE void
nv50_unmap_vbufs(struct nv50_context *nv50)
{
	unsigned i;

	for (i = 0; i < nv50->vtxbuf_nr; ++i)
		if (nouveau_bo(nv50->vtxbuf[i].buffer)->map)
			nouveau_bo_unmap(nouveau_bo(nv50->vtxbuf[i].buffer));
}

void
nv50_draw_arrays_instanced(struct pipe_context *pipe,
			   unsigned mode, unsigned start, unsigned count,
			   unsigned startInstance, unsigned instanceCount)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	unsigned i, nz_divisors;
	unsigned step[16], pos[16];

	if (!NV50_USING_LOATHED_EDGEFLAG(nv50))
		nv50_upload_user_vbufs(nv50);

	nv50_state_validate(nv50);

	nz_divisors = init_per_instance_arrays(nv50, startInstance, pos, step);

	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
	OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
	OUT_RING  (chan, startInstance);

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
	OUT_RING  (chan, nv50_prim(mode));

	if (nv50->vbo_fifo)
		nv50_push_arrays(nv50, start, count);
	else {
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
		OUT_RING  (chan, start);
		OUT_RING  (chan, count);
	}
	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
	OUT_RING  (chan, 0);

	for (i = 1; i < instanceCount; i++) {
		if (nz_divisors) /* any non-zero array divisors ? */
			step_per_instance_arrays(nv50, pos, step);

		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
		OUT_RING  (chan, nv50_prim(mode) | (1 << 28));

		if (nv50->vbo_fifo)
			nv50_push_arrays(nv50, start, count);
		else {
			BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
			OUT_RING  (chan, start);
			OUT_RING  (chan, count);
		}
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
		OUT_RING  (chan, 0);
	}
	nv50_unmap_vbufs(nv50);

	so_ref(NULL, &nv50->state.instbuf);
}

void
nv50_draw_arrays(struct pipe_context *pipe, unsigned mode, unsigned start,
		 unsigned count)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	boolean ret;

	nv50_state_validate(nv50);

	BEGIN_RING(chan, tesla, 0x142c, 1);
	OUT_RING  (chan, 0);
	BEGIN_RING(chan, tesla, 0x142c, 1);
	OUT_RING  (chan, 0);

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
	OUT_RING  (chan, nv50_prim(mode));

	if (nv50->vbo_fifo)
		ret = nv50_push_arrays(nv50, start, count);
	else {
		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
		OUT_RING  (chan, start);
		OUT_RING  (chan, count);
		ret = TRUE;
	}
	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
	OUT_RING  (chan, 0);

	nv50_unmap_vbufs(nv50);

	/* XXX: not sure what to do if ret != TRUE: flush and retry?
	 */
	assert(ret);
}

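/* Inline index submission: 8-bit indices are packed in pairs into
 * VB_ELEMENT_U16 (with a single U32 to even out odd counts), unless the
 * push path is active and emits the vertex data itself.
 */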
static INLINE boolean
nv50_draw_elements_inline_u08(struct nv50_context *nv50, uint8_t *map,
			      unsigned start, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;

	map += start;

	if (nv50->vbo_fifo)
		return nv50_push_elements_u08(nv50, map, count);

	if (count & 1) {
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
		OUT_RING  (chan, map[0]);
		map++;
		count--;
	}

	while (count) {
		unsigned nr = count > 2046 ? 2046 : count;
		int i;

		BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, nr >> 1);
		for (i = 0; i < nr; i += 2)
			OUT_RING  (chan, (map[i + 1] << 16) | map[i]);

		count -= nr;
		map += nr;
	}
	return TRUE;
}

static INLINE boolean
nv50_draw_elements_inline_u16(struct nv50_context *nv50, uint16_t *map,
			      unsigned start, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;

	map += start;

	if (nv50->vbo_fifo)
		return nv50_push_elements_u16(nv50, map, count);

	if (count & 1) {
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32, 1);
		OUT_RING  (chan, map[0]);
		map++;
		count--;
	}

	while (count) {
		unsigned nr = count > 2046 ? 2046 : count;
		int i;

		BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U16, nr >> 1);
		for (i = 0; i < nr; i += 2)
			OUT_RING  (chan, (map[i + 1] << 16) | map[i]);

		count -= nr;
		map += nr;
	}
	return TRUE;
}

static INLINE boolean
nv50_draw_elements_inline_u32(struct nv50_context *nv50, uint32_t *map,
			      unsigned start, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;

	map += start;

	if (nv50->vbo_fifo)
		return nv50_push_elements_u32(nv50, map, count);

	while (count) {
		unsigned nr = count > 2047 ? 2047 : count;

		BEGIN_RING_NI(chan, tesla, NV50TCL_VB_ELEMENT_U32, nr);
		OUT_RINGp (chan, map, nr);

		count -= nr;
		map += nr;
	}
	return TRUE;
}

static INLINE void
nv50_draw_elements_inline(struct nv50_context *nv50,
			  void *map, unsigned indexSize,
			  unsigned start, unsigned count)
{
	switch (indexSize) {
	case 1:
		nv50_draw_elements_inline_u08(nv50, map, start, count);
		break;
	case 2:
		nv50_draw_elements_inline_u16(nv50, map, start, count);
		break;
	case 4:
		nv50_draw_elements_inline_u32(nv50, map, start, count);
		break;
	}
}

void
nv50_draw_elements_instanced(struct pipe_context *pipe,
			     struct pipe_buffer *indexBuffer,
			     unsigned indexSize,
			     unsigned mode, unsigned start, unsigned count,
			     unsigned startInstance, unsigned instanceCount)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_channel *chan = tesla->channel;
	struct pipe_screen *pscreen = pipe->screen;
	void *map;
	unsigned i, nz_divisors;
	unsigned step[16], pos[16];

	map = pipe_buffer_map(pscreen, indexBuffer, PIPE_BUFFER_USAGE_CPU_READ);

	if (!NV50_USING_LOATHED_EDGEFLAG(nv50))
		nv50_upload_user_vbufs(nv50);

	nv50_state_validate(nv50);

	nz_divisors = init_per_instance_arrays(nv50, startInstance, pos, step);

	BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
	OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
	OUT_RING  (chan, startInstance);

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
	OUT_RING  (chan, nv50_prim(mode));

	nv50_draw_elements_inline(nv50, map, indexSize, start, count);

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
	OUT_RING  (chan, 0);

	for (i = 1; i < instanceCount; ++i) {
		if (nz_divisors) /* any non-zero array divisors ? */
			step_per_instance_arrays(nv50, pos, step);

		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
		OUT_RING  (chan, nv50_prim(mode) | (1 << 28));

		nv50_draw_elements_inline(nv50, map, indexSize, start, count);

		BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
		OUT_RING  (chan, 0);
	}
	nv50_unmap_vbufs(nv50);

	so_ref(NULL, &nv50->state.instbuf);
}

void
nv50_draw_elements(struct pipe_context *pipe,
		   struct pipe_buffer *indexBuffer, unsigned indexSize,
		   unsigned mode, unsigned start, unsigned count)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->tesla->channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct pipe_screen *pscreen = pipe->screen;
	void *map;

	nv50_state_validate(nv50);

	BEGIN_RING(chan, tesla, 0x142c, 1);
	OUT_RING  (chan, 0);
	BEGIN_RING(chan, tesla, 0x142c, 1);
	OUT_RING  (chan, 0);

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
	OUT_RING  (chan, nv50_prim(mode));

	if (!nv50->vbo_fifo && indexSize == 4) {
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U32 | 0x30000, 0);
		OUT_RING  (chan, count);
		nouveau_pushbuf_submit(chan, nouveau_bo(indexBuffer),
				       start << 2, count << 2);
	} else
	if (!nv50->vbo_fifo && indexSize == 2) {
		unsigned vb_start = (start & ~1);
		unsigned vb_end = (start + count + 1) & ~1;
		unsigned dwords = (vb_end - vb_start) >> 1;

		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
		OUT_RING  (chan, ((start & 1) << 31) | count);
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16 | 0x30000, 0);
		OUT_RING  (chan, dwords);
		nouveau_pushbuf_submit(chan, nouveau_bo(indexBuffer),
				       vb_start << 1, dwords << 2);
		BEGIN_RING(chan, tesla, NV50TCL_VB_ELEMENT_U16_SETUP, 1);
		OUT_RING  (chan, 0);
	} else {
		map = pipe_buffer_map(pscreen, indexBuffer,
				      PIPE_BUFFER_USAGE_CPU_READ);
		nv50_draw_elements_inline(nv50, map, indexSize, start, count);
		nv50_unmap_vbufs(nv50);
		pipe_buffer_unmap(pscreen, indexBuffer);
	}

	BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
	OUT_RING  (chan, 0);
}

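/* For a stride-0 array, read the constant attribute value and record it in
 * a stateobj as immediate VTX_ATTR data; if the attribute is the vertex
 * program's edgeflag input, the edgeflag state is updated as well.
 */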
static INLINE boolean
nv50_vbo_static_attrib(struct nv50_context *nv50, unsigned attrib,
		       struct nouveau_stateobj **pso,
		       struct pipe_vertex_element *ve,
		       struct pipe_vertex_buffer *vb)

{
	struct nouveau_stateobj *so;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_bo *bo = nouveau_bo(vb->buffer);
	float v[4];
	int ret;
	unsigned nr_components = util_format_get_nr_components(ve->src_format);

	ret = nouveau_bo_map(bo, NOUVEAU_BO_RD);
	if (ret)
		return FALSE;

	util_format_read_4f(ve->src_format, v, 0, (uint8_t *)bo->map +
			    (vb->buffer_offset + ve->src_offset), 0,
			    0, 0, 1, 1);
	so = *pso;
	if (!so)
		*pso = so = so_new(nv50->vtxelt->num_elements,
				   nv50->vtxelt->num_elements * 4, 0);

	switch (nr_components) {
	case 4:
		so_method(so, tesla, NV50TCL_VTX_ATTR_4F_X(attrib), 4);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		so_data  (so, fui(v[2]));
		so_data  (so, fui(v[3]));
		break;
	case 3:
		so_method(so, tesla, NV50TCL_VTX_ATTR_3F_X(attrib), 3);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		so_data  (so, fui(v[2]));
		break;
	case 2:
		so_method(so, tesla, NV50TCL_VTX_ATTR_2F_X(attrib), 2);
		so_data  (so, fui(v[0]));
		so_data  (so, fui(v[1]));
		break;
	case 1:
		if (attrib == nv50->vertprog->cfg.edgeflag_in) {
			so_method(so, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
			so_data  (so, v[0] ? 1 : 0);
		}
		so_method(so, tesla, NV50TCL_VTX_ATTR_1F(attrib), 1);
		so_data  (so, fui(v[0]));
		break;
	default:
		nouveau_bo_unmap(bo);
		return FALSE;
	}

	nouveau_bo_unmap(bo);
	return TRUE;
}

void
nv50_vtxelt_construct(struct nv50_vtxelt_stateobj *cso)
{
	unsigned i;

	for (i = 0; i < cso->num_elements; ++i) {
		struct pipe_vertex_element *ve = &cso->pipe[i];

		cso->hw[i] = nv50_vbo_vtxelt_to_hw(ve);
	}
}

struct nouveau_stateobj *
nv50_vbo_validate(struct nv50_context *nv50)
{
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nouveau_stateobj *vtxbuf, *vtxfmt, *vtxattr;
	unsigned i, n_ve;

	/* don't validate if Gallium took away our buffers */
	if (nv50->vtxbuf_nr == 0)
		return NULL;
	nv50->vbo_fifo = 0;

	for (i = 0; i < nv50->vtxbuf_nr; ++i)
		if (nv50->vtxbuf[i].stride &&
		    !(nv50->vtxbuf[i].buffer->usage & PIPE_BUFFER_USAGE_VERTEX))
			nv50->vbo_fifo = 0xffff;

	if (NV50_USING_LOATHED_EDGEFLAG(nv50))
		nv50->vbo_fifo = 0xffff; /* vertprog can't set edgeflag */

	n_ve = MAX2(nv50->vtxelt->num_elements, nv50->state.vtxelt_nr);

	vtxattr = NULL;
	vtxbuf = so_new(n_ve * 2, n_ve * 5, nv50->vtxelt->num_elements * 4);
	vtxfmt = so_new(1, n_ve, 0);
	so_method(vtxfmt, tesla, NV50TCL_VERTEX_ARRAY_ATTRIB(0), n_ve);

	for (i = 0; i < nv50->vtxelt->num_elements; i++) {
		struct pipe_vertex_element *ve = &nv50->vtxelt->pipe[i];
		struct pipe_vertex_buffer *vb =
			&nv50->vtxbuf[ve->vertex_buffer_index];
		struct nouveau_bo *bo = nouveau_bo(vb->buffer);
		uint32_t hw = nv50->vtxelt->hw[i];

		if (!vb->stride &&
		    nv50_vbo_static_attrib(nv50, i, &vtxattr, ve, vb)) {
			so_data(vtxfmt, hw | (1 << 4));

			so_method(vtxbuf, tesla,
				  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
			so_data  (vtxbuf, 0);

			nv50->vbo_fifo &= ~(1 << i);
			continue;
		}

		if (nv50->vbo_fifo) {
			so_data  (vtxfmt, hw |
				  (ve->instance_divisor ? (1 << 4) : i));
			so_method(vtxbuf, tesla,
				  NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
			so_data  (vtxbuf, 0);
			continue;
		}
		so_data(vtxfmt, hw | i);

		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 3);
		so_data  (vtxbuf, 0x20000000 |
			  (ve->instance_divisor ? 0 : vb->stride));
		so_reloc (vtxbuf, bo, vb->buffer_offset +
			  ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
			  NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (vtxbuf, bo, vb->buffer_offset +
			  ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
			  NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);

		/* vertex array limits */
		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_LIMIT_HIGH(i), 2);
		so_reloc (vtxbuf, bo, vb->buffer->size - 1,
			  NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
			  NOUVEAU_BO_HIGH, 0, 0);
		so_reloc (vtxbuf, bo, vb->buffer->size - 1,
			  NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
			  NOUVEAU_BO_LOW, 0, 0);
	}
	for (; i < n_ve; ++i) {
		so_data  (vtxfmt, 0x7e080010);

		so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
		so_data  (vtxbuf, 0);
	}
	nv50->state.vtxelt_nr = nv50->vtxelt->num_elements;

	so_ref (vtxbuf, &nv50->state.vtxbuf);
	so_ref (vtxattr, &nv50->state.vtxattr);
	so_ref (NULL, &vtxbuf);
	so_ref (NULL, &vtxattr);
	return vtxfmt;
}

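/* Push-path (vbo_fifo) machinery: vertex attributes are written into the
 * command stream through VERTEX_DATA instead of being fetched from the
 * vertex arrays by the hardware.
 */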
typedef void (*pfn_push)(struct nouveau_channel *, void *);

struct nv50_vbo_emitctx
{
	pfn_push push[16];
	uint8_t *map[16];
	unsigned stride[16];
	unsigned nr_ve;
	unsigned vtx_dwords;
	unsigned vtx_max;

	float edgeflag;
	unsigned ve_edgeflag;
};

static INLINE void
emit_vtx_next(struct nouveau_channel *chan, struct nv50_vbo_emitctx *emit)
{
	unsigned i;

	for (i = 0; i < emit->nr_ve; ++i) {
		emit->push[i](chan, emit->map[i]);
		emit->map[i] += emit->stride[i];
	}
}

static INLINE void
emit_vtx(struct nouveau_channel *chan, struct nv50_vbo_emitctx *emit,
	 uint32_t vi)
{
	unsigned i;

	for (i = 0; i < emit->nr_ve; ++i)
		emit->push[i](chan, emit->map[i] + emit->stride[i] * vi);
}

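/* Map all vertex buffers for CPU reads; on failure, unmap whatever was
 * already mapped and return FALSE.
 */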
static INLINE boolean
nv50_map_vbufs(struct nv50_context *nv50)
{
	int i;

	for (i = 0; i < nv50->vtxbuf_nr; ++i) {
		struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
		unsigned size = vb->stride * (vb->max_index + 1) + 16;

		if (nouveau_bo(vb->buffer)->map)
			continue;

		size = vb->stride * (vb->max_index + 1) + 16;
		size = MIN2(size, vb->buffer->size);
		if (!size)
			size = vb->buffer->size;

		if (nouveau_bo_map_range(nouveau_bo(vb->buffer),
					 0, size, NOUVEAU_BO_RD))
			break;
	}

	if (i == nv50->vtxbuf_nr)
		return TRUE;
	for (; i >= 0; --i)
		nouveau_bo_unmap(nouveau_bo(nv50->vtxbuf[i].buffer));
	return FALSE;
}

static void
emit_b32_1(struct nouveau_channel *chan, void *data)
{
	uint32_t *v = data;

	OUT_RING(chan, v[0]);
}

static void
emit_b32_2(struct nouveau_channel *chan, void *data)
{
	uint32_t *v = data;

	OUT_RING(chan, v[0]);
	OUT_RING(chan, v[1]);
}

static void
emit_b32_3(struct nouveau_channel *chan, void *data)
{
	uint32_t *v = data;

	OUT_RING(chan, v[0]);
	OUT_RING(chan, v[1]);
	OUT_RING(chan, v[2]);
}

static void
emit_b32_4(struct nouveau_channel *chan, void *data)
{
	uint32_t *v = data;

	OUT_RING(chan, v[0]);
	OUT_RING(chan, v[1]);
	OUT_RING(chan, v[2]);
	OUT_RING(chan, v[3]);
}

static void
emit_b16_1(struct nouveau_channel *chan, void *data)
{
	uint16_t *v = data;

	OUT_RING(chan, v[0]);
}

static void
emit_b16_3(struct nouveau_channel *chan, void *data)
{
	uint16_t *v = data;

	OUT_RING(chan, (v[1] << 16) | v[0]);
	OUT_RING(chan, v[2]);
}

static void
emit_b08_1(struct nouveau_channel *chan, void *data)
{
	uint8_t *v = data;

	OUT_RING(chan, v[0]);
}

static void
emit_b08_3(struct nouveau_channel *chan, void *data)
{
	uint8_t *v = data;

	OUT_RING(chan, (v[2] << 16) | (v[1] << 8) | v[0]);
}

static boolean
emit_prepare(struct nv50_context *nv50, struct nv50_vbo_emitctx *emit,
	     unsigned start)
{
	unsigned i;

	if (nv50_map_vbufs(nv50) == FALSE)
		return FALSE;

	emit->ve_edgeflag = nv50->vertprog->cfg.edgeflag_in;

	emit->edgeflag = 0.5f;
	emit->nr_ve = 0;
	emit->vtx_dwords = 0;

	for (i = 0; i < nv50->vtxelt->num_elements; ++i) {
		struct pipe_vertex_element *ve;
		struct pipe_vertex_buffer *vb;
		unsigned n, size, nr_components;
		const struct util_format_description *desc;

		ve = &nv50->vtxelt->pipe[i];
		vb = &nv50->vtxbuf[ve->vertex_buffer_index];
		if (!(nv50->vbo_fifo & (1 << i)) || ve->instance_divisor)
			continue;
		n = emit->nr_ve++;

		emit->stride[n] = vb->stride;
		emit->map[n] = (uint8_t *)nouveau_bo(vb->buffer)->map +
			vb->buffer_offset +
			(start * vb->stride + ve->src_offset);

		desc = util_format_description(ve->src_format);
		assert(desc);

		size = util_format_get_component_bits(
			ve->src_format, UTIL_FORMAT_COLORSPACE_RGB, 0);
		nr_components = util_format_get_nr_components(ve->src_format);

		assert(nr_components > 0 && nr_components <= 4);

		/* It shouldn't be necessary to push the implicit 1s
		 * for case 3 and size 8 cases 1, 2, 3.
		 */
		switch (size) {
		default:
			NOUVEAU_ERR("unsupported vtxelt size: %u\n", size);
			return FALSE;
		case 32:
			switch (nr_components) {
			case 1: emit->push[n] = emit_b32_1; break;
			case 2: emit->push[n] = emit_b32_2; break;
			case 3: emit->push[n] = emit_b32_3; break;
			case 4: emit->push[n] = emit_b32_4; break;
			}
			emit->vtx_dwords += nr_components;
			break;
		case 16:
			switch (nr_components) {
			case 1: emit->push[n] = emit_b16_1; break;
			case 2: emit->push[n] = emit_b32_1; break;
			case 3: emit->push[n] = emit_b16_3; break;
			case 4: emit->push[n] = emit_b32_2; break;
			}
			emit->vtx_dwords += (nr_components + 1) >> 1;
			break;
		case 8:
			switch (nr_components) {
			case 1: emit->push[n] = emit_b08_1; break;
			case 2: emit->push[n] = emit_b16_1; break;
			case 3: emit->push[n] = emit_b08_3; break;
			case 4: emit->push[n] = emit_b32_1; break;
			}
			emit->vtx_dwords += 1;
			break;
		}
	}

	emit->vtx_max = 512 / emit->vtx_dwords;
	if (emit->ve_edgeflag < 16)
		emit->vtx_max = 1;

	return TRUE;
}

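/* If the vertex program consumes an edgeflag attribute, fetch its value for
 * this vertex and update the hardware state whenever it changes.
 */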
static INLINE void
set_edgeflag(struct nouveau_channel *chan,
	     struct nouveau_grobj *tesla,
	     struct nv50_vbo_emitctx *emit, uint32_t index)
{
	unsigned i = emit->ve_edgeflag;

	if (i < 16) {
		float f = *((float *)(emit->map[i] + index * emit->stride[i]));

		if (emit->edgeflag != f) {
			emit->edgeflag = f;

			BEGIN_RING(chan, tesla, 0x15e4, 1);
			OUT_RING  (chan, f ? 1 : 0);
		}
	}
}

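/* Emit non-indexed vertices inline; vtx_max bounds how many vertices fit in
 * one VERTEX_DATA packet (and is forced to 1 while edgeflags are tracked).
 */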
static boolean
nv50_push_arrays(struct nv50_context *nv50, unsigned start, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->base.channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nv50_vbo_emitctx emit;

	if (emit_prepare(nv50, &emit, start) == FALSE)
		return FALSE;

	while (count) {
		unsigned i, dw, nr = MIN2(count, emit.vtx_max);
		dw = nr * emit.vtx_dwords;

		set_edgeflag(chan, tesla, &emit, 0); /* nr will be 1 */

		BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
		for (i = 0; i < nr; ++i)
			emit_vtx_next(chan, &emit);

		count -= nr;
	}

	return TRUE;
}

static boolean
nv50_push_elements_u32(struct nv50_context *nv50, uint32_t *map, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->base.channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nv50_vbo_emitctx emit;

	if (emit_prepare(nv50, &emit, 0) == FALSE)
		return FALSE;

	while (count) {
		unsigned i, dw, nr = MIN2(count, emit.vtx_max);
		dw = nr * emit.vtx_dwords;

		set_edgeflag(chan, tesla, &emit, *map);

		BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
		for (i = 0; i < nr; ++i)
			emit_vtx(chan, &emit, *map++);

		count -= nr;
	}

	return TRUE;
}

static boolean
nv50_push_elements_u16(struct nv50_context *nv50, uint16_t *map, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->base.channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nv50_vbo_emitctx emit;

	if (emit_prepare(nv50, &emit, 0) == FALSE)
		return FALSE;

	while (count) {
		unsigned i, dw, nr = MIN2(count, emit.vtx_max);
		dw = nr * emit.vtx_dwords;

		set_edgeflag(chan, tesla, &emit, *map);

		BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
		for (i = 0; i < nr; ++i)
			emit_vtx(chan, &emit, *map++);

		count -= nr;
	}

	return TRUE;
}

static boolean
nv50_push_elements_u08(struct nv50_context *nv50, uint8_t *map, unsigned count)
{
	struct nouveau_channel *chan = nv50->screen->base.channel;
	struct nouveau_grobj *tesla = nv50->screen->tesla;
	struct nv50_vbo_emitctx emit;

	if (emit_prepare(nv50, &emit, 0) == FALSE)
		return FALSE;

	while (count) {
		unsigned i, dw, nr = MIN2(count, emit.vtx_max);
		dw = nr * emit.vtx_dwords;

		set_edgeflag(chan, tesla, &emit, *map);

		BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, dw);
		for (i = 0; i < nr; ++i)
			emit_vtx(chan, &emit, *map++);

		count -= nr;
	}

	return TRUE;
}