r300_render.c revision eb7ef433bbbeabda963e74adf0ef61c47883f292
1/*
2 * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
23
24/* r300_render: Vertex and index buffer primitive emission. Contains both
25 * HW TCL fastpath rendering and SW TCL Draw-assisted rendering. */
26
27#include "draw/draw_context.h"
28#include "draw/draw_vbuf.h"
29
30#include "util/u_inlines.h"
31
32#include "util/u_format.h"
33#include "util/u_memory.h"
34#include "util/u_upload_mgr.h"
35#include "util/u_prim.h"
36
37#include "r300_cs.h"
38#include "r300_cb.h"
39#include "r300_context.h"
40#include "r300_screen_buffer.h"
41#include "r300_emit.h"
42#include "r300_reg.h"
43#include "r300_state_derived.h"
44
45#include <limits.h>
46
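/* Upper bound on the amount of inline vertex data, in dwords, that the
 * immediate-mode path may put directly into the command stream; larger
 * draws fall back to the regular vertex-buffer path (see immd_is_good_idea). */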
47#define IMMD_DWORDS 32
48
49static uint32_t r300_translate_primitive(unsigned prim)
50{
51    switch (prim) {
52        case PIPE_PRIM_POINTS:
53            return R300_VAP_VF_CNTL__PRIM_POINTS;
54        case PIPE_PRIM_LINES:
55            return R300_VAP_VF_CNTL__PRIM_LINES;
56        case PIPE_PRIM_LINE_LOOP:
57            return R300_VAP_VF_CNTL__PRIM_LINE_LOOP;
58        case PIPE_PRIM_LINE_STRIP:
59            return R300_VAP_VF_CNTL__PRIM_LINE_STRIP;
60        case PIPE_PRIM_TRIANGLES:
61            return R300_VAP_VF_CNTL__PRIM_TRIANGLES;
62        case PIPE_PRIM_TRIANGLE_STRIP:
63            return R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP;
64        case PIPE_PRIM_TRIANGLE_FAN:
65            return R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN;
66        case PIPE_PRIM_QUADS:
67            return R300_VAP_VF_CNTL__PRIM_QUADS;
68        case PIPE_PRIM_QUAD_STRIP:
69            return R300_VAP_VF_CNTL__PRIM_QUAD_STRIP;
70        case PIPE_PRIM_POLYGON:
71            return R300_VAP_VF_CNTL__PRIM_POLYGON;
72        default:
73            return 0;
74    }
75}
76
77static uint32_t r300_provoking_vertex_fixes(struct r300_context *r300,
78                                            unsigned mode)
79{
80    struct r300_rs_state* rs = (struct r300_rs_state*)r300->rs_state.state;
81    uint32_t color_control = rs->color_control;
82
83    /* By default (see r300_state.c:r300_create_rs_state) color_control is
84     * initialized to provoke from the first vertex.
85     *
86     * Triangle fans must provoke from the second vertex, not the first, in
87     * Gallium flatshade-first mode, as per the GL spec.
88     * (http://www.opengl.org/registry/specs/ARB/provoking_vertex.txt)
89     *
90     * Quads never provoke correctly in flatshade-first mode. The first
91     * vertex is never considered as provoking, so only the second, third,
92     * and fourth vertices can be selected, and both "third" and "last" modes
93     * select the fourth vertex. This is probably due to D3D lacking quads.
94     *
95     * Similarly, polygons provoke from the first, not the last, vertex when in
96     * "last" mode, and all other modes start from the second vertex.
97     *
98     * ~ C.
99     */
100
101    if (rs->rs.flatshade_first) {
102        switch (mode) {
103            case PIPE_PRIM_TRIANGLE_FAN:
104                color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_SECOND;
105                break;
106            case PIPE_PRIM_QUADS:
107            case PIPE_PRIM_QUAD_STRIP:
108            case PIPE_PRIM_POLYGON:
109                color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_LAST;
110                break;
111            default:
112                color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_FIRST;
113                break;
114        }
115    } else {
116        color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_LAST;
117    }
118
119    return color_control;
120}
121
122static boolean index_bias_supported(struct r300_context *r300)
123{
124    return r300->screen->caps.is_r500 &&
125           r300->rws->get_value(r300->rws, R300_VID_DRM_2_3_0);
126}
127
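/* Program the index bias into R500_VAP_INDEX_OFFSET. The value written below
 * is the bias masked to 24 bits with bit 24 set for negative biases, which
 * amounts to a 25-bit two's-complement encoding; presumably that is the
 * format the VAP expects for this register. */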
128static void r500_emit_index_bias(struct r300_context *r300, int index_bias)
129{
130    CS_LOCALS(r300);
131
132    BEGIN_CS(2);
133    OUT_CS_REG(R500_VAP_INDEX_OFFSET,
134               (index_bias & 0xFFFFFF) | (index_bias < 0 ? 1<<24 : 0));
135    END_CS;
136}
137
138/* This function splits the index bias value into two parts:
139 * - buffer_offset: the value that can be safely added to buffer offsets
140 *   in r300_emit_aos (it must yield a non-negative offset when added to
141 *   a vertex buffer offset)
142 * - index_offset: the remainder, which must be manually subtracted from the
143 *   indices in the index buffer to apply the rest of the negative bias. */
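/* Worked example (illustrative numbers): if index_bias is -100 and the
 * smallest (buffer_offset + src_offset) / stride across the enabled vertex
 * elements is 40, then max_neg_bias is 40, *buffer_offset becomes
 * MAX2(-40, -100) = -40 and *index_offset becomes -100 - (-40) = -60,
 * which still has to be subtracted from every index. */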
144static void r300_split_index_bias(struct r300_context *r300, int index_bias,
145                                  int *buffer_offset, int *index_offset)
146{
147    struct pipe_vertex_buffer *vb, *vbufs = r300->vertex_buffer;
148    struct pipe_vertex_element *velem = r300->velems->velem;
149    unsigned i, size;
150    int max_neg_bias;
151
152    if (index_bias < 0) {
153        /* See how large index bias we may subtract. We must be careful
154         * here because negative buffer offsets are not allowed
155         * by the DRM API. */
156        max_neg_bias = INT_MAX;
157        for (i = 0; i < r300->velems->count; i++) {
158            vb = &vbufs[velem[i].vertex_buffer_index];
159            size = (vb->buffer_offset + velem[i].src_offset) / vb->stride;
160            max_neg_bias = MIN2(max_neg_bias, size);
161        }
162
163        /* Now set the minimum allowed value. */
164        *buffer_offset = MAX2(-max_neg_bias, index_bias);
165    } else {
166        /* A positive index bias is OK. */
167        *buffer_offset = index_bias;
168    }
169
170    *index_offset = index_bias - *buffer_offset;
171}
172
173enum r300_prepare_flags {
174    PREP_FIRST_DRAW     = (1 << 0), /* call emit_dirty_state and friends? */
175    PREP_VALIDATE_VBOS  = (1 << 1), /* validate VBOs? */
176    PREP_EMIT_AOS       = (1 << 2), /* call emit_aos? */
177    PREP_EMIT_AOS_SWTCL = (1 << 3), /* call emit_aos_swtcl? */
178    PREP_INDEXED        = (1 << 4)  /* is this draw_elements? */
179};
180
181/**
182 * Check if the requested number of dwords is available in the CS and
183 * if not, flush. Then validate buffers and emit dirty state.
184 * \param r300          The context.
185 * \param flags         See r300_prepare_flags.
186 * \param index_buffer  The index buffer to validate. The parameter may be NULL.
187 * \param cs_dwords     The number of dwords to reserve in CS.
188 * \param aos_offset    The offset passed to emit_aos.
189 * \param index_bias    The index bias to emit.
190 * \param end_cs_dwords Returns the number of dwords which must remain free
191 *                      at the end of the CS after drawing, for callers that
192 *                      manage the CS space manually.
193 *                      The parameter may be NULL.
194 */
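/* For example, the indexed HW TCL path below asks for 15 dwords for its draw
 * packets; a first draw additionally accounts for the currently dirty state,
 * 2 dwords for the index offset where supported and up to 55 for emit_aos,
 * and every draw keeps a 26-dword emit_query_end reserve on top before the
 * CS space check. */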
195static void r300_prepare_for_rendering(struct r300_context *r300,
196                                       enum r300_prepare_flags flags,
197                                       struct pipe_resource *index_buffer,
198                                       unsigned cs_dwords,
199                                       int aos_offset,
200                                       int index_bias,
201                                       unsigned *end_cs_dwords)
202{
203    unsigned end_dwords    = 0;
204    boolean flushed        = FALSE;
205    boolean first_draw     = flags & PREP_FIRST_DRAW;
206    boolean emit_aos       = flags & PREP_EMIT_AOS;
207    boolean emit_aos_swtcl = flags & PREP_EMIT_AOS_SWTCL;
208    boolean indexed        = flags & PREP_INDEXED;
209    boolean hw_index_bias  = index_bias_supported(r300);
210
211    /* Add dirty state, index offset, and AOS. */
212    if (first_draw) {
213        cs_dwords += r300_get_num_dirty_dwords(r300);
214
215        if (hw_index_bias)
216            cs_dwords += 2; /* emit_index_offset */
217
218        if (emit_aos)
219            cs_dwords += 55; /* emit_aos */
220
221        if (emit_aos_swtcl)
222            cs_dwords += 7; /* emit_aos_swtcl */
223    }
224
225    /* Emitted in flush. */
226    end_dwords += 26; /* emit_query_end */
227
228    cs_dwords += end_dwords;
229
230    /* Reserve requested CS space. */
231    if (!r300_check_cs(r300, cs_dwords)) {
232        r300->context.flush(&r300->context, 0, NULL);
233        flushed = TRUE;
234    }
235
236    /* Validate buffers and emit dirty state if needed. */
237    if (first_draw || flushed) {
238        r300_emit_buffer_validate(r300, flags & PREP_VALIDATE_VBOS, index_buffer);
239        r300_emit_dirty_state(r300);
240        if (hw_index_bias) {
241            if (r300->screen->caps.has_tcl)
242                r500_emit_index_bias(r300, index_bias);
243            else
244                r500_emit_index_bias(r300, 0);
245        }
246
247        if (emit_aos)
248            r300_emit_aos(r300, aos_offset, indexed);
249
250        if (emit_aos_swtcl)
251            r300_emit_aos_swtcl(r300, indexed);
252    }
253
254    if (end_cs_dwords)
255        *end_cs_dwords = end_dwords;
256}
257
258static boolean immd_is_good_idea(struct r300_context *r300,
259                                 unsigned count)
260{
261    struct pipe_vertex_element* velem;
262    struct pipe_vertex_buffer* vbuf;
263    boolean checked[PIPE_MAX_ATTRIBS] = {0};
264    unsigned vertex_element_count = r300->velems->count;
265    unsigned i, vbi;
266
267    if (DBG_ON(r300, DBG_NO_IMMD)) {
268        return FALSE;
269    }
270
271    if (r300->draw) {
272        return FALSE;
273    }
274
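    /* With IMMD_DWORDS = 32, a draw qualifies only if all of its vertex data
     * fits in 32 dwords, e.g. at most 8 vertices of a 4-dword vertex layout. */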
275    if (count * r300->velems->vertex_size_dwords > IMMD_DWORDS) {
276        return FALSE;
277    }
278
279    /* We shouldn't map buffers referenced by the CS, busy buffers,
280     * or ones placed in VRAM. */
281    /* XXX Check for VRAM buffers. */
282    for (i = 0; i < vertex_element_count; i++) {
283        velem = &r300->velems->velem[i];
284        vbi = velem->vertex_buffer_index;
285
286        if (!checked[vbi]) {
287            vbuf = &r300->vertex_buffer[vbi];
288
289            if (r300_buffer_is_referenced(&r300->context,
290                                          vbuf->buffer,
291                                          R300_REF_CS | R300_REF_HW)) {
292                /* It's a very bad idea to map it... */
293                return FALSE;
294            }
295            checked[vbi] = TRUE;
296        }
297    }
298    return TRUE;
299}
300
301/*****************************************************************************
302 * The emission of draw packets for r500. Older GPUs may use these functions *
303 * after resolving fallback issues (e.g. stencil ref two-sided).             *
304 ****************************************************************************/
305
306static void r300_emit_draw_arrays_immediate(struct r300_context *r300,
307                                            unsigned mode,
308                                            unsigned start,
309                                            unsigned count)
310{
311    struct pipe_vertex_element* velem;
312    struct pipe_vertex_buffer* vbuf;
313    unsigned vertex_element_count = r300->velems->count;
314    unsigned i, v, vbi, dwords;
315
316    /* Size of the vertex, in dwords. */
317    unsigned vertex_size = r300->velems->vertex_size_dwords;
318
319    /* Offset of each attribute, in dwords, from the start of the vertex. */
320    unsigned offset[PIPE_MAX_ATTRIBS];
321
322    /* Size of the vertex element, in dwords. */
323    unsigned size[PIPE_MAX_ATTRIBS];
324
325    /* Stride to the same attrib in the next vertex in the vertex buffer,
326     * in dwords. */
327    unsigned stride[PIPE_MAX_ATTRIBS] = {0};
328
329    /* Mapped vertex buffers. */
330    uint32_t* map[PIPE_MAX_ATTRIBS] = {0};
331    struct pipe_transfer* transfer[PIPE_MAX_ATTRIBS] = {NULL};
332
333    CB_LOCALS;
334
335    /* Calculate the vertex size, offsets, strides etc. and map the buffers. */
336    for (i = 0; i < vertex_element_count; i++) {
337        velem = &r300->velems->velem[i];
338        offset[i] = velem->src_offset / 4;
339        size[i] = r300->velems->hw_format_size[i] / 4;
340        vbi = velem->vertex_buffer_index;
341
342        /* Map the buffer. */
343        if (!map[vbi]) {
344            vbuf = &r300->vertex_buffer[vbi];
345            map[vbi] = (uint32_t*)pipe_buffer_map(&r300->context,
346                                                  vbuf->buffer,
347                                                  PIPE_TRANSFER_READ,
348						  &transfer[vbi]);
349            stride[vbi] = vbuf->stride / 4;
350            map[vbi] += vbuf->buffer_offset / 4 + stride[vbi] * start;
351        }
352    }
353
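    /* 9 fixed dwords: GA_COLOR_CONTROL (2) + VAP_VTX_SIZE (2) +
     * the VF_MAX_VTX_INDX pair (3) + the 3D_DRAW_IMMD_2 header and
     * VF_CNTL (2), followed by the inline vertex data. */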
354    dwords = 9 + count * vertex_size;
355
356    r300_prepare_for_rendering(r300, PREP_FIRST_DRAW, NULL, dwords, 0, 0, NULL);
357
358    BEGIN_CS_AS_CB(r300, dwords);
359    OUT_CB_REG(R300_GA_COLOR_CONTROL,
360            r300_provoking_vertex_fixes(r300, mode));
361    OUT_CB_REG(R300_VAP_VTX_SIZE, vertex_size);
362    OUT_CB_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
363    OUT_CB(count - 1);
364    OUT_CB(0);
365    OUT_CB_PKT3(R300_PACKET3_3D_DRAW_IMMD_2, count * vertex_size);
366    OUT_CB(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED | (count << 16) |
367            r300_translate_primitive(mode));
368
369    /* Emit vertices. */
370    for (v = 0; v < count; v++) {
371        for (i = 0; i < vertex_element_count; i++) {
372            vbi = r300->velems->velem[i].vertex_buffer_index;
373
374            OUT_CB_TABLE(&map[vbi][offset[i] + stride[vbi] * v], size[i]);
375        }
376    }
377    END_CB;
378
379    /* Unmap buffers. */
380    for (i = 0; i < vertex_element_count; i++) {
381        vbi = r300->velems->velem[i].vertex_buffer_index;
382
383        if (map[vbi]) {
384            vbuf = &r300->vertex_buffer[vbi];
385            pipe_buffer_unmap(&r300->context, vbuf->buffer, transfer[vbi]);
386            map[vbi] = NULL;
387        }
388    }
389}
390
391static void r300_emit_draw_arrays(struct r300_context *r300,
392                                  unsigned mode,
393                                  unsigned count)
394{
395    boolean alt_num_verts = count > 65535;
396    CS_LOCALS(r300);
397
398    if (count >= (1 << 24)) {
399        fprintf(stderr, "r300: Got a huge number of vertices: %u, "
400                "refusing to render.\n", count);
401        return;
402    }
403
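    /* VF_CNTL carries the vertex count in its upper 16 bits (count << 16);
     * larger counts are only possible through R500_VAP_ALT_NUM_VERTICES,
     * and the 1 << 24 check above is presumably the limit of that path. */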
404    BEGIN_CS(7 + (alt_num_verts ? 2 : 0));
405    if (alt_num_verts) {
406        OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
407    }
408    OUT_CS_REG(R300_GA_COLOR_CONTROL,
409            r300_provoking_vertex_fixes(r300, mode));
410    OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
411    OUT_CS(count - 1);
412    OUT_CS(0);
413    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
414    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
415           r300_translate_primitive(mode) |
416           (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
417    END_CS;
418}
419
420static void r300_emit_draw_elements(struct r300_context *r300,
421                                    struct pipe_resource* indexBuffer,
422                                    unsigned indexSize,
423                                    unsigned minIndex,
424                                    unsigned maxIndex,
425                                    unsigned mode,
426                                    unsigned start,
427                                    unsigned count)
428{
429    uint32_t count_dwords;
430    uint32_t offset_dwords = indexSize * start / sizeof(uint32_t);
431    boolean alt_num_verts = count > 65535;
432    CS_LOCALS(r300);
433
434    if (count >= (1 << 24)) {
435        fprintf(stderr, "r300: Got a huge number of vertices: %u, "
436                "refusing to render.\n", count);
437        return;
438    }
439
440    maxIndex = MIN2(maxIndex, r300->vertex_buffer_max_index);
441
442    DBG(r300, DBG_DRAW, "r300: Indexbuf of %u indices, min %u max %u\n",
443        count, minIndex, maxIndex);
444
445    BEGIN_CS(13 + (alt_num_verts ? 2 : 0));
446    if (alt_num_verts) {
447        OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
448    }
449    OUT_CS_REG(R300_GA_COLOR_CONTROL,
450            r300_provoking_vertex_fixes(r300, mode));
451    OUT_CS_REG_SEQ(R300_VAP_VF_MAX_VTX_INDX, 2);
452    OUT_CS(maxIndex);
453    OUT_CS(minIndex);
454    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 0);
455    if (indexSize == 4) {
456        count_dwords = count;
457        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
458               R300_VAP_VF_CNTL__INDEX_SIZE_32bit |
459               r300_translate_primitive(mode) |
460               (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
461    } else {
462        count_dwords = (count + 1) / 2;
463        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
464               r300_translate_primitive(mode) |
465               (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
466    }
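    /* count_dwords is the size of the index data in dwords: one index per
     * dword for 32-bit indices, two packed per dword for 16-bit ones
     * (hence the (count + 1) / 2). It is what the relocation below carries. */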
467
468    /* INDX_BUFFER is a truly special packet3.
469     * Unlike most other packet3s, where the offset comes after the count,
470     * the order is reversed here, so the relocated dword ends up carrying
471     * the size of the index buffer instead of its offset.
472     */
473    OUT_CS_PKT3(R300_PACKET3_INDX_BUFFER, 2);
474    OUT_CS(R300_INDX_BUFFER_ONE_REG_WR | (R300_VAP_PORT_IDX0 >> 2) |
475           (0 << R300_INDX_BUFFER_SKIP_SHIFT));
476    OUT_CS(offset_dwords << 2);
477    OUT_CS_BUF_RELOC(indexBuffer, count_dwords,
478		     r300_buffer(indexBuffer)->domain, 0, 0);
479
480    END_CS;
481}
482
483/* This is the fast-path drawing & emission for HW TCL. */
484static void r300_draw_range_elements(struct pipe_context* pipe,
485                                     struct pipe_resource* indexBuffer,
486                                     unsigned indexSize,
487                                     int indexBias,
488                                     unsigned minIndex,
489                                     unsigned maxIndex,
490                                     unsigned mode,
491                                     unsigned start,
492                                     unsigned count)
493{
494    struct r300_context* r300 = r300_context(pipe);
495    struct pipe_resource* orgIndexBuffer = indexBuffer;
496    boolean alt_num_verts = r300->screen->caps.is_r500 &&
497                            count > 65536 &&
498                            r300->rws->get_value(r300->rws, R300_VID_DRM_2_3_0);
499    unsigned short_count;
500    int buffer_offset = 0, index_offset = 0; /* for index bias emulation */
501    boolean translate = FALSE;
502
503    if (r300->skip_rendering) {
504        return;
505    }
506
507    if (!u_trim_pipe_prim(mode, &count)) {
508        return;
509    }
510
511    /* Set up fallback for incompatible vertex layout if needed. */
512    if (r300->incompatible_vb_layout || r300->velems->incompatible_layout) {
513        r300_begin_vertex_translate(r300);
514        translate = TRUE;
515    }
516
517    if (indexBias && !index_bias_supported(r300)) {
518        r300_split_index_bias(r300, indexBias, &buffer_offset, &index_offset);
519    }
520
521    r300_translate_index_buffer(r300, &indexBuffer, &indexSize, index_offset,
522                                &start, count);
523
524    r300_update_derived_state(r300);
525    r300_upload_index_buffer(r300, &indexBuffer, indexSize, start, count);
526
527    /* 15 dwords for emit_draw_elements */
528    r300_prepare_for_rendering(r300,
529        PREP_FIRST_DRAW | PREP_VALIDATE_VBOS | PREP_EMIT_AOS | PREP_INDEXED,
530        indexBuffer, 15, buffer_offset, indexBias, NULL);
531
532    u_upload_flush(r300->upload_vb);
533    u_upload_flush(r300->upload_ib);
534    if (alt_num_verts || count <= 65535) {
535        r300_emit_draw_elements(r300, indexBuffer, indexSize,
536                                 minIndex, maxIndex, mode, start, count);
537    } else {
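        /* Without ALT_NUM_VERTICES the count field is only 16 bits wide,
         * so emit the draw in chunks of at most 65534 indices and
         * re-reserve CS space between chunks. */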
538        do {
539            short_count = MIN2(count, 65534);
540            r300_emit_draw_elements(r300, indexBuffer, indexSize,
541                                     minIndex, maxIndex,
542                                     mode, start, short_count);
543
544            start += short_count;
545            count -= short_count;
546
547            /* 15 dwords for emit_draw_elements */
548            if (count) {
549                r300_prepare_for_rendering(r300,
550                    PREP_VALIDATE_VBOS | PREP_EMIT_AOS | PREP_INDEXED,
551                    indexBuffer, 15, buffer_offset, indexBias, NULL);
552            }
553        } while (count);
554    }
555
556    if (indexBuffer != orgIndexBuffer) {
557        pipe_resource_reference( &indexBuffer, NULL );
558    }
559
560    if (translate) {
561        r300_end_vertex_translate(r300);
562    }
563}
564
565/* Simple helpers for context setup. Should probably be moved to util. */
566static void r300_draw_elements(struct pipe_context* pipe,
567                               struct pipe_resource* indexBuffer,
568                               unsigned indexSize, int indexBias, unsigned mode,
569                               unsigned start, unsigned count)
570{
571    struct r300_context *r300 = r300_context(pipe);
572
573    pipe->draw_range_elements(pipe, indexBuffer, indexSize, indexBias,
574                              0, r300->vertex_buffer_max_index,
575                              mode, start, count);
576}
577
578static void r300_draw_arrays(struct pipe_context* pipe, unsigned mode,
579                             unsigned start, unsigned count)
580{
581    struct r300_context* r300 = r300_context(pipe);
582    boolean alt_num_verts = r300->screen->caps.is_r500 &&
583                            count > 65536 &&
584                            r300->rws->get_value(r300->rws, R300_VID_DRM_2_3_0);
585    unsigned short_count;
586    boolean translate = FALSE;
587
588    if (r300->skip_rendering) {
589        return;
590    }
591
592    if (!u_trim_pipe_prim(mode, &count)) {
593        return;
594    }
595
596    /* Set up fallback for incompatible vertex layout if needed. */
597    if (r300->incompatible_vb_layout || r300->velems->incompatible_layout) {
598        r300_begin_vertex_translate(r300);
599        translate = TRUE;
600    }
601
602    r300_update_derived_state(r300);
603
604    if (immd_is_good_idea(r300, count)) {
605        r300_emit_draw_arrays_immediate(r300, mode, start, count);
606    } else {
607        /* 9 spare dwords for emit_draw_arrays. */
608        r300_prepare_for_rendering(r300, PREP_FIRST_DRAW | PREP_VALIDATE_VBOS | PREP_EMIT_AOS,
609                               NULL, 9, start, 0, NULL);
610
611        if (alt_num_verts || count <= 65535) {
612            r300_emit_draw_arrays(r300, mode, count);
613        } else {
614            do {
615                short_count = MIN2(count, 65535);
616                r300_emit_draw_arrays(r300, mode, short_count);
617
618                start += short_count;
619                count -= short_count;
620
621                /* 9 spare dwords for emit_draw_arrays. */
622                if (count) {
623                    r300_prepare_for_rendering(r300,
624                        PREP_VALIDATE_VBOS | PREP_EMIT_AOS, NULL, 9,
625                        start, 0, NULL);
626                }
627            } while (count);
628        }
629	u_upload_flush(r300->upload_vb);
630    }
631
632    if (translate) {
633        r300_end_vertex_translate(r300);
634    }
635}
636
637/****************************************************************************
638 * The rest of this file is for SW TCL rendering only. Please be polite and *
639 * keep these functions separated so that they are easier to locate. ~C.    *
640 ***************************************************************************/
641
642/* SW TCL arrays, using Draw. */
643static void r300_swtcl_draw_arrays(struct pipe_context* pipe,
644                                   unsigned mode,
645                                   unsigned start,
646                                   unsigned count)
647{
648    struct r300_context* r300 = r300_context(pipe);
649    struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
650    int i;
651
652    if (r300->skip_rendering) {
653        return;
654    }
655
656    if (!u_trim_pipe_prim(mode, &count)) {
657        return;
658    }
659
660    r300_update_derived_state(r300);
661
662    for (i = 0; i < r300->vertex_buffer_count; i++) {
663        void* buf = pipe_buffer_map(pipe,
664                                    r300->vertex_buffer[i].buffer,
665                                    PIPE_TRANSFER_READ,
666				    &vb_transfer[i]);
667        draw_set_mapped_vertex_buffer(r300->draw, i, buf);
668    }
669
670    draw_set_mapped_element_buffer(r300->draw, 0, 0, NULL);
671
672    draw_arrays(r300->draw, mode, start, count);
673
674    /* XXX Not sure whether this is the best fix.
675     * It prevents the CS from being rejected and avoids weird assertion failures. */
676    draw_flush(r300->draw);
677
678    for (i = 0; i < r300->vertex_buffer_count; i++) {
679        pipe_buffer_unmap(pipe, r300->vertex_buffer[i].buffer,
680			  vb_transfer[i]);
681        draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
682    }
683}
684
685/* SW TCL elements, using Draw. */
686static void r300_swtcl_draw_range_elements(struct pipe_context* pipe,
687                                           struct pipe_resource* indexBuffer,
688                                           unsigned indexSize,
689                                           int indexBias,
690                                           unsigned minIndex,
691                                           unsigned maxIndex,
692                                           unsigned mode,
693                                           unsigned start,
694                                           unsigned count)
695{
696    struct r300_context* r300 = r300_context(pipe);
697    struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
698    struct pipe_transfer *ib_transfer;
699    int i;
700    void* indices;
701
702    if (r300->skip_rendering) {
703        return;
704    }
705
706    if (!u_trim_pipe_prim(mode, &count)) {
707        return;
708    }
709
710    r300_update_derived_state(r300);
711
712    for (i = 0; i < r300->vertex_buffer_count; i++) {
713        void* buf = pipe_buffer_map(pipe,
714                                    r300->vertex_buffer[i].buffer,
715                                    PIPE_TRANSFER_READ,
716				    &vb_transfer[i]);
717        draw_set_mapped_vertex_buffer(r300->draw, i, buf);
718    }
719
720    indices = pipe_buffer_map(pipe, indexBuffer,
721                              PIPE_TRANSFER_READ, &ib_transfer);
722    draw_set_mapped_element_buffer_range(r300->draw, indexSize, indexBias,
723                                         minIndex, maxIndex, indices);
724
725    draw_arrays(r300->draw, mode, start, count);
726
727    /* XXX Not sure whether this is the best fix.
728     * It prevents the CS from being rejected and avoids weird assertion failures. */
729    draw_flush(r300->draw);
730
731    for (i = 0; i < r300->vertex_buffer_count; i++) {
732        pipe_buffer_unmap(pipe, r300->vertex_buffer[i].buffer,
733			  vb_transfer[i]);
734        draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
735    }
736
737    pipe_buffer_unmap(pipe, indexBuffer,
738		      ib_transfer);
739    draw_set_mapped_element_buffer_range(r300->draw, 0, 0,
740                                         start, start + count - 1,
741                                         NULL);
742}
743
744/* Object for rendering using Draw. */
745struct r300_render {
746    /* Parent class */
747    struct vbuf_render base;
748
749    /* Pipe context */
750    struct r300_context* r300;
751
752    /* Vertex information */
753    size_t vertex_size;
754    unsigned prim;
755    unsigned hwprim;
756
757    /* VBO */
758    struct pipe_resource* vbo;
759    size_t vbo_size;
760    size_t vbo_offset;
761    size_t vbo_max_used;
762    void * vbo_ptr;
763
764    struct pipe_transfer *vbo_transfer;
765};
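/* Draw's vbuf module drives this object through the hooks wired up in
 * r300_render_create() below: roughly, it allocates and maps a chunk of the
 * VBO, writes vertices into it, unmaps it, then calls draw_arrays or
 * draw_elements and finally release_vertices. */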
766
767static INLINE struct r300_render*
768r300_render(struct vbuf_render* render)
769{
770    return (struct r300_render*)render;
771}
772
773static const struct vertex_info*
774r300_render_get_vertex_info(struct vbuf_render* render)
775{
776    struct r300_render* r300render = r300_render(render);
777    struct r300_context* r300 = r300render->r300;
778
779    return &r300->vertex_info;
780}
781
782static boolean r300_render_allocate_vertices(struct vbuf_render* render,
783                                                   ushort vertex_size,
784                                                   ushort count)
785{
786    struct r300_render* r300render = r300_render(render);
787    struct r300_context* r300 = r300render->r300;
788    struct pipe_screen* screen = r300->context.screen;
789    size_t size = (size_t)vertex_size * (size_t)count;
790
791    if (size + r300render->vbo_offset > r300render->vbo_size)
792    {
793        pipe_resource_reference(&r300->vbo, NULL);
794        r300render->vbo = pipe_buffer_create(screen,
795                                             PIPE_BIND_VERTEX_BUFFER,
796                                             R300_MAX_DRAW_VBO_SIZE);
797        r300render->vbo_offset = 0;
798        r300render->vbo_size = R300_MAX_DRAW_VBO_SIZE;
799    }
800
801    r300render->vertex_size = vertex_size;
802    r300->vbo = r300render->vbo;
803    r300->vbo_offset = r300render->vbo_offset;
804
805    return (r300render->vbo) ? TRUE : FALSE;
806}
807
808static void* r300_render_map_vertices(struct vbuf_render* render)
809{
810    struct r300_render* r300render = r300_render(render);
811
812    assert(!r300render->vbo_transfer);
813
814    r300render->vbo_ptr = pipe_buffer_map(&r300render->r300->context,
815					  r300render->vbo,
816                                          PIPE_TRANSFER_WRITE,
817					  &r300render->vbo_transfer);
818
819    return ((uint8_t*)r300render->vbo_ptr + r300render->vbo_offset);
820}
821
822static void r300_render_unmap_vertices(struct vbuf_render* render,
823                                             ushort min,
824                                             ushort max)
825{
826    struct r300_render* r300render = r300_render(render);
827    struct pipe_context* context = &r300render->r300->context;
828
829    assert(r300render->vbo_transfer);
830
831    r300render->vbo_max_used = MAX2(r300render->vbo_max_used,
832                                    r300render->vertex_size * (max + 1));
833    pipe_buffer_unmap(context, r300render->vbo, r300render->vbo_transfer);
834
835    r300render->vbo_transfer = NULL;
836}
837
838static void r300_render_release_vertices(struct vbuf_render* render)
839{
840    struct r300_render* r300render = r300_render(render);
841
842    r300render->vbo_offset += r300render->vbo_max_used;
843    r300render->vbo_max_used = 0;
844}
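/* Note that releasing only advances vbo_offset past the bytes that were used;
 * the same VBO keeps being sub-allocated by subsequent draws until
 * r300_render_allocate_vertices runs out of space and replaces it. */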
845
846static boolean r300_render_set_primitive(struct vbuf_render* render,
847                                               unsigned prim)
848{
849    struct r300_render* r300render = r300_render(render);
850
851    r300render->prim = prim;
852    r300render->hwprim = r300_translate_primitive(prim);
853
854    return TRUE;
855}
856
857static void r300_render_draw_arrays(struct vbuf_render* render,
858                                    unsigned start,
859                                    unsigned count)
860{
861    struct r300_render* r300render = r300_render(render);
862    struct r300_context* r300 = r300render->r300;
863    uint8_t* ptr;
864    unsigned i;
865    unsigned dwords = 6;
866
867    CS_LOCALS(r300);
868
869    (void) i; (void) ptr;
870
871    r300_prepare_for_rendering(r300, PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL,
872                               NULL, dwords, 0, 0, NULL);
873
874    DBG(r300, DBG_DRAW, "r300: Doing vbuf render, count %d\n", count);
875
876    /* Uncomment to dump all VBOs rendered through this interface.
877     * Slow and noisy!
878    ptr = pipe_buffer_map(&r300render->r300->context,
879                          r300render->vbo, PIPE_TRANSFER_READ,
880                          &r300render->vbo_transfer);
881
882    for (i = 0; i < count; i++) {
883        printf("r300: Vertex %d\n", i);
884        draw_dump_emitted_vertex(&r300->vertex_info, ptr);
885        ptr += r300->vertex_info.size * 4;
886        printf("\n");
887    }
888
889    pipe_buffer_unmap(&r300render->r300->context, r300render->vbo,
890        r300render->vbo_transfer);
891    */
892
893    BEGIN_CS(dwords);
894    OUT_CS_REG(R300_GA_COLOR_CONTROL,
895            r300_provoking_vertex_fixes(r300, r300render->prim));
896    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, count - 1);
897    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
898    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
899           r300render->hwprim);
900    END_CS;
901}
902
903static void r300_render_draw_elements(struct vbuf_render* render,
904                                      const ushort* indices,
905                                      uint count)
906{
907    struct r300_render* r300render = r300_render(render);
908    struct r300_context* r300 = r300render->r300;
909    int i;
910    unsigned end_cs_dwords;
911    unsigned max_index = (r300render->vbo_size - r300render->vbo_offset) /
912                         (r300render->r300->vertex_info.size * 4) - 1;
913    unsigned short_count;
914    unsigned free_dwords;
915
916    CS_LOCALS(r300);
917
918    /* Reserve at least 256 dwords.
919     *
920     * Below we manage the CS space manually because there may be more
921     * indices than can fit in the CS. */
922    r300_prepare_for_rendering(r300,
923        PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL | PREP_INDEXED,
924        NULL, 256, 0, 0, &end_cs_dwords);
925
926    while (count) {
927        free_dwords = r300->rws->get_cs_free_dwords(r300->rws);
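        /* Each chunk costs 6 dwords of setup (two register writes, the
         * 3D_DRAW_INDX_2 header and VF_CNTL) plus one dword per two indices,
         * so size it to what currently fits in the CS while keeping
         * end_cs_dwords in reserve. */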
928
929        short_count = MIN2(count, (free_dwords - end_cs_dwords - 6) * 2);
930
931        BEGIN_CS(6 + (short_count+1)/2);
932        OUT_CS_REG(R300_GA_COLOR_CONTROL,
933                r300_provoking_vertex_fixes(r300, r300render->prim));
934        OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max_index);
935        OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, (short_count+1)/2);
936        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (short_count << 16) |
937               r300render->hwprim);
938        for (i = 0; i < short_count-1; i += 2) {
939            OUT_CS(indices[i+1] << 16 | indices[i]);
940        }
941        if (short_count % 2) {
942            OUT_CS(indices[short_count-1]);
943        }
944        END_CS;
945
946        /* OK now subtract the emitted indices and see if we need to emit
947         * another draw packet. */
948        indices += short_count;
949        count -= short_count;
950
951        if (count) {
952            r300_prepare_for_rendering(r300,
953                PREP_EMIT_AOS_SWTCL | PREP_INDEXED,
954                NULL, 256, 0, 0, &end_cs_dwords);
955        }
956    }
957}
958
959static void r300_render_destroy(struct vbuf_render* render)
960{
961    FREE(render);
962}
963
964static struct vbuf_render* r300_render_create(struct r300_context* r300)
965{
966    struct r300_render* r300render = CALLOC_STRUCT(r300_render);
967
968    r300render->r300 = r300;
969
970    /* XXX find real numbers plz */
971    r300render->base.max_vertex_buffer_bytes = 128 * 1024;
972    r300render->base.max_indices = 16 * 1024;
973
974    r300render->base.get_vertex_info = r300_render_get_vertex_info;
975    r300render->base.allocate_vertices = r300_render_allocate_vertices;
976    r300render->base.map_vertices = r300_render_map_vertices;
977    r300render->base.unmap_vertices = r300_render_unmap_vertices;
978    r300render->base.set_primitive = r300_render_set_primitive;
979    r300render->base.draw_elements = r300_render_draw_elements;
980    r300render->base.draw_arrays = r300_render_draw_arrays;
981    r300render->base.release_vertices = r300_render_release_vertices;
982    r300render->base.destroy = r300_render_destroy;
983
984    r300render->vbo = NULL;
985    r300render->vbo_size = 0;
986    r300render->vbo_offset = 0;
987
988    return &r300render->base;
989}
990
991struct draw_stage* r300_draw_stage(struct r300_context* r300)
992{
993    struct vbuf_render* render;
994    struct draw_stage* stage;
995
996    render = r300_render_create(r300);
997
998    if (!render) {
999        return NULL;
1000    }
1001
1002    stage = draw_vbuf_stage(r300->draw, render);
1003
1004    if (!stage) {
1005        render->destroy(render);
1006        return NULL;
1007    }
1008
1009    draw_set_render(r300->draw, render);
1010
1011    return stage;
1012}
1013
1014/****************************************************************************
1015 *                         End of SW TCL functions                          *
1016 ***************************************************************************/
1017
1018static void r300_resource_resolve(struct pipe_context* pipe,
1019                                  struct pipe_resource* dest,
1020                                  struct pipe_subresource subdest,
1021                                  struct pipe_resource* src,
1022                                  struct pipe_subresource subsrc)
1023{
1024    struct r300_context* r300 = r300_context(pipe);
1025    struct r300_surface* destsurf = r300_surface(
1026        dest->screen->get_tex_surface(dest->screen,
1027            dest, subdest.face, subdest.level, 0, 0));
1028    struct pipe_surface* srcsurf = src->screen->get_tex_surface(src->screen,
1029            src, subsrc.face, subsrc.level, 0, 0);
1030    float color[] = {0, 0, 0, 0};
1031    CS_LOCALS(r300);
1032
1033    DBG(r300, DBG_DRAW, "r300: Resolving resource...\n");
1034
1035    OUT_CS_REG_SEQ(R300_RB3D_AARESOLVE_OFFSET, 1);
1036    OUT_CS_RELOC(destsurf->buffer, destsurf->offset, 0, destsurf->domain, 0);
1037
1038    OUT_CS_REG_SEQ(R300_RB3D_AARESOLVE_PITCH, 1);
1039    OUT_CS_RELOC(destsurf->buffer, destsurf->pitch, 0, destsurf->domain, 0);
1040
1041    OUT_CS_REG(R300_RB3D_AARESOLVE_CTL,
1042        R300_RB3D_AARESOLVE_CTL_AARESOLVE_MODE_RESOLVE |
1043        R300_RB3D_AARESOLVE_CTL_AARESOLVE_ALPHA_AVERAGE);
1044
1045    r300->context.clear_render_target(pipe,
1046        srcsurf, color, 0, 0, src->width0, src->height0);
1047
1048    OUT_CS_REG(R300_RB3D_AARESOLVE_CTL, 0x0);
1049
1050    pipe_surface_reference((struct pipe_surface**)&srcsurf, NULL);
1051    pipe_surface_reference((struct pipe_surface**)&destsurf, NULL);
1052}
1053
1054void r300_init_render_functions(struct r300_context *r300)
1055{
1056    /* Set generic functions. */
1057    r300->context.draw_elements = r300_draw_elements;
1058
1059    /* Set draw functions based on presence of HW TCL. */
1060    if (r300->screen->caps.has_tcl) {
1061        r300->context.draw_arrays = r300_draw_arrays;
1062        r300->context.draw_range_elements = r300_draw_range_elements;
1063    } else {
1064        r300->context.draw_arrays = r300_swtcl_draw_arrays;
1065        r300->context.draw_range_elements = r300_swtcl_draw_range_elements;
1066    }
1067
1068    r300->context.resource_resolve = r300_resource_resolve;
1069
1070    /* Plug in the two-sided stencil reference value fallback if needed. */
1071    if (!r300->screen->caps.is_r500)
1072        r300_plug_in_stencil_ref_fallback(r300);
1073}
1074