Lines matching refs:nv50

135 nv50_emit_vtxattr(struct nv50_context *nv50, struct pipe_vertex_buffer *vb,
138 struct nouveau_pushbuf *push = nv50->base.pushbuf;
167 if (attr == nv50->vertprog->vp.edgeflag) {
181 nv50_user_vbuf_range(struct nv50_context *nv50, int vbi,
184 if (unlikely(nv50->vertex->instance_bufs & (1 << vbi))) {
187 *size = nv50->vtxbuf[vbi].buffer->width0;
190 assert(nv50->vb_elt_limit != ~0);
191 *base = nv50->vb_elt_first * nv50->vtxbuf[vbi].stride;
192 *size = nv50->vb_elt_limit * nv50->vtxbuf[vbi].stride +
193 nv50->vertex->vb_access_size[vbi];
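
The two branches at lines 184-193 compute the byte range of a user (CPU-side) vertex buffer that the current draw can touch. Below is a minimal, compilable sketch of that computation with made-up stand-in types, not the driver's real structs; the `*base = 0` in the instanced branch is an assumption, since that line is not among the matches.

#include <stdint.h>

struct vbuf_range_in {
   uint32_t stride;          /* pipe_vertex_buffer::stride */
   uint32_t width0;          /* total buffer size in bytes */
   uint32_t vb_access_size;  /* bytes one vertex's attributes span */
   int      per_instance;    /* referenced by an instanced vertex element */
};

static void
user_vbuf_range(const struct vbuf_range_in *vb,
                uint32_t elt_first,  /* ~ vb_elt_first: min_index + index_bias */
                uint32_t elt_limit,  /* ~ vb_elt_limit: max_index - min_index */
                uint32_t *base, uint32_t *size)
{
   if (vb->per_instance) {
      *base = 0;              /* assumed: no per-vertex range to narrow to */
      *size = vb->width0;     /* take the whole buffer */
   } else {
      *base = elt_first * vb->stride;
      *size = elt_limit * vb->stride + vb->vb_access_size;
   }
}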
198 nv50_upload_user_buffers(struct nv50_context *nv50,
203 for (b = 0; b < nv50->num_vtxbufs; ++b) {
205 const struct pipe_vertex_buffer *vb = &nv50->vtxbuf[b];
208 if (!(nv50->vbo_user & (1 << b)) || !vb->stride)
210 nv50_user_vbuf_range(nv50, b, &base, &size);
213 addrs[b] = nouveau_scratch_data(&nv50->base, vb->user_buffer, base, size,
216 BCTX_REFN_bo(nv50->bufctx_3d, VERTEX_TMP, NOUVEAU_BO_GART |
219 nv50->base.vbo_dirty = TRUE;
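
nv50_upload_user_buffers (lines 198-219) stages every active user buffer for GPU access. A hedged sketch of that per-buffer loop follows; stage_in_scratch() is a hypothetical stand-in for nouveau_scratch_data() (which also returns the scratch bo that gets referenced into the bufctx), and the limits[] value is assumed to be the last addressable byte.

#include <stdint.h>
#include <stdbool.h>

struct user_vb { const void *user_buffer; uint32_t stride; };

/* stand-in for nouveau_scratch_data(): copy [base, base+size) into
 * GPU-visible scratch memory and return its GPU virtual address */
extern uint64_t stage_in_scratch(const void *data, uint32_t base, uint32_t size);
/* stand-in for nv50_user_vbuf_range(), sketched above */
extern void compute_range(unsigned b, uint32_t *base, uint32_t *size);

static bool
upload_user_buffers(const struct user_vb *vb, unsigned num_vtxbufs,
                    uint32_t vbo_user, uint64_t addrs[], uint32_t limits[])
{
   bool dirty = false;

   for (unsigned b = 0; b < num_vtxbufs; ++b) {
      uint32_t base, size;

      if (!(vbo_user & (1 << b)) || !vb[b].stride)
         continue;                  /* GPU-resident, or constant (stride 0) */

      compute_range(b, &base, &size);
      limits[b] = base + size - 1;  /* assumed: last valid byte as fetch limit */
      addrs[b] = stage_in_scratch(vb[b].user_buffer, base, size);
      dirty = true;                 /* mirrors nv50->base.vbo_dirty = TRUE */
   }
   return dirty;
}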
223 nv50_update_user_vbufs(struct nv50_context *nv50)
226 struct nouveau_pushbuf *push = nv50->base.pushbuf;
230 for (i = 0; i < nv50->vertex->num_elements; ++i) {
231 struct pipe_vertex_element *ve = &nv50->vertex->element[i].pipe;
233 struct pipe_vertex_buffer *vb = &nv50->vtxbuf[b];
236 if (!(nv50->vbo_user & (1 << b)))
240 nv50_emit_vtxattr(nv50, vb, ve, i);
243 nv50_user_vbuf_range(nv50, b, &base, &size);
249 address[b] = nouveau_scratch_data(&nv50->base, vb->user_buffer,
252 BCTX_REFN_bo(nv50->bufctx_3d, VERTEX_TMP, bo_flags, bo);
262 nv50->base.vbo_dirty = TRUE;
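
nv50_update_user_vbufs (lines 223-262) walks vertex elements rather than buffers: a user attribute with stride 0 is pushed inline as a constant via nv50_emit_vtxattr, while everything else has only its used range staged. A sketch of that dispatch, reusing the stand-ins from the previous sketch; the stride-0 gate is an assumption, by analogy with the skip in the upload loop.

struct velem { unsigned vertex_buffer_index; };

extern void emit_constant_attr(unsigned attr); /* ~ nv50_emit_vtxattr() */

static void
update_user_vbufs(const struct velem *ve, unsigned num_elements,
                  const struct user_vb *vb, uint32_t vbo_user,
                  uint64_t address[])
{
   for (unsigned i = 0; i < num_elements; ++i) {
      const unsigned b = ve[i].vertex_buffer_index;
      uint32_t base, size;

      if (!(vbo_user & (1 << b)))
         continue;               /* backed by a real GPU buffer */
      if (!vb[b].stride) {       /* assumed gate (stride 0 = constant) */
         emit_constant_attr(i);  /* push the value inline */
         continue;
      }
      compute_range(b, &base, &size);
      address[b] = stage_in_scratch(vb[b].user_buffer, base, size);
   }
}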
266 nv50_release_user_vbufs(struct nv50_context *nv50)
268 if (nv50->vbo_user) {
269 nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX_TMP);
270 nouveau_scratch_done(&nv50->base);
275 nv50_vertex_arrays_validate(struct nv50_context *nv50)
279 struct nouveau_pushbuf *push = nv50->base.pushbuf;
280 struct nv50_vertex_stateobj *vertex = nv50->vertex;
286 const unsigned n = MAX2(vertex->num_elements, nv50->state.num_vtxelts);
289 nv50->vbo_fifo = ~0;
291 if (nv50->vbo_user & ~nv50->vbo_constant)
292 nv50->vbo_fifo = nv50->vbo_push_hint ? ~0 : 0;
294 nv50->vbo_fifo = 0;
296 if (!nv50->vbo_fifo) {
298 for (i = 0; i < nv50->num_vtxbufs; ++i) {
299 struct nv04_resource *buf = nv04_resource(nv50->vtxbuf[i].buffer);
302 nv50->base.vbo_dirty = TRUE;
310 if (nv50->vbo_fifo) {
311 nv50->state.num_vtxelts = vertex->num_elements;
325 vb = &nv50->vtxbuf[b];
327 if (likely(vb->stride) || !(nv50->vbo_user & (1 << b)))
336 mask = vertex->instance_elts ^ nv50->state.instance_elts;
343 nv50->state.instance_elts = vertex->instance_elts;
345 if (nv50->vbo_user & ~nv50->vbo_constant)
346 nv50_upload_user_buffers(nv50, addrs, limits);
353 vb = &nv50->vtxbuf[b];
355 if (unlikely(nv50->vbo_constant & (1 << b))) {
358 nv50_emit_vtxattr(nv50, vb, &ve->pipe, i);
361 if (nv50->vbo_user & (1 << b)) {
368 BCTX_REFN(nv50->bufctx_3d, VERTEX, buf, RD);
390 for (; i < nv50->state.num_vtxelts; ++i) {
394 nv50->state.num_vtxelts = vertex->num_elements;
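
Lines 289-294 choose between hardware vertex fetch and the CPU push (FIFO) path; whatever condition forces ~0 at line 289 is not among the matches, so it appears here as a bare flag. A compact sketch of the decision:

#include <stdint.h>
#include <stdbool.h>

static uint32_t
choose_vbo_fifo(bool force_push,   /* the unmatched condition at line 289 */
                uint32_t vbo_user, uint32_t vbo_constant, bool push_hint)
{
   if (force_push)
      return ~0u;                  /* translate everything on the CPU */
   if (vbo_user & ~vbo_constant)   /* non-constant user buffers in use */
      return push_hint ? ~0u : 0;  /* defer to the per-draw heuristic */
   return 0;                       /* plain hardware fetch */
}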
444 nv50_draw_arrays(struct nv50_context *nv50,
448 struct nouveau_pushbuf *push = nv50->base.pushbuf;
451 if (nv50->state.index_bias) {
454 nv50->state.index_bias = 0;
563 nv50_draw_elements(struct nv50_context *nv50, boolean shorten,
567 struct nouveau_pushbuf *push = nv50->base.pushbuf;
569 const unsigned index_size = nv50->idxbuf.index_size;
573 if (index_bias != nv50->state.index_bias) {
576 nv50->state.index_bias = index_bias;
579 if (nv50->idxbuf.buffer) {
580 struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
583 const unsigned base = (buf->offset + nv50->idxbuf.offset) & ~3;
585 start += ((buf->offset + nv50->idxbuf.offset) & 3) >> (index_size >> 1);
587 assert(nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer));
630 const void *data = nv50->idxbuf.user_buffer;
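
The fix-up at lines 583-585 rounds the index-buffer address down to a DWORD boundary (the granularity the hardware accepts) and converts the bytes that were cut off back into whole indices added to start; the shift by index_size >> 1 divides the stray bytes by the index size for 8- and 16-bit indices, while 32-bit index buffers are assumed already DWORD-aligned. A small self-checking example of that arithmetic:

#include <assert.h>

static unsigned
aligned_start(unsigned offset, unsigned index_size, unsigned start)
{
   /* the hardware is given offset & ~3; the remainder becomes indices */
   return start + ((offset & 3) >> (index_size >> 1));
}

int main(void)
{
   assert(aligned_start(6, 2, 0) == 1); /* 16-bit: 2 stray bytes = 1 index  */
   assert(aligned_start(3, 1, 4) == 7); /* 8-bit: 3 stray bytes = 3 indices */
   assert(aligned_start(4, 4, 5) == 5); /* 32-bit: already DWORD-aligned    */
   return 0;
}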
661 nva0_draw_stream_output(struct nv50_context *nv50,
664 struct nouveau_pushbuf *push = nv50->base.pushbuf;
670 if (unlikely(nv50->screen->base.class_3d < NVA0_3D_CLASS)) {
718 struct nv50_context *nv50 = nv50_context(pipe);
719 struct nouveau_pushbuf *push = nv50->base.pushbuf;
722 nv50->vb_elt_first = info->min_index + info->index_bias;
723 nv50->vb_elt_limit = info->max_index - info->min_index;
724 nv50->instance_off = info->start_instance;
725 nv50->instance_max = info->instance_count - 1;
730 nv50->vbo_push_hint = /* the 64 is heuristic */
731 !(info->indexed && ((nv50->vb_elt_limit + 64) < info->count));
733 if (nv50->vbo_user && !(nv50->dirty & (NV50_NEW_ARRAYS | NV50_NEW_VERTEX))) {
734 if (!!nv50->vbo_fifo != nv50->vbo_push_hint)
735 nv50->dirty |= NV50_NEW_ARRAYS;
737 if (!nv50->vbo_fifo)
738 nv50_update_user_vbufs(nv50);
741 if (unlikely(nv50->num_so_targets && !nv50->gmtyprog))
742 nv50->state.prim_size = nv50_pipe_prim_to_prim_size[info->mode];
744 nv50_state_validate(nv50, ~0, 8); /* 8 as minimum, we use flush_notify */
748 if (nv50->vbo_fifo) {
749 nv50_push_vbo(nv50, info);
755 if (nv50->state.instance_base != info->start_instance) {
756 nv50->state.instance_base = info->start_instance;
762 if (nv50->base.vbo_dirty) {
765 nv50->base.vbo_dirty = FALSE;
771 if (info->primitive_restart != nv50->state.prim_restart) {
783 nv50->state.prim_restart = info->primitive_restart;
793 nv50_draw_elements(nv50, shorten,
798 nva0_draw_stream_output(nv50, info);
800 nv50_draw_arrays(nv50,
806 nv50_release_user_vbufs(nv50);
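
The push hint computed at lines 730-731 feeds the vbo_fifo decision sketched earlier: vb_elt_limit is max_index - min_index, the span of vertices a draw actually references. When an indexed draw reuses vertices heavily (span + 64 still below the index count), translating each index on the CPU would touch shared vertices repeatedly, so hardware fetch is preferred; otherwise pushing the data through the FIFO is assumed cheap enough. Lines 733-735 then flag NV50_NEW_ARRAYS whenever the hint disagrees with the current vbo_fifo state, forcing revalidation. A one-line sketch of the hint:

#include <stdbool.h>

static bool
want_push_path(bool indexed, unsigned vb_elt_limit, unsigned count)
{
   /* the 64 is heuristic slack, per the comment at line 730 */
   return !(indexed && (vb_elt_limit + 64 < count));
}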