Lines Matching defs:dma

206 make_empty_list(&rmesa->dma.free);
207 make_empty_list(&rmesa->dma.wait);
208 make_empty_list(&rmesa->dma.reserved);
209 rmesa->dma.minimum_size = MAX_DMA_BUF_SZ;
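
The initialization at lines 206-209 suggests the per-context DMA state keeps three buffer-object lists (free, wait, reserved) plus a minimum allocation size. A tiny sketch of that shape and of the empty-list setup, with every name and value below an assumption rather than the driver's real definition:

    /* Assumed shape of the per-context DMA bookkeeping implied by the matched
     * fields; this is a sketch, not the driver's real struct. */
    struct dma_bo_list { struct dma_bo_list *next, *prev; };

    struct radeon_dma_sketch {
        struct dma_bo_list free;      /* idle buffers available for reuse     */
        struct dma_bo_list wait;      /* buffers presumably still in flight   */
        struct dma_bo_list reserved;  /* buffers currently being filled       */
        unsigned minimum_size;        /* smallest buffer worth allocating     */
        unsigned current_used;        /* bytes emitted from the head buffer   */
        unsigned current_vertexptr;   /* bytes written by software TCL        */
    };

    /* Mirror of the empty-list initialization on lines 206-208. */
    static void make_empty(struct dma_bo_list *l) { l->next = l->prev = l; }

    int main(void)
    {
        struct radeon_dma_sketch dma = {
            .minimum_size = 64 * 1024   /* stand-in for MAX_DMA_BUF_SZ */
        };
        make_empty(&dma.free);
        make_empty(&dma.wait);
        make_empty(&dma.reserved);
        (void)dma;
        return 0;
    }
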
217 if (size > rmesa->dma.minimum_size)
218 rmesa->dma.minimum_size = (size + 15) & (~15);
221 __FUNCTION__, size, rmesa->dma.minimum_size);
223 if (is_empty_list(&rmesa->dma.free)
224 || last_elem(&rmesa->dma.free)->bo->size < size) {
230 0, rmesa->dma.minimum_size, 4,
237 insert_at_head(&rmesa->dma.reserved, dma_bo);
242 dma_bo = last_elem(&rmesa->dma.free);
244 insert_at_head(&rmesa->dma.reserved, dma_bo);
247 rmesa->dma.current_used = 0;
248 rmesa->dma.current_vertexptr = 0;
251 first_elem(&rmesa->dma.reserved)->bo,
255 if (is_empty_list(&rmesa->dma.reserved)) {
259 radeon_bo_map(first_elem(&rmesa->dma.reserved)->bo, 1);
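
Lines 217-259 refill the current DMA region: an oversized request raises dma.minimum_size, rounded up to a 16-byte multiple; the last buffer on the free list is reused only if it is large enough, and otherwise a new buffer object is allocated; either way the buffer is pushed onto the reserved list, the write pointers are reset, and the buffer is mapped. A small, self-contained sketch of the round-up and reuse test follows; the MAX_DMA_BUF_SZ stand-in value and all names below are assumptions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the driver's MAX_DMA_BUF_SZ; the real value is not shown
     * in the listing. */
    #define MAX_DMA_BUF_SZ (64 * 1024)

    /* Round a byte count up to the next multiple of 16, as in
     * "(size + 15) & (~15)" on line 218. */
    static unsigned round_up_16(unsigned size)
    {
        return (size + 15) & ~15u;
    }

    /* Decide whether the last free buffer can be reused for a request of
     * 'size' bytes, mirroring the test on lines 223-224. */
    static bool can_reuse_free_buffer(bool free_list_empty,
                                      unsigned last_free_size,
                                      unsigned size)
    {
        return !free_list_empty && last_free_size >= size;
    }

    int main(void)
    {
        unsigned minimum_size = MAX_DMA_BUF_SZ;
        unsigned request = 70001;          /* larger than the current minimum */

        if (request > minimum_size)
            minimum_size = round_up_16(request);   /* 70001 -> 70016 */

        printf("minimum_size %u, reuse 64K free buffer: %d\n",
               minimum_size,
               can_reuse_free_buffer(false, 64 * 1024, request));
        return 0;
    }
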
262 /* Allocates a region from rmesa->dma.current. If there isn't enough
272 if (rmesa->dma.flush)
273 rmesa->dma.flush(rmesa->glCtx);
275 assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
278 rmesa->dma.current_used = (rmesa->dma.current_used + alignment) & ~alignment;
280 if (is_empty_list(&rmesa->dma.reserved)
281 || rmesa->dma.current_used + bytes > first_elem(&rmesa->dma.reserved)->bo->size)
284 *poffset = rmesa->dma.current_used;
285 *pbo = first_elem(&rmesa->dma.reserved)->bo;
289 rmesa->dma.current_used = (rmesa->dma.current_used + bytes + 15) & ~15;
290 rmesa->dma.current_vertexptr = rmesa->dma.current_used;
292 assert(rmesa->dma.current_used <= first_elem(&rmesa->dma.reserved)->bo->size);
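
Lines 262-292 are the general-purpose region allocator described by the comment at 262: any pending vertex flush runs first, current_used is aligned, a fresh region is reserved if the request no longer fits, and the caller gets back the buffer object plus an offset while current_used advances, rounded up to the next 16 bytes. Line 278 masks with ~alignment directly, which suggests the alignment argument has already been converted to a mask (alignment - 1) on a line that did not match the query; the sketch below makes that step explicit, with assumed struct and function names.

    /* A minimal, self-contained sketch of the allocation arithmetic implied by
     * lines 278-292.  The struct and function names are illustrative
     * assumptions, not the driver's real definitions. */
    #include <assert.h>
    #include <stdio.h>

    struct dma_region {
        unsigned size;          /* total size of the reserved buffer object */
        unsigned current_used;  /* bytes already handed out                 */
    };

    /* Round the write pointer up to 'alignment' bytes (a power of two), then
     * check whether 'bytes' still fit in the current buffer. */
    static int alloc_from_region(struct dma_region *r, unsigned bytes,
                                 unsigned alignment, unsigned *offset)
    {
        unsigned mask = alignment - 1;          /* the step line 278 relies on */
        unsigned aligned = (r->current_used + mask) & ~mask;

        if (aligned + bytes > r->size)
            return 0;             /* caller must refill, as on lines 280-281 */

        *offset = aligned;
        /* Advance the running pointer, rounded to 16 bytes as on line 289. */
        r->current_used = (aligned + bytes + 15) & ~15u;
        return 1;
    }

    int main(void)
    {
        struct dma_region r = { .size = 4096, .current_used = 10 };
        unsigned offset;

        assert(alloc_from_region(&r, 100, 32, &offset));
        printf("offset %u, used now %u\n", offset, r.current_used); /* 32, 144 */
        return 0;
    }
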
302 foreach_s(dma_bo, temp, &rmesa->dma.free) {
308 foreach_s(dma_bo, temp, &rmesa->dma.wait) {
314 foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
323 if (is_empty_list(&rmesa->dma.reserved))
328 rmesa->dma.current_used -= return_bytes;
329 rmesa->dma.current_vertexptr = rmesa->dma.current_used;
347 const int expire_at = ++rmesa->dma.free.expire_counter + DMA_BO_FREE_TIME;
348 const int time = rmesa->dma.free.expire_counter;
354 foreach(dma_bo, &rmesa->dma.free)
357 foreach(dma_bo, &rmesa->dma.wait)
360 foreach(dma_bo, &rmesa->dma.reserved)
364 __FUNCTION__, free, wait, reserved, rmesa->dma.minimum_size);
369 foreach_s(dma_bo, temp, &rmesa->dma.wait) {
371 WARN_ONCE("Leaking dma buffer object!\n");
378 if (dma_bo->bo->size < rmesa->dma.minimum_size) {
389 insert_at_tail(&rmesa->dma.free, dma_bo);
393 foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
396 if (dma_bo->bo->size < rmesa->dma.minimum_size) {
404 insert_at_tail(&rmesa->dma.wait, dma_bo);
408 foreach_s(dma_bo, temp, &rmesa->dma.free) {
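
The remaining matched lines up to 408 cover teardown and recycling: unused space can be returned (lines 328-329), and on release the waited buffers move to the free list (line 389), reserved buffers move to the wait list (line 404), undersized buffers are freed outright (lines 378 and 396), and free buffers are dropped once their expire counter passes DMA_BO_FREE_TIME ticks (lines 347-348). The sketch below models only the keep-or-release decisions; the DMA_BO_FREE_TIME value and all other names are assumptions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in value; the real DMA_BO_FREE_TIME is not shown in the listing. */
    #define DMA_BO_FREE_TIME 100

    /* Buffers smaller than the current minimum are released instead of being
     * recycled (cf. the size checks on lines 378 and 396). */
    static bool worth_recycling(unsigned bo_size, unsigned minimum_size)
    {
        return bo_size >= minimum_size;
    }

    /* A buffer parked on the free list is dropped once the running tick
     * passes the expiry stamped on it (cf. lines 347-348). */
    static bool expired(int tick, int expire_counter)
    {
        return tick > expire_counter;
    }

    int main(void)
    {
        int tick = 0;
        unsigned minimum_size = 64 * 1024;

        /* Per-release bookkeeping as on line 347: bump the tick and stamp
         * buffers that are being moved to the free list. */
        int expire_at = ++tick + DMA_BO_FREE_TIME;

        printf("recycle 16K buffer: %d\n",
               worth_recycling(16 * 1024, minimum_size));
        printf("expired right away: %d\n", expired(tick, expire_at));
        printf("expired later:      %d\n",
               expired(tick + DMA_BO_FREE_TIME + 1, expire_at));
        return 0;
    }
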
419 /* Flush vertices in the current dma region.
424 struct radeon_dma *dma = &rmesa->dma;
428 dma->flush = NULL;
432 if (!is_empty_list(&dma->reserved)) {
433 GLuint current_offset = dma->current_used;
435 assert (dma->current_used +
437 dma->current_vertexptr);
439 if (dma->current_used != dma->current_vertexptr) {
440 dma->current_used = dma->current_vertexptr;
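
The flush path at lines 424-440 clears the deferred dma->flush hook and, when something was written past current_used, emits those vertices and lets current_used catch up to current_vertexptr (the comment at line 419 describes it as flushing vertices in the current dma region). The sketch below shows only that catch-up step; emit_vertices() and the struct are hypothetical stand-ins, not the driver's real emit path.

    #include <stdio.h>

    struct dma_state {
        unsigned current_used;       /* bytes already emitted to the hardware */
        unsigned current_vertexptr;  /* bytes written by software TCL so far  */
    };

    static void emit_vertices(unsigned offset, unsigned bytes)
    {
        /* Hypothetical: would build the draw packet for [offset, offset+bytes). */
        printf("emit %u bytes at offset %u\n", bytes, offset);
    }

    static void flush_last_swtcl_prim(struct dma_state *dma)
    {
        if (dma->current_used != dma->current_vertexptr) {
            emit_vertices(dma->current_used,
                          dma->current_vertexptr - dma->current_used);
            dma->current_used = dma->current_vertexptr;
        }
    }

    int main(void)
    {
        struct dma_state dma = { .current_used = 0, .current_vertexptr = 96 };

        flush_last_swtcl_prim(&dma);   /* emits 96 bytes, pointers now equal */
        flush_last_swtcl_prim(&dma);   /* nothing pending, emits nothing     */
        return 0;
    }
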
449 /* Alloc space in the current dma region.
459 if(is_empty_list(&rmesa->dma.reserved)
460 ||rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
461 if (rmesa->dma.flush) {
462 rmesa->dma.flush(rmesa->glCtx);
470 if (!rmesa->dma.flush) {
473 rmesa->dma.flush = rcommon_flush_last_swtcl_prim;
477 ASSERT( rmesa->dma.flush == rcommon_flush_last_swtcl_prim );
478 ASSERT( rmesa->dma.current_used +
480 rmesa->dma.current_vertexptr );
483 rmesa->swtcl.bo = first_elem(&rmesa->dma.reserved)->bo;
488 head = (rmesa->swtcl.bo->ptr + rmesa->dma.current_vertexptr);
489 rmesa->dma.current_vertexptr += bytes;
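
Lines 449-489 allocate vertex space for software TCL: if the request would overflow the reserved buffer the deferred flush runs first (lines 461-462), the flush callback is installed lazily so at most one flush is pending (lines 470-473), and the caller receives a CPU pointer into the mapped buffer at current_vertexptr, which then advances (lines 488-489). A simplified, self-contained sketch under those assumptions; refilling is faked by resetting the pointer, and every name below is illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct swtcl_dma {
        void (*flush)(void *ctx);   /* deferred flush callback, NULL if none */
        char *bo_ptr;               /* CPU mapping of the reserved buffer    */
        unsigned bo_size;
        unsigned current_vertexptr;
    };

    static void flush_cb(void *ctx)
    {
        (void)ctx;
        printf("flush pending vertices\n");
    }

    static void *alloc_verts(struct swtcl_dma *dma, void *ctx, unsigned bytes)
    {
        if (dma->current_vertexptr + bytes > dma->bo_size) {
            if (dma->flush) {
                dma->flush(ctx);          /* emit what is already buffered   */
                dma->flush = NULL;        /* the real callback clears itself */
            }
            dma->current_vertexptr = 0;   /* stand-in for a fresh buffer     */
        }
        if (!dma->flush)
            dma->flush = flush_cb;        /* flush lazily, once per batch    */

        void *head = dma->bo_ptr + dma->current_vertexptr;
        dma->current_vertexptr += bytes;
        return head;
    }

    int main(void)
    {
        struct swtcl_dma dma = { 0 };
        dma.bo_size = 256;
        dma.bo_ptr = malloc(dma.bo_size);

        float *v = alloc_verts(&dma, NULL, 3 * 4 * sizeof(float));
        memset(v, 0, 3 * 4 * sizeof(float));   /* one triangle's worth       */
        printf("vertexptr now %u\n", dma.current_vertexptr);

        free(dma.bo_ptr);
        return 0;
    }
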
501 if (radeon->dma.flush) {
502 radeon->dma.flush(radeon->glCtx);