Lines Matching refs:rmesa

140 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
144 radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * 4, 32);
148 radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
175 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
182 radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * 4, 32);
186 radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
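The two call sites above (source lines 140-148 and 175-186) share one pattern: a constant attribute (stride 0) gets a region sized for a single element, a per-vertex attribute gets size * count dwords, and both are 32-byte aligned. A minimal standalone sketch of that decision; struct region, alloc_region() and emit_vector() are illustrative stand-ins, not driver names:

    #include <stddef.h>

    /* Illustrative stand-in; the real code fills a struct radeon_aos
     * via radeonAllocDmaRegion(). */
    struct region { size_t bytes; };

    static void alloc_region(struct region *r, size_t bytes, int alignment)
    {
        r->bytes = bytes;   /* stub: the driver carves a DMA BO here */
        (void)alignment;
    }

    static void emit_vector(struct region *r, int size, int count, int stride)
    {
        if (stride == 0)
            alloc_region(r, (size_t)size * 4, 32);         /* one element, reused */
        else
            alloc_region(r, (size_t)size * count * 4, 32); /* one per vertex */
    }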
204 void radeon_init_dma(radeonContextPtr rmesa)
206 make_empty_list(&rmesa->dma.free);
207 make_empty_list(&rmesa->dma.wait);
208 make_empty_list(&rmesa->dma.reserved);
209 rmesa->dma.minimum_size = MAX_DMA_BUF_SZ;
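radeon_init_dma() (lines 204-209) prepares three sentinel lists, free, wait and reserved, plus a minimum buffer-object size. A simplified standalone model of that bookkeeping; the struct layouts and the MAX_DMA_BUF_SZ value are assumptions for illustration:

    #include <stddef.h>

    #define MAX_DMA_BUF_SZ (64 * 1024)  /* assumed value, for illustration */

    struct node { struct node *prev, *next; };

    struct dma_pool {
        struct node free;      /* idle BOs, available for reuse */
        struct node wait;      /* retired BOs the GPU may still read */
        struct node reserved;  /* BOs currently being filled by the CPU */
        size_t minimum_size;   /* smallest BO worth keeping or allocating */
    };

    static void make_empty(struct node *head)
    {
        head->prev = head->next = head;  /* sentinel points at itself */
    }

    static void init_dma(struct dma_pool *dma)
    {
        make_empty(&dma->free);
        make_empty(&dma->wait);
        make_empty(&dma->reserved);
        dma->minimum_size = MAX_DMA_BUF_SZ;
    }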
212 void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size)
217 if (size > rmesa->dma.minimum_size)
218 rmesa->dma.minimum_size = (size + 15) & (~15);
221 __FUNCTION__, size, rmesa->dma.minimum_size);
223 if (is_empty_list(&rmesa->dma.free)
224 || last_elem(&rmesa->dma.free)->bo->size < size) {
229 dma_bo->bo = radeon_bo_open(rmesa->radeonScreen->bom,
230 0, rmesa->dma.minimum_size, 4,
234 rcommonFlushCmdBuf(rmesa, __FUNCTION__);
237 insert_at_head(&rmesa->dma.reserved, dma_bo);
242 dma_bo = last_elem(&rmesa->dma.free);
244 insert_at_head(&rmesa->dma.reserved, dma_bo);
247 rmesa->dma.current_used = 0;
248 rmesa->dma.current_vertexptr = 0;
250 if (radeon_cs_space_check_with_bo(rmesa->cmdbuf.cs,
251 first_elem(&rmesa->dma.reserved)->bo,
255 if (is_empty_list(&rmesa->dma.reserved)) {
259 radeon_bo_map(first_elem(&rmesa->dma.reserved)->bo, 1);
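radeonRefillCurrentDmaRegion() (lines 212-259) rounds an oversized request up to 16 bytes and raises the pool's minimum size, then either recycles the last BO on the free list (line 242) or opens a fresh one (line 229), moves it to the head of the reserved list, resets the usage counters and maps it. A sketch of just the sizing decision:

    #include <stddef.h>

    /* Round a request up to a 16-byte multiple, as at source line 218. */
    static size_t round_up_16(size_t size)
    {
        return (size + 15) & ~(size_t)15;
    }

    /* Grow the pool's minimum BO size for an oversized request, so the BO
     * recycled from the free list (or freshly opened) is always big enough. */
    static size_t refill_size(size_t requested, size_t *minimum_size)
    {
        if (requested > *minimum_size)
            *minimum_size = round_up_16(requested);
        return *minimum_size;
    }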
262 /* Allocates a region from rmesa->dma.current. If there isn't enough
265 void radeonAllocDmaRegion(radeonContextPtr rmesa,
272 if (rmesa->dma.flush)
273 rmesa->dma.flush(rmesa->glCtx);
275 assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
278 rmesa->dma.current_used = (rmesa->dma.current_used + alignment) & ~alignment;
280 if (is_empty_list(&rmesa->dma.reserved)
281 || rmesa->dma.current_used + bytes > first_elem(&rmesa->dma.reserved)->bo->size)
282 radeonRefillCurrentDmaRegion(rmesa, bytes);
284 *poffset = rmesa->dma.current_used;
285 *pbo = first_elem(&rmesa->dma.reserved)->bo;
289 rmesa->dma.current_used = (rmesa->dma.current_used + bytes + 15) & ~15;
290 rmesa->dma.current_vertexptr = rmesa->dma.current_used;
292 assert(rmesa->dma.current_used <= first_elem(&rmesa->dma.reserved)->bo->size);
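radeonAllocDmaRegion() (lines 262-292) flushes any buffered swtcl vertices, aligns current_used, refills when the reserved BO cannot hold the request, and returns the BO plus offset, padding current_used out to the next 16-byte boundary. Note that in the full source the alignment parameter is turned into a mask (alignment--) just before line 278, which is why line 278 masks with ~alignment directly. A sketch of that offset arithmetic, assuming a power-of-two alignment:

    #include <stddef.h>

    /* Align the write pointer, hand back the allocation's offset, then pad
     * the pointer out to the next 16-byte boundary for the next caller. */
    static size_t alloc_offset(size_t *current_used, size_t bytes, size_t align)
    {
        size_t mask = align - 1;  /* align must be a power of two */
        size_t offset;

        *current_used = (*current_used + mask) & ~mask;              /* line 278 */
        offset = *current_used;
        *current_used = (*current_used + bytes + 15) & ~(size_t)15;  /* line 289 */
        return offset;
    }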
295 void radeonFreeDmaRegions(radeonContextPtr rmesa)
302 foreach_s(dma_bo, temp, &rmesa->dma.free) {
308 foreach_s(dma_bo, temp, &rmesa->dma.wait) {
314 foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
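radeonFreeDmaRegions() (lines 295-314) tears down all three lists with foreach_s, the iteration-safe variant that caches the next pointer so the current node can be unlinked and freed mid-walk. A self-contained sketch of that idiom; node and function names are illustrative:

    #include <stdlib.h>

    struct node { struct node *prev, *next; };

    /* Walk a sentinel list destructively: the next pointer is saved before
     * the body runs, so the current node can be unlinked and freed. */
    static void free_all(struct node *head)
    {
        struct node *n, *temp;
        for (n = head->next; n != head; n = temp) {
            temp = n->next;           /* saved before n is destroyed */
            n->prev->next = n->next;  /* remove_from_list() */
            n->next->prev = n->prev;
            free(n);                  /* radeon_bo_unref() + FREE() in the driver */
        }
    }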
321 void radeonReturnDmaRegion(radeonContextPtr rmesa, int return_bytes)
323 if (is_empty_list(&rmesa->dma.reserved))
328 rmesa->dma.current_used -= return_bytes;
329 rmesa->dma.current_vertexptr = rmesa->dma.current_used;
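radeonReturnDmaRegion() (lines 321-329) lets a caller hand back the unused tail of its last allocation by rewinding both counters; it does nothing when no BO is reserved. A sketch of that rewind:

    #include <stddef.h>

    /* Rewind both counters by the unused byte count; no BO changes hands,
     * only tail space from the previous allocation is reclaimed. */
    static void return_region(size_t *current_used, size_t *current_vertexptr,
                              size_t return_bytes, int reserved_is_empty)
    {
        if (reserved_is_empty)
            return;  /* nothing allocated, nothing to give back */
        *current_used -= return_bytes;
        *current_vertexptr = *current_used;
    }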
343 void radeonReleaseDmaRegions(radeonContextPtr rmesa)
347 const int expire_at = ++rmesa->dma.free.expire_counter + DMA_BO_FREE_TIME;
348 const int time = rmesa->dma.free.expire_counter;
354 foreach(dma_bo, &rmesa->dma.free)
357 foreach(dma_bo, &rmesa->dma.wait)
360 foreach(dma_bo, &rmesa->dma.reserved)
364 __FUNCTION__, free, wait, reserved, rmesa->dma.minimum_size);
369 foreach_s(dma_bo, temp, &rmesa->dma.wait) {
378 if (dma_bo->bo->size < rmesa->dma.minimum_size) {
389 insert_at_tail(&rmesa->dma.free, dma_bo);
393 foreach_s(dma_bo, temp, &rmesa->dma.reserved) {
396 if (dma_bo->bo->size < rmesa->dma.minimum_size) {
404 insert_at_tail(&rmesa->dma.wait, dma_bo);
408 foreach_s(dma_bo, temp, &rmesa->dma.free) {
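radeonReleaseDmaRegions() (lines 343-408) ages buffers through the lists: reserved BOs are unmapped and retired to wait, idle wait BOs move to free, and free BOs whose stamp comes due (or any BO now smaller than minimum_size) are destroyed. The clock is the free list's expire_counter, ticked once per call at line 347. A sketch of that clock; the DMA_BO_FREE_TIME value here is illustrative:

    #define DMA_BO_FREE_TIME 100  /* illustrative; see the driver's define */

    /* Tick the clock once per release pass and compute the stamp given to
     * BOs retiring into the wait/free lists (source lines 347-348). */
    static int tick(int *expire_counter)
    {
        return ++*expire_counter;        /* "time" at line 348 */
    }

    static int stamp(int time)
    {
        return time + DMA_BO_FREE_TIME;  /* "expire_at" at line 347 */
    }

    /* A free BO dies when its stamp comes around to the current tick,
     * i.e. DMA_BO_FREE_TIME release passes after it was retired. */
    static int expired(int bo_stamp, int time)
    {
        return bo_stamp == time;
    }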
423 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
424 struct radeon_dma *dma = &rmesa->dma;
430 radeon_bo_unmap(rmesa->swtcl.bo);
436 rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
442 rmesa->vtbl.swtcl_flush(ctx, current_offset);
444 rmesa->swtcl.numverts = 0;
446 radeon_bo_unref(rmesa->swtcl.bo);
447 rmesa->swtcl.bo = NULL;
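rcommon_flush_last_swtcl_prim() (lines 423-447) unmaps the swtcl BO, checks that the vertex write pointer sits exactly numverts * vertex_size dwords past current_used, passes the run's start offset to the chip-specific swtcl_flush hook (line 442), and drops the BO reference. A sketch of that invariant and hand-off, with the function shape simplified:

    #include <assert.h>
    #include <stddef.h>

    /* Check the invariant asserted around line 436, consume the pending
     * vertex run, and return its start offset for the hardware hand-off. */
    static size_t flush_prim(size_t *current_used, size_t *current_vertexptr,
                             unsigned numverts, unsigned vertex_size)
    {
        size_t start = *current_used;
        assert(start + (size_t)numverts * vertex_size * 4 == *current_vertexptr);
        *current_used = *current_vertexptr;  /* run consumed */
        return start;  /* offset passed to rmesa->vtbl.swtcl_flush() */
    }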
452 rcommonAllocDmaLowVerts( radeonContextPtr rmesa, int nverts, int vsize )
459 if (is_empty_list(&rmesa->dma.reserved)
460 || rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
461 if (rmesa->dma.flush) {
462 rmesa->dma.flush(rmesa->glCtx);
465 radeonRefillCurrentDmaRegion(rmesa, bytes);
470 if (!rmesa->dma.flush) {
472 rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
473 rmesa->dma.flush = rcommon_flush_last_swtcl_prim;
476 ASSERT( vsize == rmesa->swtcl.vertex_size * 4 );
477 ASSERT( rmesa->dma.flush == rcommon_flush_last_swtcl_prim );
478 ASSERT( rmesa->dma.current_used +
479 rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
480 rmesa->dma.current_vertexptr );
482 if (!rmesa->swtcl.bo) {
483 rmesa->swtcl.bo = first_elem(&rmesa->dma.reserved)->bo;
484 radeon_bo_ref(rmesa->swtcl.bo);
485 radeon_bo_map(rmesa->swtcl.bo, 1);
488 head = (rmesa->swtcl.bo->ptr + rmesa->dma.current_vertexptr);
489 rmesa->dma.current_vertexptr += bytes;
490 rmesa->swtcl.numverts += nverts;
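rcommonAllocDmaLowVerts() (lines 452-490) carves nverts * vsize bytes out of the current reserved BO, installing rcommon_flush_last_swtcl_prim as the deferred flush (line 473) and mapping the BO on first use (lines 482-485), then returns a write pointer at current_vertexptr. A sketch of the fast path, omitting the refill and flush handling:

    #include <stddef.h>

    /* Carve nverts * vsize bytes out of the mapped BO and advance the
     * vertex write pointer; the refill/flush paths and the debug asserts
     * from the original are omitted. */
    static void *alloc_verts(char *bo_ptr, size_t *current_vertexptr,
                             unsigned nverts, unsigned vsize,
                             unsigned *numverts)
    {
        void *head = bo_ptr + *current_vertexptr;      /* line 488 */
        *current_vertexptr += (size_t)nverts * vsize;  /* line 489 */
        *numverts += nverts;                           /* line 490 */
        return head;
    }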