/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * \file
 * Batch buffer pool management.
 *
 * A simple fixed-size buffer pool: one large allocation obtained from the
 * provider manager is sub-divided into equally sized slots, which are
 * handed out and recycled through a free list.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */


#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"
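/*
 * Minimal usage sketch (illustrative only, not part of this file's API
 * surface): "provider" is assumed to be any existing pb_manager, e.g. the
 * winsys buffer manager, and the sizes and usage flags below are made-up
 * values.
 *
 *    struct pb_desc desc;
 *    struct pb_manager *pool;
 *    struct pb_buffer *buf;
 *    void *data;
 *
 *    memset(&desc, 0, sizeof(desc));
 *    desc.alignment = 4096;
 *    desc.usage = PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE;
 *
 *    // One provider allocation carved into 16 fixed-size 64 KiB slots.
 *    pool = pool_bufmgr_create(provider, 16, 64 * 1024, &desc);
 *
 *    // Sub-buffers must be requested with exactly the pool's slot size.
 *    buf = pool->create_buffer(pool, 64 * 1024, &desc);
 *    data = pb_map(buf, PB_USAGE_CPU_WRITE, NULL);
 *    ...
 *    pb_unmap(buf);
 *    pb_reference(&buf, NULL);   // releasing returns the slot to the free list
 *
 *    pool->destroy(pool);
 */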
/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct pool_pb_manager
{
   struct pb_manager base;

   pipe_mutex mutex;

   pb_size bufSize;
   pb_size bufAlign;

   pb_size numFree;
   pb_size numTot;

   struct list_head free;

   struct pb_buffer *buffer;
   void *map;

   struct pool_buffer *bufs;
};


static INLINE struct pool_pb_manager *
pool_pb_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pool_pb_manager *)mgr;
}


struct pool_buffer
{
   struct pb_buffer base;

   struct pool_pb_manager *mgr;

   struct list_head head;

   pb_size start;
};


static INLINE struct pool_buffer *
pool_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pool_buffer *)buf;
}



static void
pool_buffer_destroy(struct pb_buffer *buf)
{
   struct pool_buffer *pool_buf = pool_buffer(buf);
   struct pool_pb_manager *pool = pool_buf->mgr;

   assert(!pipe_is_referenced(&pool_buf->base.reference));

   pipe_mutex_lock(pool->mutex);
   LIST_ADD(&pool_buf->head, &pool->free);
   pool->numFree++;
   pipe_mutex_unlock(pool->mutex);
}


static void *
pool_buffer_map(struct pb_buffer *buf, unsigned flags, void *flush_ctx)
{
   struct pool_buffer *pool_buf = pool_buffer(buf);
   struct pool_pb_manager *pool = pool_buf->mgr;
   void *map;

   /* XXX: it will be necessary to remap here to propagate flush_ctx */

   pipe_mutex_lock(pool->mutex);
   map = (unsigned char *) pool->map + pool_buf->start;
   pipe_mutex_unlock(pool->mutex);
   return map;
}


static void
pool_buffer_unmap(struct pb_buffer *buf)
{
   /* No-op */
}


static enum pipe_error
pool_buffer_validate(struct pb_buffer *buf,
                     struct pb_validate *vl,
                     unsigned flags)
{
   struct pool_buffer *pool_buf = pool_buffer(buf);
   struct pool_pb_manager *pool = pool_buf->mgr;
   return pb_validate(pool->buffer, vl, flags);
}


static void
pool_buffer_fence(struct pb_buffer *buf,
                  struct pipe_fence_handle *fence)
{
   struct pool_buffer *pool_buf = pool_buffer(buf);
   struct pool_pb_manager *pool = pool_buf->mgr;
   pb_fence(pool->buffer, fence);
}


static void
pool_buffer_get_base_buffer(struct pb_buffer *buf,
                            struct pb_buffer **base_buf,
                            pb_size *offset)
{
   struct pool_buffer *pool_buf = pool_buffer(buf);
   struct pool_pb_manager *pool = pool_buf->mgr;
   pb_get_base_buffer(pool->buffer, base_buf, offset);
   *offset += pool_buf->start;
}


static const struct pb_vtbl
pool_buffer_vtbl = {
   pool_buffer_destroy,
   pool_buffer_map,
   pool_buffer_unmap,
   pool_buffer_validate,
   pool_buffer_fence,
   pool_buffer_get_base_buffer
};


static struct pb_buffer *
pool_bufmgr_create_buffer(struct pb_manager *mgr,
                          pb_size size,
                          const struct pb_desc *desc)
{
   struct pool_pb_manager *pool = pool_pb_manager(mgr);
   struct pool_buffer *pool_buf;
   struct list_head *item;

   assert(size == pool->bufSize);
   assert(pool->bufAlign % desc->alignment == 0);

   pipe_mutex_lock(pool->mutex);

   if (pool->numFree == 0) {
      pipe_mutex_unlock(pool->mutex);
      debug_printf("warning: out of fixed size buffer objects\n");
      return NULL;
   }

   item = pool->free.next;

   if (item == &pool->free) {
      pipe_mutex_unlock(pool->mutex);
      debug_printf("error: fixed size buffer pool corruption\n");
      return NULL;
   }

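   /*
    * Detach the first free slot while the mutex is still held; once it is
    * off the free list it can safely be initialized outside the lock.
    */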
   LIST_DEL(item);
   --pool->numFree;

   pipe_mutex_unlock(pool->mutex);

   pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
   assert(!pipe_is_referenced(&pool_buf->base.reference));
   pipe_reference_init(&pool_buf->base.reference, 1);
   pool_buf->base.alignment = desc->alignment;
   pool_buf->base.usage = desc->usage;

   return SUPER(pool_buf);
}


static void
pool_bufmgr_flush(struct pb_manager *mgr)
{
   /* No-op */
}


static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
   struct pool_pb_manager *pool = pool_pb_manager(mgr);
   pipe_mutex_lock(pool->mutex);

   FREE(pool->bufs);

   pb_unmap(pool->buffer);
   pb_reference(&pool->buffer, NULL);

   pipe_mutex_unlock(pool->mutex);

   FREE(mgr);
}


struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider,
                   pb_size numBufs,
                   pb_size bufSize,
                   const struct pb_desc *desc)
{
   struct pool_pb_manager *pool;
   struct pool_buffer *pool_buf;
   pb_size i;

   if (!provider)
      return NULL;

   pool = CALLOC_STRUCT(pool_pb_manager);
   if (!pool)
      return NULL;

   pool->base.destroy = pool_bufmgr_destroy;
   pool->base.create_buffer = pool_bufmgr_create_buffer;
   pool->base.flush = pool_bufmgr_flush;

   LIST_INITHEAD(&pool->free);

   pool->numTot = numBufs;
   pool->numFree = numBufs;
   pool->bufSize = bufSize;
   pool->bufAlign = desc->alignment;

   pipe_mutex_init(pool->mutex);

   pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc);
   if (!pool->buffer)
      goto failure;

   pool->map = pb_map(pool->buffer,
                      PB_USAGE_CPU_READ |
                      PB_USAGE_CPU_WRITE, NULL);
   if (!pool->map)
      goto failure;

   pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
   if (!pool->bufs)
      goto failure;

   pool_buf = pool->bufs;
   for (i = 0; i < numBufs; ++i) {
      pipe_reference_init(&pool_buf->base.reference, 0);
      pool_buf->base.alignment = 0;
      pool_buf->base.usage = 0;
      pool_buf->base.size = bufSize;
      pool_buf->base.vtbl = &pool_buffer_vtbl;
      pool_buf->mgr = pool;
      pool_buf->start = i * bufSize;
      LIST_ADDTAIL(&pool_buf->head, &pool->free);
      pool_buf++;
   }

   return SUPER(pool);

failure:
   if (pool->bufs)
      FREE(pool->bufs);
   if (pool->map)
      pb_unmap(pool->buffer);
   if (pool->buffer)
      pb_reference(&pool->buffer, NULL);
   if (pool)
      FREE(pool);
   return NULL;
}