pb_bufmgr_cache.c revision 287c94ea4987033f9c99a2f91c5750c9083504ca
/**************************************************************************
 *
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer cache.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */


#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct pb_cache_manager;


/**
 * Wrapper around a pipe buffer which adds delayed destruction.
 */
struct pb_cache_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;
   struct pb_cache_manager *mgr;

   /** Caching time interval */
   int64_t start, end;

   struct list_head head;
};


struct pb_cache_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   unsigned usecs;

   pipe_mutex mutex;

   struct list_head delayed;
   pb_size numDelayed;
};


static INLINE struct pb_cache_buffer *
pb_cache_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_cache_buffer *)buf;
}


static INLINE struct pb_cache_manager *
pb_cache_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_cache_manager *)mgr;
}


/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   assert(!pipe_is_referenced(&buf->base.base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


/**
 * Free as many cache buffers from the list head as possible.
 */
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;
   int64_t now;

   now = os_time_get();

   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);

      if(!os_time_timeout(buf->start, buf->end, now))
         break;

      _pb_cache_buffer_destroy(buf);

      curr = next;
      next = curr->next;
   }
}


static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.base.reference));

   _pb_cache_buffer_list_check_free(mgr);

   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   pipe_mutex_unlock(mgr->mutex);
}


static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
                    unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_map(buf->buffer, flags);
}


static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_unmap(buf->buffer);
}


static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
}


const struct pb_vtbl
pb_cache_buffer_vtbl = {
   pb_cache_buffer_destroy,
   pb_cache_buffer_map,
   pb_cache_buffer_unmap,
   pb_cache_buffer_validate,
   pb_cache_buffer_fence,
   pb_cache_buffer_get_base_buffer
};


static INLINE boolean
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
                          pb_size size,
                          const struct pb_desc *desc)
{
   void *map;

   if(buf->base.base.size < size)
      return FALSE;

   /* be lenient with size */
   if(buf->base.base.size >= 2*size)
      return FALSE;

   if(!pb_check_alignment(desc->alignment, buf->base.base.alignment))
      return FALSE;

   if(!pb_check_usage(desc->usage, buf->base.base.usage))
      return FALSE;

   map = pb_map(buf->buffer, PB_USAGE_DONTBLOCK);
   if (!map) {
      return FALSE;
   }

   pb_unmap(buf->buffer);

   return TRUE;
}


static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   int64_t now;

   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while(curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
         buf = curr_buf;
      else if(os_time_timeout(curr_buf->start, curr_buf->end, now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all hereafter) are still hot in cache */
         break;
      curr = next;
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if(!buf) {
      while(curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         if(pb_cache_is_buffer_compat(curr_buf, size, desc)) {
            buf = curr_buf;
            break;
         }
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }

   if(buf) {
      LIST_DEL(&buf->head);
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pipe_reference_init(&buf->base.base.reference, 1);
      return &buf->base;
   }

   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if(!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   if(!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->base.reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
   assert(pb_check_usage(desc->usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= size);

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buf->buffer->base.alignment;
   buf->base.base.usage = buf->buffer->base.usage;
   buf->base.base.size = buf->buffer->base.size;

   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;

   return &buf->base;
}


static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      _pb_cache_buffer_destroy(buf);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(mgr->mutex);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_cache_manager_destroy(struct pb_manager *mgr)
{
   pb_cache_manager_flush(mgr);
   FREE(mgr);
}


struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}
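

/*
 * Illustrative usage sketch (not part of the original file): a driver or
 * winsys would typically wrap its low-level buffer manager with this cache
 * so that released buffers linger on the delayed list for `usecs` and can
 * be recycled by later allocations of a compatible size/alignment/usage.
 * The provider below, my_winsys_create_manager(), is hypothetical; the
 * pb_cache_manager_create(), create_buffer(), pb_reference() and destroy()
 * calls are the interfaces defined above and in pb_bufmgr.h/pb_buffer.h.
 *
 *    struct pb_manager *provider = my_winsys_create_manager(ws);   // hypothetical
 *    struct pb_manager *cached = pb_cache_manager_create(provider, 1000000);
 *    struct pb_desc desc;
 *    struct pb_buffer *buf;
 *
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 4096;
 *    desc.usage = PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE;
 *
 *    buf = cached->create_buffer(cached, 64 * 1024, &desc);
 *    // ... map/use/fence the buffer ...
 *    pb_reference(&buf, NULL);   // not freed: parked on the delayed list
 *    cached->destroy(cached);    // flushes the cache, then frees the manager
 */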