pb_buffer_fenced.c revision a41b78d107264227f3338446e04dcfda32634f52
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author José Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX)
#include <unistd.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_winsys.h"
#include "pipe/p_thread.h"
#include "pipe/p_util.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)

#define PIPE_BUFFER_USAGE_CPU_READ_WRITE \
   ( PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE )
#define PIPE_BUFFER_USAGE_GPU_READ_WRITE \
   ( PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE )
#define PIPE_BUFFER_USAGE_WRITE \
   ( PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_GPU_WRITE )


struct fenced_buffer_list
{
   _glthread_Mutex mutex;

   struct pipe_winsys *winsys;

   size_t numDelayed;

   struct list_head delayed;
};


/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;

   /* FIXME: protect access with mutex */

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;
   struct pipe_fence_handle *fence;

   struct list_head head;
   struct fenced_buffer_list *list;
};


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   assert(buf->vtbl == &fenced_buffer_vtbl);
   return (struct fenced_buffer *)buf;
}


static INLINE void
_fenced_buffer_add(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(fenced_buf->base.base.refcount);
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   assert(!fenced_buf->head.prev);
   assert(!fenced_buf->head.next);
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}


/**
 * Actually destroy the buffer.
 */
static INLINE void
_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->base.base.refcount);
   assert(!fenced_buf->fence);
   pb_reference(&fenced_buf->buffer, NULL);
   FREE(fenced_buf);
}


static INLINE void
_fenced_buffer_remove(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pipe_winsys *winsys = fenced_list->winsys;

   assert(fenced_buf->fence);

   winsys->fence_reference(winsys, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
#ifdef DEBUG
   fenced_buf->head.prev = NULL;
   fenced_buf->head.next = NULL;
#endif

   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

   if(!fenced_buf->base.base.refcount)
      _fenced_buffer_destroy(fenced_buf);
}


/**
 * Free as many fenced buffers from the list head as possible.
 */
static void
_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                               int wait)
{
   struct pipe_winsys *winsys = fenced_list->winsys;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if (wait)
            signaled = winsys->fence_finish(winsys, fenced_buf->fence, 0);
         else
            signaled = winsys->fence_signalled(winsys, fenced_buf->fence, 0);
         if (signaled != 0)
            break;
         prev_fence = fenced_buf->fence;
      }
      else {
         assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
      }

      _fenced_buffer_remove(fenced_buf);

      curr = next;
      next = curr->next;
   }
}


/**
 * Serialize writes, but allow concurrent reads.
 */
static INLINE enum pipe_error
fenced_buffer_serialize(struct fenced_buffer *fenced_buf, unsigned flags)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pipe_winsys *winsys = fenced_list->winsys;

   /* Allow concurrent reads */
   if(((fenced_buf->flags | flags) & PIPE_BUFFER_USAGE_WRITE) == 0)
      return PIPE_OK;

   /* Wait for the CPU to finish */
   if(fenced_buf->mapcount) {
      /* FIXME: Use thread condition variables to signal when mapcount
       * reaches zero */
      debug_warning("attempt to write concurrently to buffer");
      /* XXX: we must not fail here in order to support texture mipmap generation
      return PIPE_ERROR_RETRY;
       */
   }

   /* Wait for the GPU to finish */
   if(fenced_buf->fence) {
      if(winsys->fence_finish(winsys, fenced_buf->fence, 0) != 0)
         return PIPE_ERROR_RETRY;
      _fenced_buffer_remove(fenced_buf);
   }

   return PIPE_OK;
}


static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   _glthread_LOCK_MUTEX(fenced_list->mutex);
   assert(fenced_buf->base.base.refcount == 0);
   if (fenced_buf->fence) {
      struct pipe_winsys *winsys = fenced_list->winsys;
      if(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0) {
         struct list_head *curr, *prev;
         curr = &fenced_buf->head;
         prev = curr->prev;
         do {
            fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
            assert(winsys->fence_signalled(winsys, fenced_buf->fence, 0) == 0);
            _fenced_buffer_remove(fenced_buf);
            curr = prev;
            prev = curr->prev;
         } while (curr != &fenced_list->delayed);
      }
      else {
         /* delay destruction */
      }
   }
   else {
      _fenced_buffer_destroy(fenced_buf);
   }
   _glthread_UNLOCK_MUTEX(fenced_list->mutex);
}


static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   void *map;
   assert((flags & ~PIPE_BUFFER_USAGE_CPU_READ_WRITE) == 0);

   if(fenced_buffer_serialize(fenced_buf, flags) != PIPE_OK)
      return NULL;

   map = pb_map(fenced_buf->buffer, flags);
   if(map)
      ++fenced_buf->mapcount;
   fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   return map;
}


static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   assert(fenced_buf->mapcount);
   pb_unmap(fenced_buf->buffer);
   --fenced_buf->mapcount;
   if(!fenced_buf->mapcount)
      fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
}


static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              unsigned *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}


const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_get_base_buffer
};


/**
 * Wrap a buffer in a fenced buffer. The fenced buffer takes over the
 * caller's reference to the wrapped buffer.
 */
struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf)
      return NULL;

   buf->base.base.refcount = 1;
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

   return &buf->base;
}


/**
 * Associate a new fence with a buffer, releasing any previous fence.
 * Passing a NULL fence just releases the buffer's current fence.
 */
void
buffer_fence(struct pb_buffer *buf,
             struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pipe_winsys *winsys;
   /* FIXME: receive this as a parameter */
   unsigned flags = fence ? PIPE_BUFFER_USAGE_GPU_READ_WRITE : 0;

   /* This is a public function, so be extra cautious with the buffer passed:
    * it is common to receive null buffers, or pointers to buffers other than
    * fenced buffers. */
   assert(buf);
   if(!buf)
      return;
   assert(buf->vtbl == &fenced_buffer_vtbl);
   if(buf->vtbl != &fenced_buffer_vtbl)
      return;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   winsys = fenced_list->winsys;

   if(fence == fenced_buf->fence) {
      /* Handle the same fence case specially, not only because it is a fast
       * path, but mostly to avoid serializing two writes with the same fence,
       * as that would bring the hardware down to synchronous operation without
       * any benefit.
       */
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
      return;
   }

   if(fenced_buffer_serialize(fenced_buf, flags) != PIPE_OK) {
      /* FIXME: propagate error */
      (void)0;
   }

   _glthread_LOCK_MUTEX(fenced_list->mutex);
   if (fenced_buf->fence)
      _fenced_buffer_remove(fenced_buf);
   if (fence) {
      winsys->fence_reference(winsys, &fenced_buf->fence, fence);
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE;
      _fenced_buffer_add(fenced_buf);
   }
   _glthread_UNLOCK_MUTEX(fenced_list->mutex);
}


struct fenced_buffer_list *
fenced_buffer_list_create(struct pipe_winsys *winsys)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = (struct fenced_buffer_list *)CALLOC(1, sizeof(*fenced_list));
   if (!fenced_list)
      return NULL;

   fenced_list->winsys = winsys;

   LIST_INITHEAD(&fenced_list->delayed);

   fenced_list->numDelayed = 0;

   _glthread_INIT_MUTEX(fenced_list->mutex);

   return fenced_list;
}


void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   _glthread_LOCK_MUTEX(fenced_list->mutex);
   _fenced_buffer_list_check_free(fenced_list, wait);
   _glthread_UNLOCK_MUTEX(fenced_list->mutex);
}


void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   _glthread_LOCK_MUTEX(fenced_list->mutex);

   /* Wait on outstanding fences */
   while (fenced_list->numDelayed) {
      _glthread_UNLOCK_MUTEX(fenced_list->mutex);
#if defined(PIPE_OS_LINUX)
      sched_yield();
#endif
      _fenced_buffer_list_check_free(fenced_list, 1);
      _glthread_LOCK_MUTEX(fenced_list->mutex);
   }

   _glthread_UNLOCK_MUTEX(fenced_list->mutex);

   FREE(fenced_list);
}
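
Below is a minimal usage sketch of the public entry points defined above (fenced_buffer_list_create, fenced_buffer_create, buffer_fence, fenced_buffer_list_check_free, fenced_buffer_list_destroy). It is not part of the revision shown: my_winsys, raw_buf and my_fence are placeholders for objects a real winsys/driver would obtain elsewhere, and pb_map/pb_unmap/pb_reference come from pb_buffer.h.

/* Hypothetical usage sketch -- not part of the file above. */
static void
example_upload(struct pipe_winsys *my_winsys,      /* placeholder */
               struct pb_buffer *raw_buf,          /* placeholder */
               struct pipe_fence_handle *my_fence) /* placeholder */
{
   struct fenced_buffer_list *list;
   struct pb_buffer *buf;
   void *map;

   list = fenced_buffer_list_create(my_winsys);
   buf = fenced_buffer_create(list, raw_buf);
   if(!list || !buf)
      return;

   /* CPU access is serialized against any fence still attached to the buffer. */
   map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
   if(map) {
      /* ... write vertex or texture data ... */
      pb_unmap(buf);
   }

   /* After submitting GPU work that references the buffer, attach its fence. */
   buffer_fence(buf, my_fence);

   /* Drop the local reference; actual destruction is delayed until the
    * fence has signalled. */
   pb_reference(&buf, NULL);

   /* Reclaim buffers whose fences have already signalled (wait = 0). */
   fenced_buffer_list_check_free(list, 0);

   /* Waits for all outstanding fences before freeing the list. */
   fenced_buffer_list_destroy(list);
}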