/* pb_buffer_fenced.c, revision 5b64d94390e4805e1634f0c8b5e3156e12b8b872 */
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


/**
 * Bookkeeping for fenced buffers: buffers with a pending fence sit on the
 * delayed list; in debug builds, buffers without a fence are also tracked
 * on the unfenced list.
 */
struct fenced_buffer_list
{
   pipe_mutex mutex;

   struct pb_fence_ops *ops;

   pb_size numDelayed;
   struct list_head delayed;

#ifdef DEBUG
   pb_size numUnfenced;
   struct list_head unfenced;
#endif
};


/**
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct pb_buffer *buffer;
   struct fenced_buffer_list *list;

   /**
    * Protected by fenced_buffer_list::mutex.
    */
   struct list_head head;

   /**
    * The following members are mutable and protected by this mutex.
    *
    * You may lock this mutex alone, or lock it with fenced_buffer_list::mutex
    * held, but in order to prevent deadlocks you must never lock
    * fenced_buffer_list::mutex while this mutex is held.
    */
   pipe_mutex mutex;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;
   struct pb_validate *vl;
   unsigned validation_flags;
   struct pipe_fence_handle *fence;
};

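/*
 * A minimal sketch (not part of the original code) of the lock order the
 * comment above prescribes: take fenced_buffer_list::mutex first, then
 * fenced_buffer::mutex, and release in the reverse order.
 *
 * \code
 *    pipe_mutex_lock(fenced_list->mutex);
 *    pipe_mutex_lock(fenced_buf->mutex);
 *    ... inspect or update the mutable members ...
 *    pipe_mutex_unlock(fenced_buf->mutex);
 *    pipe_mutex_unlock(fenced_list->mutex);
 * \endcode
 */
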

static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


/**
 * Add the buffer to the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in that
 * order, before calling this function.
 *
 * The reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_buffer_list *fenced_list,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   /* TODO: Move the reference count increment here */

#ifdef DEBUG
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#endif
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
   ++fenced_list->numDelayed;
}


/**
 * Remove the buffer from the fenced list.
 *
 * fenced_buffer_list::mutex and fenced_buffer::mutex must be held, in that
 * order, before calling this function.
 *
 * The reference count should be decremented after calling this function.
 */
static INLINE void
fenced_buffer_remove_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->list == fenced_list);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numDelayed);
   --fenced_list->numDelayed;

#ifdef DEBUG
   LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
#endif

   /* TODO: Move the reference count decrement and destruction here */
}


/**
 * Wait for the fence to expire, and remove the buffer from the fenced list.
 *
 * fenced_buffer::mutex must be held. fenced_buffer_list::mutex must not be
 * held -- it will be acquired internally.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_buffer_list *fenced_list,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   /*
    * Acquire the global lock. Must release the buffer mutex first to
    * preserve the lock order.
    */
   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   /*
    * Re-check the fence -- another thread may have removed it while both
    * mutexes were released above.
    */
   if(fenced_buf->fence) {
      if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
         /* Remove from the fenced list */
         /* TODO: also remove subsequent buffers sharing the same fence */
         fenced_buffer_remove_locked(fenced_list, fenced_buf);

         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   pipe_mutex_unlock(fenced_list->mutex);

   return ret;
}


/**
 * Free as many fenced buffers from the list head as possible.
 */
static void
fenced_buffer_list_check_free_locked(struct fenced_buffer_list *fenced_list,
                                     int wait)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      pipe_mutex_lock(fenced_buf->mutex);

      if(fenced_buf->fence != prev_fence) {
         int signaled;
         if(wait)
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
         else
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         if(signaled != 0) {
            /* Fence not expired yet -- the list is in submission order,
             * so no later buffer can be free either */
            pipe_mutex_unlock(fenced_buf->mutex);
            break;
         }
         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer shares the fence we just checked */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_list, fenced_buf);
      pipe_mutex_unlock(fenced_buf->mutex);

      pb_reference((struct pb_buffer **)&fenced_buf, NULL);

      curr = next;
      next = curr->next;
   }
}


static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(!fenced_buf->fence);

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#else
   (void)fenced_list;
#endif

   pb_reference(&fenced_buf->buffer, NULL);

   pipe_mutex_destroy(fenced_buf->mutex);
   FREE(fenced_buf);
}


static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   struct pb_fence_ops *ops = fenced_list->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_buf->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /* Serialize writes */
   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) &&
       (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         /* Fence has not expired and we may not block -- fail the map.
          * (fence_signalled() returns 0 once the fence has expired.) */
         goto done;
      }

      /* Wait for the GPU to finish writing */
      fenced_buffer_finish_locked(fenced_list, fenced_buf);
   }

#if 0
   /* Check for CPU write access (read is OK) */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* this is legal -- just for debugging */
      debug_warning("concurrent CPU writes");
   }
#endif

   map = pb_map(fenced_buf->buffer, flags);
   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_buf->mutex);

   return map;
}

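/*
 * Usage sketch (illustrative only; `buf' is assumed to be a buffer returned
 * by fenced_buffer_create()): attempt a non-blocking map first, then fall
 * back to a blocking map if the GPU is still using the buffer.
 *
 * \code
 *    void *data = pb_map(buf, PIPE_BUFFER_USAGE_CPU_READ |
 *                             PIPE_BUFFER_USAGE_DONTBLOCK);
 *    if(!data)
 *       data = pb_map(buf, PIPE_BUFFER_USAGE_CPU_READ);
 *    if(data) {
 *       ... read the contents ...
 *       pb_unmap(buf);
 *    }
 * \endcode
 */
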
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);

   pipe_mutex_lock(fenced_buf->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
}


static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   enum pipe_error ret;

   pipe_mutex_lock(fenced_buf->mutex);

   if(!vl) {
      /* A NULL validation list invalidates the previous validation */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

#if 0
   /* Do not validate if buffer is still mapped */
   if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
      /* TODO: wait for the thread that mapped the buffer to unmap it */
      ret = PIPE_ERROR_RETRY;
      goto done;
   }
   /* Final sanity checking */
   assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
   assert(!fenced_buf->mapcount);
#endif

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if(ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_buf->mutex);

   return ret;
}


static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf;
   struct fenced_buffer_list *fenced_list;
   struct pb_fence_ops *ops;

   fenced_buf = fenced_buffer(buf);
   fenced_list = fenced_buf->list;
   ops = fenced_list->ops;

   pipe_mutex_lock(fenced_list->mutex);
   pipe_mutex_lock(fenced_buf->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if(fenced_buf->fence) {
         /* Drop the old fence and the list's reference on the buffer */
         fenced_buffer_remove_locked(fenced_list, fenced_buf);
         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
      }
      if(fence) {
         /* The list holds a reference while the buffer is fenced */
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         p_atomic_inc(&fenced_buf->base.base.reference.count);
         fenced_buffer_add_locked(fenced_list, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_buf->mutex);
   pipe_mutex_unlock(fenced_list->mutex);
}

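/*
 * Illustrative submission flow (a sketch; obtaining the fence from the
 * winsys flush is an assumption, not something this file defines):
 * validate the buffer before building commands, then attach the resulting
 * fence afterwards.
 *
 * \code
 *    pb_validate(buf, vl, PIPE_BUFFER_USAGE_GPU_READ);
 *    ... emit commands referencing buf, flush, obtain fence ...
 *    pb_fence(buf, fence);
 * \endcode
 */
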
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   /* NOTE: accesses immutable members only -- mutex not necessary */
   pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};


struct pb_buffer *
fenced_buffer_create(struct fenced_buffer_list *fenced_list,
                     struct pb_buffer *buffer)
{
   struct fenced_buffer *buf;

   if(!buffer)
      return NULL;

   buf = CALLOC_STRUCT(fenced_buffer);
   if(!buf) {
      /* This function owns the caller's reference, so release it here */
      pb_reference(&buffer, NULL);
      return NULL;
   }

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buffer->base.alignment;
   buf->base.base.usage = buffer->base.usage;
   buf->base.base.size = buffer->base.size;

   buf->base.vtbl = &fenced_buffer_vtbl;
   buf->buffer = buffer;
   buf->list = fenced_list;

   pipe_mutex_init(buf->mutex);

#ifdef DEBUG
   pipe_mutex_lock(fenced_list->mutex);
   LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
   ++fenced_list->numUnfenced;
   pipe_mutex_unlock(fenced_list->mutex);
#endif

   return &buf->base;
}

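/*
 * Usage sketch (illustrative; `provider' is an assumed pb_manager supplying
 * the underlying storage). Note that fenced_buffer_create() takes ownership
 * of the caller's reference to the wrapped buffer, releasing it on failure.
 *
 * \code
 *    struct pb_buffer *raw = provider->create_buffer(provider, size, &desc);
 *    struct pb_buffer *buf = fenced_buffer_create(fenced_list, raw);
 *    if(!buf) {
 *       ... raw has already been released; nothing to clean up ...
 *    }
 * \endcode
 */
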
struct fenced_buffer_list *
fenced_buffer_list_create(struct pb_fence_ops *ops)
{
   struct fenced_buffer_list *fenced_list;

   fenced_list = CALLOC_STRUCT(fenced_buffer_list);
   if(!fenced_list)
      return NULL;

   fenced_list->ops = ops;

   LIST_INITHEAD(&fenced_list->delayed);
   fenced_list->numDelayed = 0;

#ifdef DEBUG
   LIST_INITHEAD(&fenced_list->unfenced);
   fenced_list->numUnfenced = 0;
#endif

   pipe_mutex_init(fenced_list->mutex);

   return fenced_list;
}

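/*
 * Typical lifecycle (a sketch, assuming a winsys-provided pb_fence_ops
 * implementation named `ops'):
 *
 * \code
 *    struct fenced_buffer_list *list = fenced_buffer_list_create(ops);
 *    ... create fenced buffers, map, validate and fence them ...
 *    fenced_buffer_list_check_free(list, 0);   ... reclaim idle buffers ...
 *    fenced_buffer_list_destroy(list);         ... waits on pending fences ...
 * \endcode
 */
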
void
fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
                              int wait)
{
   pipe_mutex_lock(fenced_list->mutex);
   fenced_buffer_list_check_free_locked(fenced_list, wait);
   pipe_mutex_unlock(fenced_list->mutex);
}


#ifdef DEBUG
void
fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
{
   struct pb_fence_ops *ops = fenced_list->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   pipe_mutex_lock(fenced_list->mutex);

   debug_printf("%10s %7s %8s %10s %s\n",
                "buffer", "size", "refcount", "fence", "signalled");

   curr = fenced_list->unfenced.next;
   next = curr->next;
   while(curr != &fenced_list->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count));
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   curr = fenced_list->delayed.next;
   next = curr->next;
   while(curr != &fenced_list->delayed) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      pipe_mutex_lock(fenced_buf->mutex);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      pipe_mutex_unlock(fenced_buf->mutex);
      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(fenced_list->mutex);
}
#endif


void
fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
{
   pipe_mutex_lock(fenced_list->mutex);

   /* Wait on outstanding fences */
   while(fenced_list->numDelayed) {
      pipe_mutex_unlock(fenced_list->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_list->mutex);
      fenced_buffer_list_check_free_locked(fenced_list, 1);
   }

#ifdef DEBUG
   /*assert(!fenced_list->numUnfenced);*/
#endif

   pipe_mutex_unlock(fenced_list->mutex);
   pipe_mutex_destroy(fenced_list->mutex);

   fenced_list->ops->destroy(fenced_list->ops);

   FREE(fenced_list);
}