pb_buffer_fenced.c revision 3498616b03f4b1da7a2a74ed83e95aee77204a2f
/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */
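
/*
 * Overview (supplementary note, inferred from the code below):
 *
 * A fenced_manager wraps a provider pb_manager together with a set of
 * pb_fence_ops. Every buffer it hands out is a fenced_buffer, which lives
 * on exactly one of two lists:
 *
 *  - unfenced: not associated with any fence; its storage may be a real
 *    provider (GPU) buffer or a temporary malloc'ed CPU shadow.
 *  - fenced: associated with a fence, ordered from the oldest fence to the
 *    newest.
 *
 * When GPU storage cannot be allocated, the manager reclaims memory by
 * waiting for fences to expire and/or by swapping idle unfenced buffers
 * out to CPU memory, up to max_cpu_total_size bytes.
 */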


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"



/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};
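
/*
 * Storage states of a fenced_buffer (supplementary note, derived from the
 * code below):
 *
 *  - buffer != NULL, data == NULL: backed by real GPU storage from the
 *    provider.
 *  - buffer == NULL, data != NULL: backed only by a temporary CPU shadow;
 *    GPU storage is (re)created at validation time and the contents are
 *    copied back in.
 *  - buffer != NULL, data != NULL: transient, while contents are being
 *    copied between the two (e.g. while the buffer is still mapped during
 *    validation).
 */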


static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);


/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %7s %10s %s\n",
                "buffer", "size", "refcount", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %7u\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %7u %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}


/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
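/*
 * Note on reference counting (supplementary): a buffer on the fenced list
 * holds one extra reference on behalf of its pending fence. The increment
 * is currently performed by the callers (see fenced_buffer_fence()), and
 * the matching decrement happens after fenced_buffer_remove_locked(); the
 * TODOs below suggest eventually moving both into these helpers.
 */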
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   /* TODO: Move the reference count increment here */

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}


/**
 * Remove the buffer from the fenced list.
 *
 * Reference count should be decremented after calling this function.
 */
static INLINE void
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   /* TODO: Move the reference count decrement and destruction here */
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      if(ops->fence_finish(ops, fenced_buf->fence, 0) == 0) {
         /*
          * Remove from the fenced list
          */
         fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));

         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}


/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
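/*
 * Supplementary note: the list is walked from the oldest fence to the
 * newest, and the walk stops at the first fence that has not signalled,
 * since later fences cannot have expired earlier. Identical consecutive
 * fences are only queried once. When 'wait' is TRUE, at most one blocking
 * fence_finish() is issued; the rest of the walk is non-blocking.
 */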
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      pb_reference((struct pb_buffer **)&fenced_buf, NULL);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
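/*
 * Supplementary note: at most one buffer is swapped out per call; callers
 * invoke this in a loop (see fenced_buffer_create_gpu_storage_locked())
 * for as long as it keeps making progress.
 */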
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}


/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}


/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if(fenced_buf->data)
      return PIPE_OK;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->mgr->cpu_total_size += fenced_buf->size;
   debug_printf("%s: cpu_total_size = %lu\n",
                __FUNCTION__,
                (unsigned long)fenced_buf->mgr->cpu_total_size);

   return PIPE_OK;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}


/**
 * Create GPU storage for this buffer.
 */
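/*
 * Supplementary note on the allocation strategy below:
 *  1. reclaim already-signalled fences without blocking, then try to
 *     allocate;
 *  2. while that fails, keep retrying as long as either more fences expire
 *     or an idle buffer can be swapped out to CPU memory;
 *  3. only if 'wait' is TRUE, repeat the loop once more allowing blocking
 *     fence waits before giving up with PIPE_ERROR_OUT_OF_MEMORY.
 */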
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* give up */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}


static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);

   FREE(fenced_buf);
}


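/**
 * Map the buffer for CPU access.
 *
 * Comment added for clarity, describing the behaviour implemented below:
 * if the GPU may still be writing the buffer (or reading it while a CPU
 * write is requested), this blocks on the fence unless
 * PIPE_BUFFER_USAGE_DONTBLOCK is set, in which case NULL is returned.
 * The mapping is served either from the real GPU buffer or from the
 * temporary CPU shadow.
 */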
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));

   /* Serialize writes */
   if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
      ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {

      if((flags & PIPE_BUFFER_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         /* Don't wait for the GPU to finish writing */
         goto done;
      }

      /* Wait for the GPU to finish writing */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}


static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


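/**
 * Prepare the buffer for submission in the given validation list.
 *
 * Comment added for clarity, describing the behaviour implemented below:
 * if the buffer currently lives only in its CPU shadow, real GPU storage
 * is (re)created here, possibly blocking on fences, and the contents are
 * copied back in before pb_validate() is called on the underlying buffer.
 */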
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}


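/**
 * Associate a fence with the buffer after command submission.
 *
 * Comment added for clarity, describing the behaviour implemented below:
 * attaching a new fence moves the buffer to the fenced list and takes an
 * extra reference on it; passing the same fence again is a no-op, and the
 * previous fence (if any) is released first.
 */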
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         p_atomic_dec(&fenced_buf->base.base.reference.count);
         assert(pipe_is_referenced(&fenced_buf->base.base.reference));
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         p_atomic_inc(&fenced_buf->base.base.reference.count);
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};


/**
 * Wrap a buffer in a fenced buffer.
 */
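/*
 * Supplementary note on the fallback order used below:
 *  1. try to create GPU storage without stalling;
 *  2. if that fails and the request is not larger than max_buffer_size,
 *     fall back to a CPU shadow while cpu_total_size stays under
 *     max_cpu_total_size;
 *  3. otherwise retry GPU storage, this time allowing fence waits.
 */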
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, 0);
   if(ret != PIPE_OK) {
      /*
       * Don't stall the GPU or waste memory trying to create a buffer that will
       * most likely never fit into the graphics aperture.
       */
      if(size > fenced_mgr->max_buffer_size) {
         goto no_storage;
      }

      if(fenced_mgr->cpu_total_size + size <= fenced_mgr->max_cpu_total_size) {
         /* Use CPU memory to avoid stalling the GPU */
         ret = fenced_buffer_create_cpu_storage_locked(fenced_buf);
      }
      else {
         /* Create GPU storage, waiting for some to be available */
         ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, 1);
      }
      if(ret != PIPE_OK) {
         goto no_storage;
      }
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}


static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}


static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}


struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}

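
/*
 * Usage sketch (illustrative only; the concrete provider and fence-ops
 * constructors are winsys-specific and merely assumed here):
 *
 *    struct pb_manager *provider = ...;   // e.g. the winsys/kernel manager
 *    struct pb_fence_ops *ops = ...;      // winsys fence operations
 *    struct pb_manager *mgr;
 *    struct pb_desc desc;
 *    struct pb_buffer *buf;
 *
 *    mgr = fenced_bufmgr_create(provider, ops,
 *                               16 * 1024 * 1024,   // max_buffer_size
 *                               16 * 1024 * 1024);  // max_cpu_total_size
 *
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 4096;
 *    buf = mgr->create_buffer(mgr, 65536, &desc);
 *
 *    // map/unmap, validate/fence through the pb_buffer vtbl as usual ...
 *
 *    pb_reference(&buf, NULL);
 *    mgr->flush(mgr);
 *    mgr->destroy(mgr);
 */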