pb_bufmgr_slab.c revision 95aeeb6d746e57473116ef4d72c05330902f68a5
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * S-lab pool implementation.
 *
 * Small buffers are sub-allocated out of larger slabs, which are in turn
 * allocated from an underlying provider, amortizing the cost of many
 * equally sized allocations.
 *
 * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */
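
/*
 * Minimal usage sketch. Here `provider' stands for a pb_manager obtained
 * elsewhere (e.g. from the winsys layer); it is not defined in this file:
 *
 *    struct pb_desc desc;
 *    struct pb_manager *mgr;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 64;
 *    desc.usage = PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE;
 *
 *    // Sub-allocate 64 B .. 64 KiB buffers out of 1 MiB slabs.
 *    mgr = pb_slab_range_manager_create(provider, 64, 64*1024,
 *                                       1024*1024, &desc);
 *
 *    buf = mgr->create_buffer(mgr, 4096, &desc);
 *    ...
 *    pb_reference(&buf, NULL);   // return the buffer to its slab
 *    mgr->destroy(mgr);
 */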

#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_thread.h"
#include "pipe/p_defines.h"
#include "pipe/p_util.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


#define DRI_SLABPOOL_ALLOC_RETRIES 100


struct pb_slab;

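/**
 * Buffer sub-allocated out of a slab.
 *
 * head links the buffer into its parent slab's freeBuffers list while the
 * buffer is unused.  start is the byte offset of this buffer within the
 * slab's underlying storage, and event is broadcast whenever mapCount
 * drops back to zero.
 */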
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;
   struct list_head head;
   unsigned mapCount;
   size_t start;
   _glthread_Cond event;
};

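/**
 * Slab of equally sized buffers, sub-allocated out of a single buffer
 * object (bo) obtained from the provider.
 *
 * head links the slab into the manager's slabs list (while it has free
 * buffers) or its freeSlabs list (when entirely unused).  virtual is the
 * CPU address of the slab's storage, from which individual buffer
 * mappings are derived.
 */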
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   size_t numBuffers;
   size_t numFree;
   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   struct pb_buffer *bo;
   void *virtual;
};

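/**
 * Manager that sub-allocates fixed-size buffers (bufSize) out of slabs
 * of slabSize bytes requested from the underlying provider.
 *
 * slabs holds the slabs that still have free buffers; freeSlabs holds
 * slabs whose buffers are all free and which are pending destruction.
 * The mutex protects both lists as well as the per-slab free lists.
 */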
struct pb_slab_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   size_t bufSize;
   size_t slabSize;
   struct pb_desc desc;

   struct list_head slabs;
   struct list_head freeSlabs;

   _glthread_Mutex mutex;
};

/**
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   size_t minBufSize;
   size_t maxBufSize;
   struct pb_desc desc;

   unsigned numBuckets;
   size_t *bucketSizes;
   struct pb_manager **buckets;
};


static INLINE struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static INLINE struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static INLINE struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}



/**
 * Return a buffer to its slab's free list, destroying slabs that no
 * longer have any buffers in use.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   _glthread_LOCK_MUTEX(mgr->mutex);

   assert(buf->base.base.refcount == 0);

   buf->mapCount = 0;

   /* Return the buffer to its slab's free list. */
   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

   /* The slab was taken off the manager's list when its last free buffer
    * was allocated; put it back now that it has one again. */
   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* If the slab is now entirely unused, move it to the free slabs list. */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DEL(list);
      LIST_ADDTAIL(list, &mgr->freeSlabs);
   }

   /* Destroy all entirely unused slabs, unless this slab just became
    * entirely unused while other slabs still have free buffers (in that
    * case keep it cached). */
   if (mgr->slabs.next == &mgr->slabs ||
       slab->numFree != slab->numBuffers) {
      struct list_head *next;

      for (list = mgr->freeSlabs.next, next = list->next;
           list != &mgr->freeSlabs;
           list = next, next = list->next) {
         slab = LIST_ENTRY(struct pb_slab, list, head);

         LIST_DELINIT(list);
         pb_reference(&slab->bo, NULL);
         FREE(slab->buffers);
         FREE(slab);
      }
   }

   _glthread_UNLOCK_MUTEX(mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   /* Mappings are computed relative to the slab's virtual address. */
   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   /* Wake up anybody waiting for the buffer to become idle. */
   if (buf->mapCount == 0)
      _glthread_COND_BROADCAST(buf->event);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               unsigned *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


static const struct pb_vtbl
pb_slab_buffer_vtbl = {
      pb_slab_buffer_destroy,
      pb_slab_buffer_map,
      pb_slab_buffer_unmap,
      pb_slab_buffer_get_base_buffer
};


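/**
 * Create a new slab and add all its buffers to the manager's free lists.
 *
 * Called with the manager's mutex held.
 */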
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /*
    * FIXME: We should perhaps allow some variation in slab size in order
    * to efficiently reuse slabs.
    */

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if(!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab's virtual address now; buffer mappings are
    * computed relative to it, so the provider is expected to keep the
    * mapping valid after the unmap below. */
   slab->virtual = pb_map(slab->bo,
                          PIPE_BUFFER_USAGE_CPU_READ |
                          PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   pb_unmap(slab->bo);

   numBuffers = slab->bo->base.size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   /* Carve the slab up into buffers and put them all on the free list. */
   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      buf->base.base.refcount = 0;
      buf->base.base.size = mgr->bufSize;
      buf->base.base.alignment = 0;
      buf->base.base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      _glthread_INIT_COND(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


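/**
 * Hand out a free buffer, creating new slabs on demand.
 *
 * Only requests of exactly bufSize bytes, with a compatible alignment,
 * can be satisfied.
 */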
static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              size_t size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;
   int count = DRI_SLABPOOL_ALLOC_RETRIES;

   /* check size */
   assert(size == mgr->bufSize);
   if(size != mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if(!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   /* XXX: check for compatible buffer usage too? */

   _glthread_LOCK_MUTEX(mgr->mutex);

   /* While no slab has free buffers, try to create one, backing off
    * briefly between failed attempts. */
   while (mgr->slabs.next == &mgr->slabs && count > 0) {
      _glthread_UNLOCK_MUTEX(mgr->mutex);
      if (count != DRI_SLABPOOL_ALLOC_RETRIES)
         util_time_sleep(1);
      _glthread_LOCK_MUTEX(mgr->mutex);
      (void) pb_slab_create(mgr);
      count--;
   }

   list = mgr->slabs.next;
   if (list == &mgr->slabs) {
      _glthread_UNLOCK_MUTEX(mgr->mutex);
      return NULL;
   }

   /* Take the first slab with free buffers, removing it from the slabs
    * list if this allocation exhausts it. */
   slab = LIST_ENTRY(struct pb_slab, list, head);
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   _glthread_UNLOCK_MUTEX(mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   ++buf->base.base.refcount;
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


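/**
 * Create a manager that sub-allocates equally sized buffers.
 *
 * @param provider  manager the slabs themselves are allocated from
 * @param bufSize   fixed size of the sub-allocated buffers
 * @param slabSize  size of each slab; ideally a multiple of bufSize,
 *                  as any remainder is wasted
 * @param desc      buffer description the slabs are created with
 */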
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       size_t bufSize,
                       size_t slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);
   LIST_INITHEAD(&mgr->freeSlabs);

   _glthread_INIT_MUTEX(mgr->mutex);

   return &mgr->base;
}


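/**
 * Pick the smallest bucket that can hold the requested size, falling
 * back to the provider for sizes beyond the largest bucket.
 */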
static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    size_t size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   size_t bufSize;
   unsigned i;

   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if(bufSize >= size)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


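/**
 * Create a manager that sub-allocates buffers of sizes between minBufSize
 * and maxBufSize, using a separate slab manager (bucket) for each
 * power-of-two size in that range.
 */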
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             size_t minBufSize,
                             size_t maxBufSize,
                             size_t slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   size_t bufSize;
   unsigned i;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   /* One bucket for each power-of-two size between min and max. */
   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while(bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if(!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if(mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}