pb_bufmgr_slab.c revision a175e15f20b2a231cc9d09099e7b6d8aea6c624e
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * S-lab pool implementation.
 *
 * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */
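
/*
 * Overview of this file:
 *
 * pb_slab_manager is a pb_manager that serves fixed-size buffers
 * suballocated from larger "slabs" obtained from an underlying provider
 * manager.  Slabs with free buffers are kept on a partial list; slabs
 * whose buffers are all free may be released back to the provider.
 *
 * pb_slab_range_manager covers a range of buffer sizes with an array of
 * slab managers (one per power-of-two size step) and falls back to the
 * provider for requests above the range.
 */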

#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_thread.h"
#include "pipe/p_defines.h"
#include "pipe/p_util.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


#define DRI_SLABPOOL_ALLOC_RETRIES 100


struct pb_slab;

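/**
 * Buffer suballocated from a slab.  The pb_buffer base allows it to be
 * used anywhere a regular buffer is expected.
 */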
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;      /**< Slab this buffer is suballocated from. */
   struct list_head head;     /**< Link in the slab's free-buffer list. */
   unsigned mapCount;         /**< Nesting count of outstanding maps. */
   size_t start;              /**< Byte offset within the slab. */
   _glthread_Cond event;      /**< Broadcast when mapCount drops to zero. */
};

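/**
 * A slab: one large buffer from the provider, carved into equally sized
 * suballocations.
 */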
struct pb_slab
{
   struct list_head head;          /**< Link in the manager's slab lists. */
   struct list_head freeBuffers;   /**< Buffers not currently allocated. */
   size_t numBuffers;              /**< Total buffers carved from this slab. */
   size_t numFree;                 /**< Buffers currently on freeBuffers. */
   struct pb_slab_buffer *buffers; /**< Array of numBuffers suballocations. */
   struct pb_slab_manager *mgr;    /**< Manager this slab belongs to. */

   struct pb_buffer *bo;           /**< Underlying buffer from the provider. */
   void *virtual;                  /**< CPU mapping of bo. */
};

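/**
 * A pb_manager that serves fixed-size buffers suballocated from slabs.
 */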
struct pb_slab_manager
{
   struct pb_manager base;

   struct pb_manager *provider;    /**< Where new slabs are allocated from. */
   size_t bufSize;                 /**< Fixed size of the buffers served. */
   size_t slabSize;                /**< Size of each slab. */
   struct pb_desc desc;            /**< Description used for new slabs. */

   struct list_head slabs;         /**< Slabs with at least one free buffer. */
   struct list_head freeSlabs;     /**< Slabs with all buffers free. */

   _glthread_Mutex mutex;
};

/**
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   size_t minBufSize;
   size_t maxBufSize;
   struct pb_desc desc;

   unsigned numBuckets;
   size_t *bucketSizes;            /**< Currently unused. */
   struct pb_manager **buckets;    /**< One slab manager per size bucket. */
};


static INLINE struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static INLINE struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static INLINE struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Return a buffer to its slab's free list, releasing fully free slabs
 * back to the provider when they are no longer needed.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   _glthread_LOCK_MUTEX(mgr->mutex);

   assert(buf->base.base.refcount == 0);

   buf->mapCount = 0;

   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

   /* If the slab was full it is on no list; put it back on the partial
    * slabs list now that it has a free buffer again. */
   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* A completely free slab moves to the free slabs list. */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DEL(list);
      LIST_ADDTAIL(list, &mgr->freeSlabs);
   }

   /* Release cached free slabs back to the provider, unless this slab just
    * became fully free while other slabs still have free buffers, in which
    * case it is kept cached for reuse. */
   if (mgr->slabs.next == &mgr->slabs ||
       slab->numFree != slab->numBuffers) {

      struct list_head *next;

      for (list = mgr->freeSlabs.next, next = list->next;
           list != &mgr->freeSlabs;
           list = next, next = list->next) {

         slab = LIST_ENTRY(struct pb_slab, list, head);

         LIST_DELINIT(list);
         pb_reference(&slab->bo, NULL);
         FREE(slab->buffers);
         FREE(slab);
      }
   }

   _glthread_UNLOCK_MUTEX(mgr->mutex);
}


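/**
 * Map the buffer: just bumps the map count and returns a pointer into the
 * slab's persistent CPU mapping; the underlying buffer is not mapped again.
 */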
static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


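/**
 * Unmap the buffer, signalling buf->event once the map count reaches zero.
 */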
static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   if (buf->mapCount == 0)
      _glthread_COND_BROADCAST(buf->event);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               unsigned *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


static const struct pb_vtbl
pb_slab_buffer_vtbl = {
      pb_slab_buffer_destroy,
      pb_slab_buffer_map,
      pb_slab_buffer_unmap,
      pb_slab_buffer_get_base_buffer
};


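/**
 * Create a new slab: allocate a buffer of slabSize bytes from the
 * provider, map it, and carve it into bufSize-sized suballocations on the
 * slab's free list.  Called with the manager's mutex held.
 */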
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /*
    * FIXME: We should perhaps allow some variation in slabsize in order
    * to efficiently reuse slabs.
    */

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if (!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note that the CPU pointer obtained here is kept and used after the
    * pb_unmap below; this relies on the provider keeping the mapping valid
    * for the buffer's lifetime. */
   slab->virtual = pb_map(slab->bo,
                          PIPE_BUFFER_USAGE_CPU_READ |
                          PIPE_BUFFER_USAGE_CPU_WRITE);
   if (!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   pb_unmap(slab->bo);

   numBuffers = slab->bo->base.size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      buf->base.base.refcount = 0;
      buf->base.base.size = mgr->bufSize;
      buf->base.base.alignment = 0;
      buf->base.base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      _glthread_INIT_COND(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


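/**
 * Return true if the provided alignment satisfies the requested one,
 * i.e. the provided alignment is a multiple of the requested alignment.
 */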
static int
check_alignment(size_t requested, size_t provided)
{
   return requested <= provided && (provided % requested) == 0;
}


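/**
 * Allocate one of the manager's fixed-size buffers.  The requested size
 * may not exceed bufSize, and the requested alignment must be satisfiable
 * by both bufSize and the manager's base alignment.
 */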
static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              size_t size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;
   int count = DRI_SLABPOOL_ALLOC_RETRIES;

   /* check size */
   assert(size <= mgr->bufSize);
   if (size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(check_alignment(desc->alignment, mgr->desc.alignment));
   if (!check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(check_alignment(desc->alignment, mgr->bufSize));
   if (!check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   /* XXX: check for compatible buffer usage too? */

   _glthread_LOCK_MUTEX(mgr->mutex);

   /* Try to create a new slab while the list of partially free slabs is
    * empty, backing off briefly between attempts. */
   while (mgr->slabs.next == &mgr->slabs && count > 0) {
      _glthread_UNLOCK_MUTEX(mgr->mutex);
      if (count != DRI_SLABPOOL_ALLOC_RETRIES)
         util_time_sleep(1);
      _glthread_LOCK_MUTEX(mgr->mutex);
      (void) pb_slab_create(mgr);
      count--;
   }

   list = mgr->slabs.next;
   if (list == &mgr->slabs) {
      _glthread_UNLOCK_MUTEX(mgr->mutex);
      return NULL;
   }
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* Remove the slab from the partial list once its last buffer is taken. */
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   _glthread_UNLOCK_MUTEX(mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   ++buf->base.base.refcount;
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       size_t bufSize,
                       size_t slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);
   LIST_INITHEAD(&mgr->freeSlabs);

   _glthread_INIT_MUTEX(mgr->mutex);

   return &mgr->base;
}

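/*
 * Example usage (an illustrative sketch only; "provider" stands for any
 * existing pb_manager, e.g. a winsys-supplied one, and the sizes, alignment
 * and usage flags below are arbitrary):
 *
 *    struct pb_desc desc;
 *    struct pb_manager *slab_mgr;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 64;
 *    desc.usage = PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE;
 *
 *    slab_mgr = pb_slab_manager_create(provider, 1024, 64 * 1024, &desc);
 *    buf = slab_mgr->create_buffer(slab_mgr, 1024, &desc);
 *    ...
 *    pb_reference(&buf, NULL);      release: the buffer returns to its slab
 *    slab_mgr->destroy(slab_mgr);
 */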

/**
 * Allocate a buffer by routing the request to the bucket (slab manager)
 * for the smallest power-of-two size that fits it.
 */
static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    size_t size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   size_t bufSize;
   unsigned i;

   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if (bufSize >= size)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             size_t minBufSize,
                             size_t maxBufSize,
                             size_t slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   size_t bufSize;
   unsigned i;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   /* One bucket per power-of-two step from minBufSize up to maxBufSize. */
   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while (bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if (!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if (mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}
