/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * S-lab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */

#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "pipe/p_debug.h"
#include "pipe/p_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


struct pb_slab;


/**
 * Buffer in a slab.
 *
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   struct list_head head;

   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   size_t start;

   /** Used during validation, to signal that all mappings have finished */
   /* TODO: Actually validation does not reach this stage yet */
   pipe_condvar event;
};


/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   size_t numBuffers;
   size_t numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   void *virtual;
};


/**
 * The slab manager: adds and removes slabs as needed to satisfy the
 * allocation and destruction of individual buffers.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand on downstream */
   size_t bufSize;

   /** Size of the buffers we request upstream */
   size_t slabSize;

   /**
    * Alignment and usage used to allocate the slab buffers.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs
    *
    * Full slabs are not stored in any list. Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   pipe_mutex mutex;
};


/**
 * Wrapper around several slab managers, capable of handling buffers of
 * multiple sizes.
 *
 * This buffer manager simply dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for larger sizes.
 *
 * The data of this structure remains constant after initialization and thus
 * needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   size_t minBufSize;
   size_t maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   size_t *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};

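/*
 * Example bucket layout (illustrative values, not from this file): with
 * minBufSize = 4096 and maxBufSize = 65536 the range manager ends up with
 * numBuckets = 5 slab managers, for buffer sizes 4096, 8192, 16384, 32768
 * and 65536 bytes. A request for 5000 bytes would be served (rounded up)
 * by the 8192-byte bucket.
 */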

static INLINE struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static INLINE struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static INLINE struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Remove a buffer from whatever list it is on and put it back on the slab's
 * free list.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   pipe_mutex_lock(mgr->mutex);

   assert(buf->base.base.refcount == 0);

   buf->mapCount = 0;

   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

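   /* The slab was unlinked from the partials list when it became full;
    * now that one of its buffers is free again, put it back. */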
   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DELINIT(list);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   pipe_mutex_unlock(mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

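   /* The slab is mapped once at creation and stays mapped, so mapping a
    * sub-allocated buffer is just pointer arithmetic; flags is unused. */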
   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   if (buf->mapCount == 0)
       pipe_condvar_broadcast(buf->event);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               unsigned *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


static const struct pb_vtbl
pb_slab_buffer_vtbl = {
      pb_slab_buffer_destroy,
      pb_slab_buffer_map,
      pb_slab_buffer_unmap,
      pb_slab_buffer_get_base_buffer
};


/**
 * Create a new slab.
 *
 * Called when we run out of partial slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if(!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab virtual address. All mappings are accessed directly
    * through this address so it is required that the buffer is pinned. */
   slab->virtual = pb_map(slab->bo,
                          PIPE_BUFFER_USAGE_CPU_READ |
                          PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }
   pb_unmap(slab->bo);

   numBuffers = slab->bo->base.size / mgr->bufSize;
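   /* E.g. with bufSize = 1024 and slabSize = 64*1024 (illustrative values,
    * chosen by whoever created this manager) each slab is carved into 64
    * equally sized buffers. */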

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      buf->base.base.refcount = 0;
      buf->base.base.size = mgr->bufSize;
      buf->base.base.alignment = 0;
      buf->base.base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      pipe_condvar_init(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              size_t size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if(size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if(!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if(!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   pipe_mutex_lock(mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         pipe_mutex_unlock(mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   pipe_mutex_unlock(mgr->mutex);
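   /* The buffer was unlinked from the free list above and is not yet
    * visible to other threads, so it is safe to touch it without the
    * mutex held. */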
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   ++buf->base.base.refcount;
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       size_t bufSize,
                       size_t slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);

   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}

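/*
 * Example usage (a sketch; "provider" stands for any existing pb_manager,
 * and the sizes and usage flags are illustrative, not taken from this file):
 *
 *    struct pb_desc desc;
 *    struct pb_manager *slab_mgr;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 64;
 *    desc.usage = PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE;
 *
 *    slab_mgr = pb_slab_manager_create(provider, 1024, 64*1024, &desc);
 *    if (slab_mgr) {
 *       buf = slab_mgr->create_buffer(slab_mgr, 1024, &desc);
 *       ...
 *       pb_reference(&buf, NULL);
 *       slab_mgr->destroy(slab_mgr);
 *    }
 */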

static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    size_t size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   size_t bufSize;
   unsigned i;

   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if(bufSize >= size)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);

   /* Individual slab managers don't hold any temporary buffers, so there is
    * no need to flush them. */

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             size_t minBufSize,
                             size_t maxBufSize,
                             size_t slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   size_t bufSize;
   unsigned i;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while(bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if(!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if(mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}
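

/*
 * Example usage (a sketch; "provider" stands for any existing pb_manager,
 * and all numeric values are illustrative):
 *
 *    struct pb_desc desc;
 *    struct pb_manager *mgr;
 *
 *    desc.alignment = 64;
 *    desc.usage = PIPE_BUFFER_USAGE_GPU_READ;
 *
 *    mgr = pb_slab_range_manager_create(provider,
 *                                       4096,      minBufSize
 *                                       64*1024,   maxBufSize
 *                                       1024*1024, slabSize
 *                                       &desc);
 *
 * Buffers up to 64 KiB are then sub-allocated from slabs; anything larger
 * falls through to the provider.
 */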