pb_bufmgr_slab.c revision ea4bf267e4b023b08043f91ac44592fed1736e7f
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * S-lab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */
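
/*
 * Illustrative usage sketch (not part of the original file): these managers
 * are meant to be layered on top of another pb_manager that supplies the
 * backing slabs.  Assuming a winsys already has some `provider` manager and
 * wants power-of-two buckets from 64 bytes up to 16 KB, backed by 1 MB slabs:
 *
 *    struct pb_desc desc;
 *    struct pb_manager *mgr;
 *
 *    desc.alignment = 64;      /- hypothetical values -/
 *    desc.usage = 0;
 *    mgr = pb_slab_range_manager_create(provider, 64, 16 * 1024,
 *                                       1024 * 1024, &desc);
 *
 * Buffers are then obtained through mgr->create_buffer() as with any other
 * pb_manager.
 */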

#include "pipe/p_compiler.h"
#include "pipe/p_error.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


struct pb_slab;

/**
 * Buffer in a slab.
 *
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   struct list_head head;

   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   size_t start;

   /** Used when validating, to signal that all mappings are finished. */
   /* TODO: Actually validation does not reach this stage yet */
   pipe_condvar event;
};


/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   size_t numBuffers;
   size_t numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   void *virtual;
};


/**
 * Manager of a pool of equally sized slab buffers.
 *
 * It adds/removes slabs as needed in order to meet the allocation/destruction
 * of individual buffers.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand on downstream */
   size_t bufSize;

   /** Size of the buffers we request upstream */
   size_t slabSize;

   /**
    * Alignment, usage to be used to allocate the slab buffers.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs
    *
    * Full slabs are not stored in any list. Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   pipe_mutex mutex;
};


/**
 * Wrapper around several slab managers, therefore capable of handling buffers
 * of multiple sizes.
 *
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for even greater sizes.
 *
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   size_t minBufSize;
   size_t maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   size_t *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};

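/*
 * Worked example (illustrative, assuming minBufSize = 64 and
 * maxBufSize = 16384): bucket sizes double from 64, 128, 256, ... up to
 * 16384, giving numBuckets = 9.  A request of 3000 bytes lands in the
 * 4096-byte bucket, while anything above 16384 bytes falls through to the
 * provider.
 */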

static INLINE struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static INLINE struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static INLINE struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}

/**
 * Return a buffer to its slab's free list.
 *
 * The slab is put back on the manager's partial-slab list if necessary, and
 * destroyed altogether once all of its buffers are free.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   pipe_mutex_lock(mgr->mutex);

   assert(buf->base.base.refcount == 0);

   buf->mapCount = 0;

   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DELINIT(list);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   pipe_mutex_unlock(mgr->mutex);
}

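/**
 * Map a slab sub-buffer.
 *
 * The containing slab is kept persistently mapped, so this merely returns a
 * pointer at the right offset inside the slab's virtual address range.
 */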
static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}

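/**
 * Unmap a slab sub-buffer, signalling any waiters once the last mapping is
 * released.
 */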
static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   if (buf->mapCount == 0)
      pipe_condvar_broadcast(buf->event);
}

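/**
 * Validation is delegated to the buffer object backing the whole slab.
 */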
static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
                        struct pb_validate *vl,
                        unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   return pb_validate(buf->slab->bo, vl, flags);
}

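/**
 * Fencing is likewise applied to the slab's backing buffer object.
 */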
static void
pb_slab_buffer_fence(struct pb_buffer *_buf,
                     struct pipe_fence_handle *fence)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_fence(buf->slab->bo, fence);
}

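/**
 * Report the underlying slab buffer object and the sub-buffer's offset
 * within it.
 */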
static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               unsigned *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}

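/** Virtual function table shared by all slab sub-buffers. */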
static const struct pb_vtbl
pb_slab_buffer_vtbl = {
      pb_slab_buffer_destroy,
      pb_slab_buffer_map,
      pb_slab_buffer_unmap,
      pb_slab_buffer_validate,
      pb_slab_buffer_fence,
      pb_slab_buffer_get_base_buffer
};


/**
 * Create a new slab.
 *
 * Called when we run out of free slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if(!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab virtual address. All mappings are accessed directly
    * through this address so it is required that the buffer is pinned. */
   slab->virtual = pb_map(slab->bo,
                          PIPE_BUFFER_USAGE_CPU_READ |
                          PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }
   pb_unmap(slab->bo);

   numBuffers = slab->bo->base.size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      buf->base.base.refcount = 0;
      buf->base.base.size = mgr->bufSize;
      buf->base.base.alignment = 0;
      buf->base.base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      pipe_condvar_init(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}

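/**
 * Allocate a sub-buffer from the manager's slabs.
 *
 * A new slab is created if no partial slab is available, and the returned
 * buffer is taken from the head of that slab's free list.
 */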
static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              size_t size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if(size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if(!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if(!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   pipe_mutex_lock(mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         pipe_mutex_unlock(mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   pipe_mutex_unlock(mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   ++buf->base.base.refcount;
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;

   return &buf->base;
}

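/**
 * Flushing is simply forwarded to the provider; the slab manager itself
 * holds no delayed buffers.
 */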
static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}

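/**
 * Destroy the slab manager itself.  Note that buffers still allocated from
 * it are not cleaned up here (see the TODO below).
 */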
static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}

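/**
 * Create a slab manager that hands out buffers of a single size.
 *
 * @param provider  manager the slabs themselves are allocated from
 * @param bufSize   size of the buffers handed out downstream
 * @param slabSize  size of the slabs requested from the provider
 * @param desc      alignment/usage the slab buffers are allocated with
 */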
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       size_t bufSize,
                       size_t slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);

   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}

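/**
 * Dispatch the allocation to the first bucket whose buffer size can hold the
 * request; requests larger than the biggest bucket go straight to the
 * provider.
 */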
static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    size_t size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   size_t bufSize;
   unsigned i;

   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if(bufSize >= size)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}

502pb_slab_range_manager_flush(struct pb_manager *_mgr)
503{
504   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
505
506   /* Individual slabs don't hold any temporary buffers so no need to call them */
507
508   assert(mgr->provider->flush);
509   if(mgr->provider->flush)
510      mgr->provider->flush(mgr->provider);
511}
512
513
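/**
 * Destroy all bucket managers and then the range manager itself.
 */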
static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}

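/**
 * Create a slab range manager covering buffer sizes from minBufSize to
 * maxBufSize, with one power-of-two bucket (each backed by its own slab
 * manager) per size step.
 *
 * @param provider   manager the slabs are ultimately allocated from
 * @param minBufSize size of the smallest bucket
 * @param maxBufSize size of the largest bucket; larger requests bypass the
 *                   slabs and go directly to the provider
 * @param slabSize   slab size used by every bucket
 * @param desc       alignment/usage the slab buffers are allocated with
 */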
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             size_t minBufSize,
                             size_t maxBufSize,
                             size_t slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   size_t bufSize;
   unsigned i;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while(bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if(!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if(mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}