pb_bufmgr_slab.c revision 3a49497f102f2b64a8755d3cf65b7c0386e95aac
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * S-lab pool implementation.
 *
 * @sa http://en.wikipedia.org/wiki/Slab_allocation
 *
 * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */

#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "pipe/p_thread.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


struct pb_slab;


/**
 * Buffer in a slab.
 *
 * Sub-allocation of a contiguous buffer.
 */
struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;

   struct list_head head;

   unsigned mapCount;

   /** Offset relative to the start of the slab buffer. */
   pb_size start;

   /** Use when validating, to signal that all mappings are finished */
   /* TODO: Actually validation does not reach this stage yet */
   pipe_condvar event;
};


/**
 * Slab -- a contiguous piece of memory.
 */
struct pb_slab
{
   struct list_head head;
   struct list_head freeBuffers;
   pb_size numBuffers;
   pb_size numFree;

   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   /** Buffer from the provider */
   struct pb_buffer *bo;

   void *virtual;
};


/**
 * The slab manager. It adds and removes slabs as needed to satisfy the
 * allocation and destruction of individual buffers.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   /** From where we get our buffers */
   struct pb_manager *provider;

   /** Size of the buffers we hand out downstream */
   pb_size bufSize;

   /** Size of the buffers we request upstream */
   pb_size slabSize;

   /**
    * Alignment and usage to be used to allocate the slab buffers.
    *
    * We can only provide buffers which are consistent (in alignment, usage)
    * with this description.
    */
   struct pb_desc desc;

   /**
    * Partial slabs
    *
    * Full slabs are not stored in any list. Empty slabs are destroyed
    * immediately.
    */
   struct list_head slabs;

   pipe_mutex mutex;
};


/**
 * Wrapper around several slabs, therefore capable of handling buffers of
 * multiple sizes.
 *
 * This buffer manager just dispatches buffer allocations to the appropriate
 * slab manager, according to the requested buffer size, or bypasses the slab
 * managers altogether for even larger sizes.
 *
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size minBufSize;
   pb_size maxBufSize;

   /** @sa pb_slab_manager::desc */
   struct pb_desc desc;

   unsigned numBuckets;
   pb_size *bucketSizes;

   /** Array of pb_slab_manager, one for each bucket size */
   struct pb_manager **buckets;
};


static INLINE struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static INLINE struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static INLINE struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Return a buffer to its slab's free list, destroying the slab altogether
 * once all of its buffers are free again.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   pipe_mutex_lock(mgr->mutex);

   assert(!pipe_is_referenced(&buf->base.base.reference));

   buf->mapCount = 0;

   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

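   /* A self-linked head means the slab was full and therefore not on the
    * manager's partial-slab list; re-add it now that a buffer is free. */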
   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* If the slab becomes totally empty, free it */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DELINIT(list);
      pb_reference(&slab->bo, NULL);
      FREE(slab->buffers);
      FREE(slab);
   }

   pipe_mutex_unlock(mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   if (buf->mapCount == 0)
       pipe_condvar_broadcast(buf->event);
}


static enum pipe_error
pb_slab_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   return pb_validate(buf->slab->bo, vl, flags);
}


static void
pb_slab_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_fence(buf->slab->bo, fence);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               pb_size *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


static const struct pb_vtbl
pb_slab_buffer_vtbl = {
      pb_slab_buffer_destroy,
      pb_slab_buffer_map,
      pb_slab_buffer_unmap,
      pb_slab_buffer_validate,
      pb_slab_buffer_fence,
      pb_slab_buffer_get_base_buffer
};


/**
 * Create a new slab.
 *
 * Called when we run out of partially filled slabs.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if(!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note down the slab virtual address. All mappings are accessed directly
    * through this address so it is required that the buffer is pinned. */
   slab->virtual = pb_map(slab->bo,
                          PIPE_BUFFER_USAGE_CPU_READ |
                          PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }
   pb_unmap(slab->bo);

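   /* The slab is carved into bufSize-sized sub-buffers; any remainder at the
    * end of the slab is simply left unused. */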
   numBuffers = slab->bo->base.size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      pipe_reference_init(&buf->base.base.reference, 0);
      buf->base.base.size = mgr->bufSize;
      buf->base.base.alignment = 0;
      buf->base.base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      pipe_condvar_init(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Add this slab to the list of partial slabs */
   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              pb_size size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if(size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
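   /* Sub-buffers start at multiples of bufSize within the slab, so the
    * requested alignment must also divide bufSize. */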
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if(!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   assert(pb_check_usage(desc->usage, mgr->desc.usage));
   if(!pb_check_usage(desc->usage, mgr->desc.usage))
      return NULL;

   pipe_mutex_lock(mgr->mutex);

   /* Create a new slab, if we run out of partial slabs */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         pipe_mutex_unlock(mgr->mutex);
         return NULL;
      }
   }

   /* Allocate the buffer from a partial (or just created) slab */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);

   /* If totally full remove from the partial slab list */
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   pipe_mutex_unlock(mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;

   return &buf->base;
}



static void
pb_slab_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}

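/**
 * Create a new slab manager.
 *
 * The returned manager hands out fixed-size @c bufSize buffers, sub-allocated
 * from @c slabSize slabs obtained from @c provider.
 *
 * Usage sketch (the provider variable and the sizes below are hypothetical,
 * for illustration only):
 *
 * @code
 *    struct pb_desc desc;
 *
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 64;
 *    desc.usage = PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE;
 *
 *    slab_mgr = pb_slab_manager_create(provider, 4*1024, 1024*1024, &desc);
 * @endcode
 */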
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       pb_size bufSize,
                       pb_size slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;
   mgr->base.flush = pb_slab_manager_flush;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);

   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}


static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    pb_size size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   pb_size bufSize;
   unsigned i;

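   /* Pick the smallest bucket whose buffer size fits the request. */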
   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if(bufSize >= size)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_flush(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);

   /* Individual slab managers don't hold any temporary buffers, so there is
    * no need to flush them. */

   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


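/**
 * Create a new slab range manager.
 *
 * Bucket sizes are successive powers of two, starting at @c minBufSize and
 * growing until @c maxBufSize is reached, with one slab manager per bucket.
 * For example (illustrative values only), minBufSize = 1024 and
 * maxBufSize = 64*1024 yield seven buckets: 1, 2, 4, 8, 16, 32 and 64 KiB.
 * Requests too large for the largest bucket are handed directly to
 * @c provider.
 */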
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             pb_size minBufSize,
                             pb_size maxBufSize,
                             pb_size slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   pb_size bufSize;
   unsigned i;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
   mgr->base.flush = pb_slab_range_manager_flush;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while(bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if(!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if(mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}
