u_slab.c revision 99d5c1a13b97b9133a166c00c9fba1bec5f4bd9e
/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
 
#include "util/u_slab.h"

#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"

#include <stdio.h>
 
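/* Written into every block header when a page is created and checked with
 * assert() on every allocation and free, to catch pointers that did not
 * come from this pool. */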
#define UTIL_SLAB_MAGIC 0xcafe4321

/* The block is either allocated memory or free space. */
struct util_slab_block {
   /* The header. */
   /* The next free block; meaningful only while this block is free. */
   struct util_slab_block *next_free;

   intptr_t magic;

   /* Memory after the last member is dedicated to the block itself.
    * The allocated size is always larger than this structure. */
};
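/* Rough page layout, as implied by util_slab_get_block() and
 * util_slab_create() below (a sketch, not a normative description):
 *
 *    struct util_slab_page | block 0 | block 1 | ... | block num_blocks-1
 *
 * where each block is a struct util_slab_block header immediately followed
 * by the caller's item, padded to pool->block_size. */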
 
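/* Return the header of the block at the given index within a page. */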
static struct util_slab_block *
util_slab_get_block(struct util_slab_mempool *pool,
                    struct util_slab_page *page, unsigned index)
{
   return (struct util_slab_block*)
          ((uint8_t*)page + sizeof(struct util_slab_page) +
           (pool->block_size * index));
}
 
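/* Allocate one more page and push all of its blocks onto the free list. */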
static void util_slab_add_new_page(struct util_slab_mempool *pool)
{
   struct util_slab_page *page;
   struct util_slab_block *block;
   int i;

   page = MALLOC(pool->page_size);
   insert_at_tail(&pool->list, page);

   /* Mark all blocks as free. */
   for (i = 0; i < pool->num_blocks-1; i++) {
      block = util_slab_get_block(pool, page, i);
      block->next_free = util_slab_get_block(pool, page, i+1);
      block->magic = UTIL_SLAB_MAGIC;
   }

   block = util_slab_get_block(pool, page, pool->num_blocks-1);
   block->next_free = pool->first_free;
   block->magic = UTIL_SLAB_MAGIC;
   pool->first_free = util_slab_get_block(pool, page, 0);
   pool->num_pages++;

#if 0
   fprintf(stderr, "New page! Num of pages: %i\n", pool->num_pages);
#endif
}
 
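/* Non-locking allocation: pop the first free block and return the memory
 * that follows its header. */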
static void *util_slab_alloc_st(struct util_slab_mempool *pool)
{
   struct util_slab_block *block;

   if (!pool->first_free)
      util_slab_add_new_page(pool);

   block = pool->first_free;
   assert(block->magic == UTIL_SLAB_MAGIC);
   pool->first_free = block->next_free;

   return (uint8_t*)block + sizeof(struct util_slab_block);
}
 
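/* Non-locking free: step back from the user pointer to the block header and
 * push the block onto the free list. Pages are never returned to the heap
 * until util_slab_destroy(). */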
static void util_slab_free_st(struct util_slab_mempool *pool, void *ptr)
{
   struct util_slab_block *block =
         (struct util_slab_block*)
         ((uint8_t*)ptr - sizeof(struct util_slab_block));

   assert(block->magic == UTIL_SLAB_MAGIC);
   block->next_free = pool->first_free;
   pool->first_free = block;
}
 
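/* Mutex-protected wrappers installed when the pool is multithreaded. */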
static void *util_slab_alloc_mt(struct util_slab_mempool *pool)
{
   void *mem;

   pipe_mutex_lock(pool->mutex);
   mem = util_slab_alloc_st(pool);
   pipe_mutex_unlock(pool->mutex);
   return mem;
}

static void util_slab_free_mt(struct util_slab_mempool *pool, void *ptr)
{
   pipe_mutex_lock(pool->mutex);
   util_slab_free_st(pool, ptr);
   pipe_mutex_unlock(pool->mutex);
}
 
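/* Select the locking or non-locking entry points. This is not synchronized
 * itself, so it should not race with concurrent allocations. */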
void util_slab_set_thread_safety(struct util_slab_mempool *pool,
                                 enum util_slab_threading threading)
{
   pool->threading = threading;

   if (threading) {
      pool->alloc = util_slab_alloc_mt;
      pool->free = util_slab_free_mt;
   } else {
      pool->alloc = util_slab_alloc_st;
      pool->free = util_slab_free_st;
   }
}
 
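/* Initialize a pool that hands out fixed-size items of item_size bytes,
 * backed by pages of num_blocks items each. No page is allocated until the
 * first allocation. */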
void util_slab_create(struct util_slab_mempool *pool,
                      unsigned item_size,
                      unsigned num_blocks,
                      enum util_slab_threading threading)
{
   item_size = align(item_size, sizeof(intptr_t));

   pool->num_pages = 0;
   pool->num_blocks = num_blocks;
   pool->block_size = sizeof(struct util_slab_block) + item_size;
   pool->block_size = align(pool->block_size, sizeof(intptr_t));
   pool->page_size = sizeof(struct util_slab_page) +
                     num_blocks * pool->block_size;
   pool->first_free = NULL;

   make_empty_list(&pool->list);

   pipe_mutex_init(pool->mutex);

   util_slab_set_thread_safety(pool, threading);
}
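
/* Minimal usage sketch. "struct my_item" and the block count are made-up
 * examples, and UTIL_SLAB_SINGLETHREADED is assumed to be the
 * single-threaded value of enum util_slab_threading from u_slab.h; alloc
 * and free go through the callbacks installed by
 * util_slab_set_thread_safety():
 *
 *    struct util_slab_mempool pool;
 *    struct my_item *item;
 *
 *    util_slab_create(&pool, sizeof(struct my_item), 64,
 *                     UTIL_SLAB_SINGLETHREADED);
 *    item = (struct my_item *)pool.alloc(&pool);
 *    ...
 *    pool.free(&pool, item);
 *    util_slab_destroy(&pool);
 */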
 
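/* Release every page at once. Outstanding items do not have to be freed
 * individually, but any pointers into the pool become invalid. */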
void util_slab_destroy(struct util_slab_mempool *pool)
{
   struct util_slab_page *page, *temp;

   if (pool->list.next) {
      foreach_s(page, temp, &pool->list) {
         remove_from_list(page);
         FREE(page);
      }
   }

   pipe_mutex_destroy(pool->mutex);
}
172