mempool.c revision 1946089a109251655c5438d92c539bd2930e71ea
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

/* Append an element to the pool; there must be room below min_nr. */
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

/* Pop the most recently added element; the pool must not be empty. */
static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

/* Free all remaining elements, then the element array and the pool itself. */
static void free_pool(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both alloc_fn()
 * and free_fn() may sleep as well, as long as mempool_alloc() is not
 * called from IRQ context.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, -1);
}
EXPORT_SYMBOL(mempool_create);

/*
 * Like mempool_create(), but the pool structure and the element array are
 * allocated on NUMA node @node_id (-1 means no node preference).
 */
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data, int node_id)
{
	mempool_t *pool;
	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
	if (!pool)
		return NULL;
	memset(pool, 0, sizeof(*pool));
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
					GFP_KERNEL, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (unlikely(!element)) {
			free_pool(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
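
/*
 * Illustrative sketch (not part of the original file): a minimal pool built
 * on user-defined element functions. The names "struct my_request",
 * MY_POOL_MIN, my_alloc(), my_free() and my_pool are hypothetical; the
 * callbacks simply wrap kmalloc()/kfree(), mirroring the
 * mempool_alloc_slab()/mempool_free_slab() helpers defined later in this file.
 *
 *	struct my_request {
 *		int foo;
 *	};
 *
 *	#define MY_POOL_MIN 4
 *
 *	static void *my_alloc(unsigned int __nocast gfp_mask, void *pool_data)
 *	{
 *		return kmalloc(sizeof(struct my_request), gfp_mask);
 *	}
 *
 *	static void my_free(void *element, void *pool_data)
 *	{
 *		kfree(element);
 *	}
 *
 *	static mempool_t *my_pool;
 *
 *	static int __init my_init(void)
 *	{
 *		my_pool = mempool_create(MY_POOL_MIN, my_alloc, my_free, NULL);
 *		return my_pool ? 0 : -ENOMEM;
 *	}
 */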

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note that the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
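
/*
 * Illustrative sketch (not part of the original file): growing a pool when
 * the expected load increases. "my_pool" and the sizes are hypothetical.
 * Note that mempool_resize() may return 0 even when the pool could not be
 * grown all the way immediately; later mempool_free() calls will top it up.
 *
 *	if (mempool_resize(my_pool, 16, GFP_KERNEL))
 *		printk(KERN_WARNING "could not resize pool\n");
 */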

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps. The caller
 * has to guarantee that all elements have been returned to the pool (i.e.
 * freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
	if (pool->curr_nr != pool->min_nr)
		BUG();		/* There were outstanding elements */
	free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
{
	void *element;
	unsigned long flags;
	DEFINE_WAIT(wait);
	int gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	/* First try without sleeping or I/O, so a failure falls back to the pool quickly */
	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
	if (!pool->curr_nr)
		io_schedule();
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
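
/*
 * Illustrative sketch (not part of the original file): the typical
 * allocate/use/return cycle. "my_pool" and "struct my_request" are the
 * hypothetical names from the earlier sketch. Because GFP_KERNEL includes
 * __GFP_WAIT, the allocation below does not fail from process context: if
 * both the underlying allocator and the pool are exhausted, it sleeps until
 * an element is returned via mempool_free().
 *
 *	struct my_request *req;
 *
 *	req = mempool_alloc(my_pool, GFP_KERNEL);
 *	... use req, e.g. submit it for I/O and wait for completion ...
 *	mempool_free(req, my_pool);
 */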

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	smp_mb();
	if (pool->curr_nr < pool->min_nr) {
		/* The unlocked check above is only a hint; recheck under the lock */
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
{
	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

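/*
 * Illustrative sketch (not part of the original file): the most common
 * pattern, a pool backed by a slab cache via the two helpers above. The
 * cache name, object size and pool depth are hypothetical, and
 * kmem_cache_create()/kmem_cache_destroy() are assumed in their 2.6-era
 * form. mempool_destroy() requires that every element has already been
 * returned to the pool.
 *
 *	static kmem_cache_t *my_cache;
 *	static mempool_t *my_pool;
 *
 *	static int __init my_setup(void)
 *	{
 *		my_cache = kmem_cache_create("my_objects", 256, 0, 0, NULL, NULL);
 *		if (!my_cache)
 *			return -ENOMEM;
 *		my_pool = mempool_create(8, mempool_alloc_slab,
 *					 mempool_free_slab, my_cache);
 *		if (!my_pool) {
 *			kmem_cache_destroy(my_cache);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 *
 *	static void my_teardown(void)
 *	{
 *		mempool_destroy(my_pool);
 *		kmem_cache_destroy(my_cache);
 *	}
 */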