Lines matching refs:pool (lib/genalloc.c, the Linux kernel's general-purpose allocator for special memory)

15  * available.  If new memory is added to the pool a lock has to be
146 * gen_pool_create - create a new special memory pool
148 * @nid: node id of the node the pool structure should be allocated on, or -1
150 * Create a new special memory pool that can be used to manage special purpose
155 struct gen_pool *pool;
157 pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
158 if (pool != NULL) {
159 spin_lock_init(&pool->lock);
160 INIT_LIST_HEAD(&pool->chunks);
161 pool->min_alloc_order = min_alloc_order;
162 pool->algo = gen_pool_first_fit;
163 pool->data = NULL;
165 return pool;
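
As a minimal usage sketch (the name sram_pool and the granule order are illustrative assumptions, not taken from this file), a pool whose bitmap bits each track 32 bytes could be created like this:

    #include <linux/genalloc.h>

    static struct gen_pool *sram_pool;    /* hypothetical pool handle */

    static int sram_pool_init(void)
    {
            /* min_alloc_order = 5: each bitmap bit covers 2^5 = 32 bytes;
             * nid = -1: no NUMA preference for the pool structure. */
            sram_pool = gen_pool_create(5, -1);
            if (!sram_pool)
                    return -ENOMEM;
            return 0;
    }

gen_pool_create() only allocates the bookkeeping structure; the pool manages no memory until a chunk is added.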
170 * gen_pool_add_virt - add a new chunk of special memory to the pool
171 * @pool: pool to add new memory chunk to
172 * @virt: virtual starting address of memory chunk to add to pool
173 * @phys: physical starting address of memory chunk to add to pool
174 * @size: size in bytes of the memory chunk to add to pool
178 * Add a new chunk of special memory to the specified pool.
182 int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
186 int nbits = size >> pool->min_alloc_order;
199 spin_lock(&pool->lock);
200 list_add_rcu(&chunk->next_chunk, &pool->chunks);
201 spin_unlock(&pool->lock);
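
A sketch of feeding the pool, assuming a hypothetical 64 KiB on-chip SRAM at physical address 0x40000000 (both values invented for illustration):

    #include <linux/io.h>
    #include <linux/sizes.h>

    static int sram_add_region(void)
    {
            void __iomem *virt = ioremap(0x40000000, SZ_64K);

            if (!virt)
                    return -ENOMEM;
            /* returns 0 on success, a negative errno on failure */
            return gen_pool_add_virt(sram_pool, (unsigned long)virt,
                                     0x40000000, SZ_64K, -1);
    }

When the physical address is irrelevant, the gen_pool_add() wrapper in <linux/genalloc.h> calls gen_pool_add_virt() with phys set to -1.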
209 * @pool: pool to allocate from
214 phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
220 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
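
For example (continuing the hypothetical sram_pool), translating an address the pool handed out back to its physical counterpart:

    unsigned long vaddr = gen_pool_alloc(sram_pool, 256);
    phys_addr_t paddr;

    if (vaddr) {
            /* returns -1 if addr does not belong to any chunk */
            paddr = gen_pool_virt_to_phys(sram_pool, vaddr);
    }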
233 * gen_pool_destroy - destroy a special memory pool
234 * @pool: pool to destroy
236 * Destroy the specified special memory pool. Verifies that there are no
239 void gen_pool_destroy(struct gen_pool *pool)
243 int order = pool->min_alloc_order;
246 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
256 kfree(pool);
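
Teardown order matters: in this version of the file, gen_pool_destroy() BUG()s if any bitmap bit is still set, so every allocation must be returned first. A sketch with the hypothetical pool from above:

    gen_pool_free(sram_pool, vaddr, 256);   /* return every allocation */
    gen_pool_destroy(sram_pool);            /* then drop chunks and pool */
    sram_pool = NULL;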
262 * gen_pool_alloc - allocate special memory from the pool
263 * @pool: pool to allocate from
264 * @size: number of bytes to allocate from the pool
266 * Allocate the requested number of bytes from the specified pool.
267 * Uses the pool allocation function (with first-fit algorithm by default).
271 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
275 int order = pool->min_alloc_order;
287 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
293 start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
294 pool->data);
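
Failure is signalled by a zero return, not an errno, so callers check the address itself (the size here is illustrative):

    unsigned long addr;

    addr = gen_pool_alloc(sram_pool, 512);
    if (!addr)
            return -ENOMEM;   /* no run of free granules was large enough */

Internally the requested size is rounded up to whole min_alloc_order granules before the bitmap search.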
316 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
317 * @pool: pool to allocate from
318 * @size: number of bytes to allocate from the pool
321 * Allocate the requested number of bytes from the specified pool.
322 * Uses the pool allocation function (with first-fit algorithm by default).
326 void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
330 if (!pool)
333 vaddr = gen_pool_alloc(pool, size);
338 *dma = gen_pool_virt_to_phys(pool, vaddr);
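
A sketch of the DMA-oriented wrapper; note the returned dma value is produced by gen_pool_virt_to_phys(), so it is only meaningful if the chunk was added through gen_pool_add_virt() with a real physical address:

    dma_addr_t dma;
    void *vaddr;

    vaddr = gen_pool_dma_alloc(sram_pool, SZ_1K, &dma);
    if (!vaddr)
            return -ENOMEM;
    /* hand 'dma' to the device, use 'vaddr' from the CPU */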
345 * gen_pool_free - free allocated special memory back to the pool
346 * @pool: pool to free to
347 * @addr: starting address of memory to free back to pool
351 * pool. Cannot be used in an NMI handler on architectures without
354 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
357 int order = pool->min_alloc_order;
366 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
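
Unlike kfree(), the caller must remember the allocation size and pass back exactly what was requested from gen_pool_alloc(). Sketch:

    /* size must match what was passed to gen_pool_alloc() */
    gen_pool_free(sram_pool, addr, 512);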
384 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
385 * @pool: the generic memory pool
389 * Call @func for every chunk of the generic memory pool. The @func is
392 void gen_pool_for_each_chunk(struct gen_pool *pool,
393 void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
399 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
400 func(pool, chunk, data);
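
Because @func runs under rcu_read_lock, the callback must not sleep. A minimal illustrative walker that logs each chunk's bounds:

    static void show_chunk(struct gen_pool *pool,
                           struct gen_pool_chunk *chunk, void *data)
    {
            /* end_addr is the chunk's last byte, inclusive */
            pr_info("chunk %lx-%lx\n", chunk->start_addr, chunk->end_addr);
    }

    /* somewhere in non-atomic driver code: */
    gen_pool_for_each_chunk(sram_pool, show_chunk, NULL);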
406 * addr_in_gen_pool - checks if an address falls within the range of a pool
407 * @pool: the generic memory pool
411 * Check if the range of addresses falls within the specified pool. Returns
412 * true if the entire range is contained in the pool and false otherwise.
414 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
422 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
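
Useful as a guard before freeing or handing a region to hardware (start and len are illustrative):

    if (!addr_in_gen_pool(sram_pool, start, len))
            return -EINVAL;   /* range not fully inside any chunk */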
435 * gen_pool_avail - get available free space of the pool
436 * @pool: pool to get available free space
438 * Return available free space of the specified pool.
440 size_t gen_pool_avail(struct gen_pool *pool)
446 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
454 * gen_pool_size - get size in bytes of memory managed by the pool
455 * @pool: pool to get size
457 * Return size in bytes of memory managed by the pool.
459 size_t gen_pool_size(struct gen_pool *pool)
465 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
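
Both accessors walk the chunk list under RCU and return byte counts, so a quick occupancy report is one call (the label is illustrative):

    pr_info("sram: %zu of %zu bytes free\n",
            gen_pool_avail(sram_pool), gen_pool_size(sram_pool));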
474 * @pool: pool to change allocation algorithm
478 * Call @algo for each memory allocation in the pool.
482 void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
486 pool->algo = algo;
487 if (!pool->algo)
488 pool->algo = gen_pool_first_fit;
490 pool->data = data;
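
Passing a NULL @algo restores the first-fit default, as the code above shows. Switching the hypothetical pool to the best-fit search that genalloc also provides:

    gen_pool_set_algo(sram_pool, gen_pool_best_fit, NULL);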
578 * @nid: node id of the node the pool structure should be allocated on, or -1
580 * Create a new special memory pool that can be used to manage special purpose
581 * memory not managed by the regular kmalloc/kfree interface. The pool will be
587 struct gen_pool **ptr, *pool;
591 pool = gen_pool_create(min_alloc_order, nid);
592 if (pool) {
593 *ptr = pool;
599 return pool;
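
A probe-time sketch, assuming the three-argument signature this version of devm_gen_pool_create() takes (my_probe is hypothetical; later kernels added a name parameter):

    #include <linux/genalloc.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            struct gen_pool *pool;

            pool = devm_gen_pool_create(&pdev->dev, 5,
                                        dev_to_node(&pdev->dev));
            if (!pool)
                    return -ENOMEM;
            /* no gen_pool_destroy() needed in remove(): devres does it */
            platform_set_drvdata(pdev, pool);
            return 0;
    }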
622 * of_get_named_gen_pool - find a pool by phandle property
627 * Returns the pool that contains the chunk starting at the physical
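
A device-tree lookup sketch ("sram" is a hypothetical phandle property name; later kernels renamed this helper of_gen_pool_get()):

    struct gen_pool *pool;

    pool = of_get_named_gen_pool(pdev->dev.of_node, "sram", 0);
    if (!pool)
            return -ENODEV;   /* NULL when no matching chunk is found */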