dmapool.c revision b5ee5befa75e33e55d34584ad10286c5005cb1de
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
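
/*
 * A minimal end-to-end usage sketch (the foo_dev pointer, block size and
 * alignment below are hypothetical, not part of this file).  The dma_addr_t
 * handle is handed to the hardware while the returned pointer is used from
 * the CPU:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	pool = dma_pool_create("foo-bufs", foo_dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (cpu_addr)
 *		dma_pool_free(pool, cpu_addr, dma);
 *
 *	dma_pool_destroy(pool);
 */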

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
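
/*
 * Sketch of how the @boundary argument is typically used (the controller and
 * its 32-byte descriptor size are hypothetical): a device whose DMA engine
 * must not let a single transfer cross a 4KB address boundary would create
 * its descriptor pool as
 *
 *	pool = dma_pool_create("foo-desc", dev, 32, 32, 4096);
 *
 * Every block handed out by dma_pool_alloc() then fits entirely inside one
 * 4KB window; passing 0 instead leaves only the implicit per-allocation
 * boundary.
 */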

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
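
/*
 * Worked example of the chain built above (illustrative numbers): with
 * size = 64 and boundary = allocation = 4096, the loop stores 64 at offset
 * 0, 128 at offset 64, and so on, giving a free list 0 -> 64 -> 128 -> ...
 * -> 3968.  Once a stored 'next' reaches a value >= allocation, that entry
 * terminates the chain, since dma_pool_alloc() treats page->offset >=
 * pool->allocation as "no free block on this page".  With a smaller
 * boundary, 'next' jumps ahead to the next boundary multiple so that no
 * block straddles it.
 */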

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
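
/*
 * Teardown sketch matching the guarantee documented above (the driver
 * structure and its fields are hypothetical): every outstanding block must
 * be returned before the pool goes away, typically in the remove() or
 * disconnect() path:
 *
 *	dma_pool_free(foo->desc_pool, foo->desc, foo->desc_dma);
 *	dma_pool_destroy(foo->desc_pool);
 *
 * Destroying a pool while blocks are still allocated triggers the "busy"
 * message above and leaks the corresponding coherent memory.
 */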

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
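
/*
 * Allocation sketch (the foo_desc type, pool pointer and register offset
 * are hypothetical): process-context callers can pass GFP_KERNEL and, with
 * __GFP_WAIT set, will retry via the waitqueue above when a fresh page
 * cannot be allocated; interrupt-context callers must use GFP_ATOMIC and
 * handle NULL:
 *
 *	dma_addr_t desc_dma;
 *	struct foo_desc *desc;
 *
 *	desc = dma_pool_alloc(foo->desc_pool, GFP_ATOMIC, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	writel(lower_32_bits(desc_dma), foo->regs + FOO_DESC_ADDR);
 */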

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
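
/*
 * Free sketch (names hypothetical): the vaddr/dma pair passed back must be
 * exactly the pair returned by the matching dma_pool_alloc(); with
 * DMAPOOL_DEBUG enabled the checks above catch mismatched addresses and
 * double frees.  The block must not be touched again once it is returned:
 *
 *	dma_pool_free(foo->desc_pool, desc, desc_dma);
 *	desc = NULL;
 */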

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
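
/*
 * Usage sketch for the managed variant (the foo_probe() function, foo_desc
 * type and fields are hypothetical): a pool created here is torn down by
 * devres when the driver detaches, so the remove path needs no explicit
 * dma_pool_destroy() call:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		struct foo *foo = pci_get_drvdata(pdev);
 *
 *		foo->desc_pool = dmam_pool_create("foo-desc", &pdev->dev,
 *						  sizeof(struct foo_desc),
 *						  8, 0);
 *		if (!foo->desc_pool)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */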

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
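
/*
 * As with other devres-managed resources, most drivers never call
 * dmam_pool_destroy() directly; it is for the rare case where a pool made
 * with dmam_pool_create() must be released before driver detach (a sketch,
 * with a hypothetical pool pointer):
 *
 *	dmam_pool_destroy(foo->tmp_pool);
 *	foo->tmp_pool = NULL;
 */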
505