/* ion_page_pool.c revision 0214c7f20bf4d5d2e204afaf43789f2f4782d9ae */
1/*
 * drivers/staging/android/ion/ion_page_pool.c
3 *
4 * Copyright (C) 2011 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/dma-mapping.h>
18#include <linux/err.h>
19#include <linux/list.h>
20#include <linux/slab.h>
21#include <linux/shrinker.h>
22#include "ion_priv.h"
23
/* Bookkeeping node linking one cached page block into the pool's free list. */
struct ion_page_pool_item {
	struct page *page;	/* cached block of (1 << pool->order) pages */
	struct list_head list;	/* linkage on ion_page_pool.items */
};
28
/*
 * Allocate a fresh block of (1 << pool->order) pages with the pool's gfp
 * mask and flush it for DMA.  Returns the struct page * (as void *) or
 * NULL on allocation failure.
 */
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page = alloc_pages(pool->gfp_mask, pool->order);

	if (!page)
		return NULL;
	/* this is only being used to flush the page for dma,
	   this api is not really suitable for calling from a driver
	   but no better way to flush a page for dma exist at this time */
	__dma_page_cpu_to_dev(page, 0, PAGE_SIZE << pool->order,
			      DMA_BIDIRECTIONAL);
	return page;
}
42
/* Return a block of (1 << pool->order) pages to the system allocator. */
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
				     struct page *page)
{
	__free_pages(page, pool->order);
}
48
49static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
50{
51	struct ion_page_pool_item *item;
52
53	item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
54	if (!item)
55		return -ENOMEM;
56	item->page = page;
57	list_add_tail(&item->list, &pool->items);
58	pool->count++;
59	return 0;
60}
61
62static struct page *ion_page_pool_remove(struct ion_page_pool *pool)
63{
64	struct ion_page_pool_item *item;
65	struct page *page;
66
67	BUG_ON(!pool->count);
68	BUG_ON(list_empty(&pool->items));
69
70	item = list_first_entry(&pool->items, struct ion_page_pool_item, list);
71	list_del(&item->list);
72	page = item->page;
73	kfree(item);
74	pool->count--;
75	return page;
76}
77
78void *ion_page_pool_alloc(struct ion_page_pool *pool)
79{
80	struct page *page = NULL;
81
82	BUG_ON(!pool);
83
84	mutex_lock(&pool->mutex);
85	if (pool->count)
86		page = ion_page_pool_remove(pool);
87	else
88		page = ion_page_pool_alloc_pages(pool);
89	mutex_unlock(&pool->mutex);
90
91	return page;
92}
93
94void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
95{
96	int ret;
97
98	mutex_lock(&pool->mutex);
99	ret = ion_page_pool_add(pool, page);
100	if (ret)
101		ion_page_pool_free_pages(pool, page);
102	mutex_unlock(&pool->mutex);
103}
104
105static int ion_page_pool_shrink(struct shrinker *shrinker,
106				 struct shrink_control *sc)
107{
108	struct ion_page_pool *pool = container_of(shrinker,
109						 struct ion_page_pool,
110						 shrinker);
111	int nr_freed = 0;
112	int i;
113
114	if (sc->nr_to_scan == 0)
115		return pool->count * (1 << pool->order);
116
117	mutex_lock(&pool->mutex);
118	for (i = 0; i < sc->nr_to_scan && pool->count; i++) {
119		struct ion_page_pool_item *item;
120		struct page *page;
121
122		item = list_first_entry(&pool->items, struct ion_page_pool_item, list);
123		page = item->page;
124		if (PageHighMem(page) && !(sc->gfp_mask & __GFP_HIGHMEM)) {
125			list_move_tail(&item->list, &pool->items);
126			continue;
127		}
128		BUG_ON(page != ion_page_pool_remove(pool));
129		ion_page_pool_free_pages(pool, page);
130		nr_freed += (1 << pool->order);
131	}
132	pr_info("%s: shrunk page_pool of order %d by %d pages\n", __func__,
133		pool->order, nr_freed);
134	mutex_unlock(&pool->mutex);
135
136	return pool->count * (1 << pool->order);
137}
138
139struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
140{
141	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
142					     GFP_KERNEL);
143	if (!pool)
144		return NULL;
145	pool->count = 0;
146	INIT_LIST_HEAD(&pool->items);
147	pool->shrinker.shrink = ion_page_pool_shrink;
148	pool->shrinker.seeks = DEFAULT_SEEKS * 16;
149	pool->shrinker.batch = 0;
150	register_shrinker(&pool->shrinker);
151	pool->gfp_mask = gfp_mask;
152	pool->order = order;
153	mutex_init(&pool->mutex);
154
155	return pool;
156}
157
158void ion_page_pool_destroy(struct ion_page_pool *pool)
159{
160	unregister_shrinker(&pool->shrinker);
161	kfree(pool);
162}
163
164