ion_page_pool.c revision 0fb9b815fe2010e9f8ff4b18bfd2a0ed9cf4eb8d
1/*
2 * drivers/staging/android/ion/ion_mem_pool.c
3 *
4 * Copyright (C) 2011 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/dma-mapping.h>
18#include <linux/err.h>
19#include <linux/list.h>
20#include <linux/slab.h>
21#include <linux/shrinker.h>
22#include "ion_priv.h"
23
/*
 * One cached page in a pool's free list.  Each entry is kmalloc'd on add
 * and kfree'd on remove; the page itself is owned by the pool while the
 * item is on pool->high_items or pool->low_items.
 */
struct ion_page_pool_item {
	struct page *page;
	struct list_head list;
};
28
29static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
30{
31	struct page *page = alloc_pages(pool->gfp_mask, pool->order);
32
33	if (!page)
34		return NULL;
35	/* this is only being used to flush the page for dma,
36	   this api is not really suitable for calling from a driver
37	   but no better way to flush a page for dma exist at this time */
38	__dma_page_cpu_to_dev(page, 0, PAGE_SIZE << pool->order,
39			      DMA_BIDIRECTIONAL);
40	return page;
41}
42
/* Return a page to the page allocator at the pool's configured order. */
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
				     struct page *page)
{
	__free_pages(page, pool->order);
}
48
49static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
50{
51	struct ion_page_pool_item *item;
52
53	item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
54	if (!item)
55		return -ENOMEM;
56	item->page = page;
57	if (PageHighMem(page)) {
58		list_add_tail(&item->list, &pool->high_items);
59		pool->high_count++;
60	} else {
61		list_add_tail(&item->list, &pool->low_items);
62		pool->low_count++;
63	}
64	return 0;
65}
66
67static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
68{
69	struct ion_page_pool_item *item;
70	struct page *page;
71
72	if (high) {
73		BUG_ON(!pool->high_count);
74		item = list_first_entry(&pool->high_items,
75					struct ion_page_pool_item, list);
76		pool->high_count--;
77	} else {
78		BUG_ON(!pool->low_count);
79		item = list_first_entry(&pool->low_items,
80					struct ion_page_pool_item, list);
81		pool->low_count--;
82	}
83
84	list_del(&item->list);
85	page = item->page;
86	kfree(item);
87	return page;
88}
89
90void *ion_page_pool_alloc(struct ion_page_pool *pool)
91{
92	struct page *page = NULL;
93
94	BUG_ON(!pool);
95
96	mutex_lock(&pool->mutex);
97	if (pool->high_count)
98		page = ion_page_pool_remove(pool, true);
99	else if (pool->low_count)
100		page = ion_page_pool_remove(pool, false);
101	mutex_unlock(&pool->mutex);
102
103	if (!page)
104		page = ion_page_pool_alloc_pages(pool);
105
106	return page;
107}
108
109void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
110{
111	int ret;
112
113	mutex_lock(&pool->mutex);
114	ret = ion_page_pool_add(pool, page);
115	mutex_unlock(&pool->mutex);
116	if (ret)
117		ion_page_pool_free_pages(pool, page);
118}
119
120static int ion_page_pool_shrink(struct shrinker *shrinker,
121				 struct shrink_control *sc)
122{
123	struct ion_page_pool *pool = container_of(shrinker,
124						 struct ion_page_pool,
125						 shrinker);
126	int nr_freed = 0;
127	int i;
128	bool high;
129
130	if (sc->gfp_mask & __GFP_HIGHMEM)
131		high = true;
132
133	if (sc->nr_to_scan == 0)
134		return high ? (pool->high_count + pool->low_count) *
135			(1 << pool->order) :
136			pool->low_count * (1 << pool->order);
137
138	for (i = 0; i < sc->nr_to_scan; i++) {
139		struct page *page;
140
141		mutex_lock(&pool->mutex);
142		if (high && pool->high_count) {
143			page = ion_page_pool_remove(pool, true);
144		} else if (pool->low_count) {
145			page = ion_page_pool_remove(pool, false);
146		} else {
147			mutex_unlock(&pool->mutex);
148			break;
149		}
150		mutex_unlock(&pool->mutex);
151		ion_page_pool_free_pages(pool, page);
152		nr_freed += (1 << pool->order);
153	}
154	pr_info("%s: shrunk page_pool of order %d by %d pages\n", __func__,
155		pool->order, nr_freed);
156
157	return high ? (pool->high_count + pool->low_count) *
158		(1 << pool->order) :
159		pool->low_count * (1 << pool->order);
160}
161
162struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
163{
164	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
165					     GFP_KERNEL);
166	if (!pool)
167		return NULL;
168	pool->high_count = 0;
169	pool->low_count = 0;
170	INIT_LIST_HEAD(&pool->low_items);
171	INIT_LIST_HEAD(&pool->high_items);
172	pool->shrinker.shrink = ion_page_pool_shrink;
173	pool->shrinker.seeks = DEFAULT_SEEKS * 16;
174	pool->shrinker.batch = 0;
175	register_shrinker(&pool->shrinker);
176	pool->gfp_mask = gfp_mask;
177	pool->order = order;
178	mutex_init(&pool->mutex);
179
180	return pool;
181}
182
/*
 * ion_page_pool_destroy - unregister the pool's shrinker and free it.
 * NOTE(review): cached pages still on the free lists are not released
 * here — presumably callers drain the pool first; confirm at call sites.
 */
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
	unregister_shrinker(&pool->shrinker);
	kfree(pool);
}
188
189