ion_system_heap.c revision 8898227ed555b477e2989a2a9b984fa37e7a9b42
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include "ion.h"
#include "ion_priv.h"

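/*
 * High-order allocations are served on a best-effort basis: with __GFP_WAIT
 * cleared and __GFP_NORETRY | __GFP_NO_KSWAPD set, the page allocator fails
 * fast instead of entering reclaim, and __GFP_NOWARN suppresses the failure
 * warning since falling back to a smaller order is the normal recovery path.
 */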
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

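/*
 * Allocate one block of 1 << order pages.  Uncached buffers are served from
 * the per-order page pool; cached buffers bypass the pool (its pages are
 * deliberately kept out of the CPU cache) and are allocated directly, then
 * cleaned from the cache before first use.
 */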
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		__dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
				      DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}

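/*
 * Return one block to the heap.  Uncached pages are zeroed and recycled via
 * the matching pool; cached pages go straight back to the page allocator.
 */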
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap as we want to set the pgprot so
		 * the writes occur to noncached mappings, as the pool's
		 * purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));

			if (!addr)
				continue;
			memset(addr, 0, PAGE_SIZE);
			vunmap(addr);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}
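/*
 * Try each supported order, largest first, and return the first block that
 * fits within both the remaining size and max_order.  Returns NULL once
 * nothing fits or memory is exhausted.
 */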
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

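/*
 * Allocation runs in two passes: gather blocks largest-first into a local
 * list until the request is covered, then move them into an sg_table with
 * one entry per block (or one entry per page for buffers that will be
 * faulted into userspace page by page).
 */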
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		/* never retry an order larger than the last success */
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	/* use the _safe variant: each info is freed as we walk the list */
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

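/*
 * Each sg entry holds one block, so get_order() of its length recovers the
 * order it was allocated with.  This assumes the ion core has stashed the
 * table built by map_dma below in buffer->sg_table by the time the buffer
 * is freed.
 */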
static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

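/* the table was already built at allocation time; just hand it back */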
static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

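/* report the fill level of each pool when the heap's debug file is read */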
static int ion_system_heap_debug_show(struct ion_heap *heap,
				      struct seq_file *s, void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

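/*
 * Create one page pool per supported order.  Pools for orders above 4 use
 * the fail-fast gfp mask so refilling an empty pool never stalls in reclaim.
 */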
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

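/*
 * The contig heap is a thin wrapper around kzalloc: buffers are physically
 * contiguous, which is what lets the .phys op below report a physical
 * address, but sizes are limited to what kmalloc can satisfy.
 */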
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

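/* a kmalloc buffer is one contiguous run, so a single sg entry suffices */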
static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						       struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

static int ion_system_contig_heap_map_user(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}