ion_system_heap.c revision 77cbe828a10026f8ff208eaa67d75675a3ffbdfc
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

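/*
 * Map an allocation order to its index in orders[] (and hence into the
 * heap's pool array).  BUG()s on an order we do not pool.
 */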
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

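/*
 * Allocate one chunk of the requested order.  Uncached chunks come from
 * the matching page pool; cached chunks are taken straight from the page
 * allocator and flushed out of the CPU cache so the new owner starts with
 * consistent memory.  If the buffer faults in user mappings page by page,
 * the high-order allocation is split into order-0 pages.
 */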
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		__dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
				      DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}

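/*
 * Release one chunk.  Uncached chunks are zeroed through a temporary
 * writecombine mapping in @vm_struct before going back to the pool;
 * cached chunks are returned directly to the page allocator.
 */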
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order, struct vm_struct *vm_struct)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool, for
		 * security.  The pages are mapped into a scratch vm_area so
		 * the pgprot can be set and the writes land in a noncached
		 * mapping, as the pool's purpose is to keep the pages out of
		 * the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			struct page **pages = &sub_page;

			map_vm_area(vm_struct,
				    pgprot_writecombine(PAGE_KERNEL),
				    &pages);
			memset(vm_struct->addr, 0, PAGE_SIZE);
			unmap_kernel_range((unsigned long)vm_struct->addr,
					   PAGE_SIZE);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

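/*
 * Walk orders[] from largest to smallest and return the first chunk, no
 * larger than @max_order, that both fits in @size and can actually be
 * allocated.  Returns NULL if every candidate order fails.
 */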
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
	if (!info)
		return NULL;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info->page = page;
		info->order = orders[i];
		return info;
	}
	kfree(info);
	return NULL;
}

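/*
 * Build up the buffer from variable-order chunks.  @max_order only ever
 * shrinks, so the resulting scatterlist runs from the largest chunk to
 * the smallest.  The finished sg_table is stored in buffer->priv_virt
 * and handed back out by the map_dma op.
 */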
static int ion_system_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct vm_struct *vm_struct;
	pte_t *ptes;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order,
				 vm_struct);
		kfree(info);
	}
	free_vm_area(vm_struct);
	return -ENOMEM;
}

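/*
 * Free every chunk of the buffer through free_buffer_page(), sharing a
 * single scratch vm_area for the zeroing, then drop the sg_table.
 */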
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	LIST_HEAD(pages);
	struct vm_struct *vm_struct;
	pte_t *ptes;
	int i;

	vm_struct = get_vm_area(PAGE_SIZE, &ptes);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)), vm_struct);
	free_vm_area(vm_struct);
	sg_free_table(table);
	kfree(table);
}

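/*
 * The sg_table built at allocation time already describes the buffer, so
 * map_dma simply hands it back and unmap_dma has nothing to undo.
 */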
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

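/*
 * debugfs hook: report how many pages each pool currently holds, split
 * into highmem and lowmem counts.
 */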
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

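/*
 * Create the system heap with one page pool per entry in orders[].
 *
 * Rough usage sketch (assumption: callers normally reach this through the
 * generic ion_heap_create()/ion_device_add_heap() helpers, which also fill
 * in the heap name and id):
 *
 *	struct ion_heap *heap = ion_system_heap_create(NULL);
 *
 *	if (!IS_ERR(heap))
 *		ion_device_add_heap(idev, heap);
 */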
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

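/*
 * The "system contig" heap below serves small physically contiguous
 * allocations directly from kmalloc().
 */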
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

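/*
 * kmalloc memory is physically contiguous, so the whole buffer fits in a
 * single-entry sg_table.
 */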
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}