ion_system_heap.c revision 8fae831288210dae626e212aaab459154b65ed79
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

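/*
 * High-order allocations are opportunistic: they are made without waking
 * kswapd, without retrying and without blocking for reclaim, and a failure
 * simply falls back to a smaller order in alloc_largest_available().
 * Order-0 allocations use the ordinary blocking flags.  Both masks request
 * zeroed, highmem-capable pages.
 */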
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

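/*
 * The system heap keeps one ion_page_pool per entry in orders[]; uncached
 * buffers are allocated from and freed back to these pools instead of going
 * to the page allocator for every request.
 */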
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

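/*
 * One page_info is kept per chunk while a buffer is being assembled in
 * ion_system_heap_allocate(); it records the first page of the chunk and the
 * order it was allocated at.
 */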
struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

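/*
 * Allocate one chunk of 2^order pages.  Uncached chunks come from the
 * per-order pool; cached chunks are taken straight from the page allocator
 * and flushed out of the CPU caches with __dma_page_cpu_to_dev().  When the
 * buffer will be faulted into userspace page by page, the chunk is split
 * into individual pages.
 */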
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		__dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
				      DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}

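/*
 * Return a chunk of 2^order pages to the heap.  Uncached chunks are zeroed
 * through a temporary write-combined mapping and recycled into the matching
 * pool; cached chunks go back to the page allocator, page by page if the
 * chunk was previously split.
 */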
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap as we want to set the pgprot so
		 * the writes occur to noncached mappings, as the pool's
		 * purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));

			memset(addr, 0, PAGE_SIZE);
			vunmap(addr);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

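/*
 * Grab the largest chunk that still fits in the remaining size, starting at
 * max_order so that later chunks are never larger than earlier ones.  Returns
 * NULL once no order can be satisfied.
 */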
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

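/*
 * Build the buffer as a list of variable-order chunks, then describe it with
 * a scatterlist: one entry per chunk normally, or one entry per page when the
 * buffer will be faulted into userspace.  On failure everything allocated so
 * far is handed back to free_buffer_page().
 */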
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

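/*
 * Tear down a buffer built by ion_system_heap_allocate(): each scatterlist
 * entry is returned via free_buffer_page() at the order implied by its
 * length, then the table itself is freed.
 */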
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

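/*
 * Map the whole buffer into the kernel with vmap().  The mapping attributes
 * follow ION_FLAG_CACHED: cacheable for cached buffers, write-combined
 * otherwise.
 */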
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

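/*
 * Map the buffer into a user VMA one scatterlist entry at a time, treating
 * vm_pgoff as an offset in entries and stopping once the VMA is filled.
 */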
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				sg_dma_len(sg), vma->vm_page_prot);
		addr += sg_dma_len(sg);
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

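/*
 * debugfs hook: report, per order, how many pages each pool is holding and
 * how much memory that represents, split into highmem and lowmem counts.
 */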
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

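/*
 * Create the system heap and one page pool per supported order.  Pools for
 * orders above 4 are created with the non-blocking gfp mask defined above.
 */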
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

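/*
 * The "system contig" heap below provides physically contiguous buffers
 * allocated with kzalloc(); it shares the kernel mapping helpers with the
 * system heap but has its own dma and userspace mapping paths.
 */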
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}