ion_system_heap.c revision 0fb9b815fe2010e9f8ff4b18bfd2a0ed9cf4eb8d
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

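/*
 * GFP flags for pooled allocations and the page orders backed by pools.
 * High-order allocations skip warnings, retries, and kswapd wakeups so
 * they fail quickly and the allocator can fall back to a smaller order.
 */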
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD);
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
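
/*
 * Map an allocation order to its index in orders[].  Calling this with an
 * order that is not in orders[] is a bug.
 */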
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

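/*
 * Allocate a single chunk of 2^order pages.  Uncached buffers come from the
 * per-order page pool; cached buffers bypass the pool and go straight to the
 * page allocator.  Buffers that fault in their user mappings get the chunk
 * split into individual pages.
 */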
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
	}
	if (!page)
		return NULL;
	if (split_pages)
		split_page(page, order);
	return page;
}

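/*
 * Free a chunk of 2^order pages: uncached chunks are zeroed and returned to
 * the pool, cached chunks go back to the page allocator (page by page when
 * the buffer was split for faulting user mappings).
 */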
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/* Zero the pages before returning them to the pool for
		   security.  This uses vmap as we want to set the pgprot so
		   that the writes occur to noncached mappings, as the pool's
		   purpose is to keep the pages out of the cache. */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			memset(addr, 0, PAGE_SIZE);
			vunmap(addr);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

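/*
 * Allocate the largest chunk, no bigger than max_order, that still fits in
 * the remaining size, trying each supported order from largest to smallest.
 */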
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

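/*
 * Allocate a buffer as a list of variable-order chunks and describe it with
 * an sg_table.  Buffers that fault in their user mappings are split into
 * individual pages, one sg entry per page; otherwise each chunk gets a
 * single sg entry.
 */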
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

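/*
 * Free every chunk described by the buffer's sg_table, then free the table
 * itself.
 */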
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

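/*
 * The sg_table is built at allocation time and stored in priv_virt, so
 * map_dma simply hands it back and unmap_dma has nothing to do.
 */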
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

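/*
 * Map the whole buffer into the kernel with vmap(), using a cached or
 * writecombine mapping depending on the buffer's ION_FLAG_CACHED flag.
 */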
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

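/*
 * Map the buffer into a user VMA one sg entry at a time.  vma->vm_pgoff is
 * interpreted as a number of whole sg entries to skip.
 */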
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		ret = remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				      sg_dma_len(sg), vma->vm_page_prot);
		if (ret)
			return ret;
		addr += sg_dma_len(sg);
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

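/* Create the system heap and one page pool per supported order. */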
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

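/*
 * The system contig heap: physically contiguous buffers allocated with
 * kzalloc() and described by a single-entry sg_table.
 */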
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

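/*
 * Build a single-entry sg_table covering the contiguous allocation;
 * unmap_dma frees it again.
 */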
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}