ion_system_heap.c revision 45b17a809fda352086b63518e32d272cc3442ed5
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

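/*
 * Allocation orders tried for each buffer, largest first: with 4 KB pages
 * this is 1 MB, 64 KB and 4 KB chunks.  One ion_page_pool is kept per
 * order to recycle uncached pages (the pools are created in
 * ion_system_heap_create() below).
 */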
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
{
	int i;
	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached)
		page = ion_page_pool_alloc(pool);
	else
		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
				   __GFP_NOWARN | __GFP_NORETRY, order);
	if (!page)
		return NULL;
	if (split_pages)
		split_page(page, order);
	return page;
}

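/*
 * Return one chunk of 2^order pages.  Uncached chunks are zeroed through a
 * temporary write-combined mapping and handed back to the pool; cached
 * chunks are freed to the page allocator, either page by page (split
 * buffers) or as a single high-order block.
 */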
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap as we want to set the pgprot so
		 * the writes occur to noncached mappings, as the pool's
		 * purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			if (addr) {
				memset(addr, 0, PAGE_SIZE);
				vunmap(addr);
			}
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

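/*
 * Grab the largest chunk, no bigger than max_order, that still fits in the
 * remaining size.  Falls back to smaller orders when a high-order
 * allocation fails.
 */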
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

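/*
 * Build the buffer from variable-order chunks and describe it with an
 * sg_table: one scatterlist entry per chunk, or one entry per page when
 * the buffer will be faulted into userspace.  The table is stashed in
 * buffer->priv_virt and handed out unchanged by map_dma().
 */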
static int ion_system_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;
		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	LIST_HEAD(pages);
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

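/*
 * Map the whole buffer into the kernel with vmap().  The scatterlist is
 * flattened into a temporary page array; cached buffers get a normal
 * kernel mapping, uncached buffers a write-combined one.
 */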
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);
		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++) {
			*(tmp++) = page++;
		}
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

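/*
 * Map the buffer into a userspace VMA with remap_pfn_range(), one
 * scatterlist entry at a time.  vm_pgoff is interpreted as a number of
 * scatterlist entries to skip before mapping starts.
 */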
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				sg_dma_len(sg), vma->vm_page_prot);
		addr += sg_dma_len(sg);
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];
		seq_printf(s, "%d order %u pages in pool = %lu total\n",
			   pool->count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->count);
	}
	return 0;
}

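/*
 * Create the system heap and its per-order page pools.
 *
 * Rough usage sketch (not part of this file; assumes the usual ion core
 * helpers ion_heap_create() and ion_device_add_heap(), and an ion device
 * "idev" obtained from ion_device_create()):
 *
 *	struct ion_platform_heap heap_data = {
 *		.type = ION_HEAP_TYPE_SYSTEM,
 *		.id   = ION_HEAP_TYPE_SYSTEM,
 *		.name = "system",
 *	};
 *	struct ion_heap *heap = ion_heap_create(&heap_data);
 *
 *	if (!IS_ERR_OR_NULL(heap))
 *		ion_device_add_heap(idev, heap);
 */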
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY,
					    orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

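/*
 * The "system contig" heap below backs each buffer with a single kzalloc()
 * allocation, so the memory is physically contiguous and can be described
 * by one scatterlist entry and a single physical address.
 */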
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

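/*
 * Build a one-entry sg_table covering the kzalloc()ed buffer.  Unlike the
 * system heap above, the table is created on map_dma and freed again on
 * unmap_dma rather than being cached in buffer->priv_virt.
 */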
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}