ion_chunk_heap.c revision 0b6b2cde0928707a618ce8c07970219f21d066e5
/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
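
/*
 * The chunk heap serves allocations out of a physically contiguous,
 * statically reserved carveout.  The region is split into fixed-size
 * chunks tracked by a genalloc pool, and each buffer is a list of
 * chunks described to the rest of ION through an sg_table.
 */
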
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

#include <asm/mach/map.h>

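/*
 * Per-heap bookkeeping: @pool carves @size bytes starting at physical
 * @base into @chunk_size pieces, and @allocated tracks how much of the
 * carveout is currently handed out.
 */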
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

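/*
 * Round the request up to a whole number of chunks, pull each chunk out
 * of the genalloc pool, and describe the result as one scatterlist entry
 * per chunk in an sg_table stored in buffer->priv_virt.
 */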
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;

	/* This heap cannot fault pages in on demand. */
	if (ion_buffer_fault_user_mappings(buffer))
		return -ENOMEM;

	num_chunks = ALIGN(size, chunk_heap->chunk_size) /
		chunk_heap->chunk_size;
	buffer->size = num_chunks * chunk_heap->chunk_size;

	if (buffer->size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);

		if (!paddr)
			goto err;
		sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += buffer->size;
	return 0;
err:
	/*
	 * Unwind the chunks allocated so far.  Use sg->length rather than
	 * sg_dma_len(): the table has not been DMA-mapped at this point,
	 * so the DMA length may not be set.
	 */
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

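/*
 * Free path: zero the buffer, clean it out of the CPU cache, and hand
 * every chunk back to the genalloc pool.
 */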
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;

	ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i) {
		/*
		 * __dma_page_cpu_to_dev() is an ARM-internal cache
		 * maintenance helper, not a portable DMA API; it writes
		 * the zeroed chunk back out of the CPU cache.  As in the
		 * allocation error path, sg->length is used because the
		 * buffer is not DMA-mapped here.
		 */
		__dma_page_cpu_to_dev(sg_page(sg), 0, sg->length,
				      DMA_BIDIRECTIONAL);
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= buffer->size;
	sg_free_table(table);
	kfree(table);
}

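/*
 * The sg_table was built at allocation time, so map_dma just returns the
 * cached table and unmap_dma has nothing to tear down.
 */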
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

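/*
 * Only allocate/free/map_dma are chunk-specific; kernel and user mappings
 * reuse the generic ion_heap helpers, which walk the buffer's sg_table.
 */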
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

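/*
 * Build a chunk heap over the carveout described by @heap_data: the chunk
 * size arrives through heap_data->priv, and the genalloc pool's minimum
 * allocation order is set so every pool allocation is exactly one chunk.
 */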
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;

	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		kfree(chunk_heap);
		return ERR_PTR(-ENOMEM);
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;
	/* Clean the whole carveout out of the CPU cache before first use. */
	__dma_page_cpu_to_dev(phys_to_page(heap_data->base), 0, heap_data->size,
			      DMA_BIDIRECTIONAL);
	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
		heap_data->size, heap_data->align);

	return &chunk_heap->heap;
}

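/*
 * Tear-down mirrors create: destroy the pool, then free the heap.  Any
 * outstanding buffers must have been freed first, since gen_pool_destroy()
 * does not tolerate live allocations.
 */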
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
	     container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}
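
/*
 * Usage sketch (not part of this file): a board file would describe its
 * reserved region with an ion_platform_heap entry and pass it to
 * ion_chunk_heap_create().  The id, name, base, size, and chunk size
 * below are illustrative assumptions only.
 *
 *	static struct ion_platform_heap example_chunk_heap_data = {
 *		.type = ION_HEAP_TYPE_CHUNK,
 *		.id   = 1,
 *		.name = "chunk-heap",
 *		.base = 0x90000000,	// physical start of the carveout
 *		.size = SZ_16M,		// whole carveout
 *		.priv = (void *)SZ_64K,	// chunk_size, read back in create()
 *	};
 *
 *	struct ion_heap *heap = ion_chunk_heap_create(&example_chunk_heap_data);
 *	if (IS_ERR(heap))
 *		// handle allocation failure
 */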