ion_chunk_heap.c revision 1d804535c37cb669334781afdbc2987284621623
/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

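/*
 * struct ion_chunk_heap - carveout heap handed out in fixed-size chunks
 * @heap:		generic ion heap embedded in this chunk heap
 * @pool:		gen_pool managing the physically contiguous carveout
 * @base:		physical base address of the carveout
 * @chunk_size:		allocation granularity; buffers are built out of
 *			chunk_size pieces
 * @size:		total size of the carveout in bytes
 * @allocated:		bytes currently handed out, used for the space check
 */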
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

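/*
 * Round the request up to a whole number of chunks, then build an sg_table
 * with one entry per chunk taken from the gen_pool.  If any chunk
 * allocation fails, everything grabbed so far is returned to the pool.
 */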
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

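	/*
	 * Every chunk is mapped up front; faulting user pages in on
	 * demand is not supported by this heap.
	 */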
	if (ion_buffer_fault_user_mappings(buffer))
		return -ENOMEM;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

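	/* carve the buffer out of the pool one chunk at a time */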
	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

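/*
 * Zero the buffer, flush it back to memory if it was cached, and hand
 * every chunk back to the gen_pool.
 */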
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

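	/* return each chunk to the pool */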
	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

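/*
 * The sg_table describing the chunks is built at allocation time, so
 * mapping for DMA just hands it back and there is nothing to undo on
 * unmap.
 */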
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

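/*
 * Create a chunk heap over the physically contiguous region described by
 * @heap_data.  heap_data->priv carries the chunk size, which also sets
 * the gen_pool allocation order.  The whole region is zeroed through a
 * temporary write-combined kernel mapping and flushed before being added
 * to the pool.
 *
 * As a rough illustration, a platform might describe such a heap as
 * follows; the id, name, base, and sizes below are made up for the
 * example, not taken from any real board file:
 *
 *	static struct ion_platform_heap example_chunk_heap = {
 *		.type = ION_HEAP_TYPE_CHUNK,
 *		.id   = 1,
 *		.name = "chunk",
 *		.base = 0x90000000,
 *		.size = SZ_16M,
 *		.priv = (void *)SZ_64K,
 *	};
 */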
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	struct vm_struct *vm_struct;
	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
	unsigned long i;
	int ret;

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

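	/*
	 * Zero the carveout page by page: map each page into a scratch
	 * vm area with write-combine protections, memset it, then tear
	 * the mapping down again.
	 */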
	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct) {
		ret = -ENOMEM;
		goto error;
	}
	for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
		struct page *page = pfn_to_page(PFN_DOWN(chunk_heap->base + i));
		struct page **pages = &page;

		ret = map_vm_area(vm_struct, pgprot, &pages);
		if (ret)
			goto error_map_vm_area;
		memset(vm_struct->addr, 0, PAGE_SIZE);
		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
	}
	free_vm_area(vm_struct);

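	/* flush the freshly zeroed region so the contents hit memory */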
	ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
				  heap_data->size, DMA_BIDIRECTIONAL);

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
		heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_map_vm_area:
	free_vm_area(vm_struct);
error:
	gen_pool_destroy(chunk_heap->pool);
error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

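/* Tear down a heap created by ion_chunk_heap_create(). */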
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
	     container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}