ion_chunk_heap.c revision df6cf5c8af54e3e89643511272f6f5f5cfb71a7d
/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

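/**
 * struct ion_chunk_heap - ION heap that hands out fixed-size chunks of a
 *			   physically contiguous carveout
 * @heap:	embedded generic heap; recovered with container_of()
 * @pool:	gen_pool allocator managing the carveout
 * @base:	physical base address of the carveout
 * @chunk_size:	allocation granule; every buffer is rounded up to a
 *		multiple of this size
 * @size:	total size of the carveout in bytes
 * @allocated:	bytes currently handed out, used for a cheap space check
 */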
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

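/*
 * Round the request up to a whole number of chunks, carve that many
 * chunks out of the gen_pool and describe each one with its own
 * scatterlist entry. The finished sg_table is stashed in
 * buffer->priv_virt so map_dma can return it without further work.
 */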
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	/* The pool cannot honour an alignment larger than the chunk size. */
	if (align > chunk_heap->chunk_size)
		return -EINVAL;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	/* Return the chunks allocated so far to the pool, then drop the table. */
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

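/*
 * Zero the buffer before its chunks go back to the pool so stale
 * contents never leak to the next client, then release every
 * scatterlist entry to the gen_pool and free the table itself.
 */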
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	/* Push the zeroed contents out of the CPU caches before reuse. */
	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

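/*
 * The sg_table describing the chunks was built at allocation time, so
 * mapping for DMA just returns it and unmapping has nothing to undo.
 */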
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

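/*
 * Create a chunk heap over the carveout described by @heap_data. The
 * region is flushed and zeroed once up front, then handed to a
 * gen_pool whose minimum allocation order covers one chunk (the chunk
 * size arrives through heap_data->priv).
 */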
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	int ret;
	struct page *page;
	size_t size;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
		heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

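/* Tear down a heap created by ion_chunk_heap_create(). */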
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}

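/*
 * Usage sketch (illustrative only, not part of this driver): a board
 * file would describe its carveout in an ion_platform_heap entry,
 * passing the chunk size through ->priv, and register the result with
 * ion_device_add_heap(). The id, base, size and chunk size below are
 * hypothetical values.
 *
 *	static struct ion_platform_heap chunk_heap_data = {
 *		.type = ION_HEAP_TYPE_CHUNK,
 *		.id   = 1,
 *		.name = "chunk-heap",
 *		.base = 0x90000000,		// hypothetical carveout base
 *		.size = SZ_16M,			// hypothetical carveout size
 *		.priv = (void *)SZ_64K,		// chunk size
 *	};
 *
 *	struct ion_heap *heap = ion_chunk_heap_create(&chunk_heap_data);
 *	if (!IS_ERR(heap))
 *		ion_device_add_heap(idev, heap);
 */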