ion_carveout_heap.c revision b6152016003b2cc2370899558bf2e7de4ebd0b09
/*
 * drivers/staging/android/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

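/*
 * A carveout heap hands out buffers from a single physically contiguous
 * region that was reserved ("carved out") of system memory at boot.
 * Allocation bookkeeping is delegated to a genalloc pool covering that
 * region.
 */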
struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
};

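/*
 * Reserve @size bytes from the heap's pool.  gen_pool_alloc() returns 0
 * when the pool is exhausted, which is remapped to
 * ION_CARVEOUT_ALLOCATE_FAIL.  Note that @align is accepted but not
 * honoured here; callers get the pool's minimum-order alignment.
 */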
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);

	if (!offset)
		return ION_CARVEOUT_ALLOCATE_FAIL;

	return offset;
}

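/*
 * Return @size bytes at @addr to the pool.  Failed allocations carry
 * the ION_CARVEOUT_ALLOCATE_FAIL marker, so they are skipped here
 * rather than corrupting the pool.
 */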
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
}

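/*
 * Report the physical address and length of the buffer.  The buffer is
 * backed by a single-entry sg_table (built in the allocate op below),
 * so the first page of the first entry gives the base address.
 */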
static int ion_carveout_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	*addr = paddr;
	*len = buffer->size;
	return 0;
}

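/*
 * Allocate op: carve @size bytes out of the pool and describe the
 * result with a one-entry sg_table stashed in buffer->priv_virt.
 * Alignment beyond PAGE_SIZE is rejected because the pool's minimum
 * allocation order (set at heap creation) only guarantees page
 * alignment.
 */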
static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct sg_table *table;
	ion_phys_addr_t paddr;
	int ret;

	if (align > PAGE_SIZE)
		return -EINVAL;

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free;

	paddr = ion_carveout_allocate(heap, size, align);
	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
		ret = -ENOMEM;
		goto err_free_table;
	}

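	/*
	 * The carveout is physically contiguous, so the whole buffer
	 * fits in a single scatterlist entry.
	 */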
	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	buffer->priv_virt = table;

	return 0;

err_free_table:
	sg_free_table(table);
err_free:
	kfree(table);
	return ret;
}

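/*
 * Free op: scrub the buffer before recycling it so stale client data
 * never leaks to the next allocation, push the zeroes past the CPU
 * caches for cached buffers, then return the range to the pool.
 */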
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	ion_carveout_free(heap, paddr, buffer->size);
	sg_free_table(table);
	kfree(table);
}

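/*
 * The sg_table built at allocation time doubles as the DMA description,
 * so map_dma simply hands it back and unmap_dma has nothing to undo.
 */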
static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
						  struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
}

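/*
 * The generic ion_heap_map_user/ion_heap_map_kernel helpers walk the
 * buffer's sg_table, which the ion core obtains from .map_dma, so no
 * carveout-specific mapping code is needed.
 */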
static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.phys = ion_carveout_heap_phys,
	.map_dma = ion_carveout_heap_map_dma,
	.unmap_dma = ion_carveout_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

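/*
 * Create a carveout heap over the [base, base + size) region described
 * by @heap_data.  The region is flushed and zeroed once up front so
 * buffers start out clean in memory, not just in the CPU caches.
 * ION_HEAP_FLAG_DEFER_FREE lets the ion core run the (zeroing) free op
 * from a background thread instead of in the caller's context.
 */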
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	struct page *page;
	size_t size;
	int ret;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size,
				  pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

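	/*
	 * Order-12 chunks: the pool never hands out less than
	 * 2^12 = 4 KiB, which keeps every allocation page-aligned on
	 * common configurations.  -1 means no NUMA node preference.
	 */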
	carveout_heap->pool = gen_pool_create(12, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
		     -1);
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	return &carveout_heap->heap;
}

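/*
 * Tear down a heap created above.  Callers must ensure no buffers are
 * outstanding, since gen_pool_destroy() expects a fully freed pool.
 */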
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
	     container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
}
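/*
 * Usage sketch (illustrative only): a platform describes its reserved
 * region with an ion_platform_heap and registers the resulting heap
 * with the ion core.  The base and size below are placeholders, the
 * ion_device pointer (idev) is assumed to come from ion_device_create()
 * elsewhere, and in-tree users normally reach this code through the
 * generic ion_heap_create() dispatcher keyed on .type.
 *
 *	static struct ion_platform_heap carveout_data = {
 *		.type = ION_HEAP_TYPE_CARVEOUT,
 *		.id   = 1,
 *		.name = "carveout",
 *		.base = 0x90000000,
 *		.size = SZ_16M,
 *	};
 *
 *	struct ion_heap *heap = ion_carveout_heap_create(&carveout_data);
 *
 *	if (!IS_ERR(heap))
 *		ion_device_add_heap(idev, heap);
 */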