ion_heap.c revision f63958d80c07c04db48812d97ff7450517d80ffa
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

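/*
 * Map an ion buffer into the kernel's virtual address space: the buffer's
 * sg_table is flattened into an array of struct page pointers and mapped
 * contiguously with vmap().  Uncached buffers get a write-combining mapping.
 */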
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

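/* Tear down a kernel mapping created by ion_heap_map_kernel(). */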
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

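/*
 * Map an ion buffer into a userspace vma.  The sg_table is walked entry by
 * entry and each chunk is inserted with remap_pfn_range(); vma->vm_pgoff is
 * treated as a page offset into the buffer.
 */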
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

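/*
 * Zero a batch of pages through a temporary vm_map_ram() mapping, so pages
 * that may be highmem or need non-cached protections can be cleared with a
 * plain memset().
 */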
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

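/*
 * Zero every page backing the buffer.  Pages are gathered from the sg_table
 * into batches of up to 32 and cleared via ion_heap_clear_pages(), using a
 * write-combining protection when the buffer is not cached.
 */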
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;
	struct scatterlist *sg;
	int i, j, ret = 0;
	struct page *pages[32];
	int k = 0;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg->length;

		for (j = 0; j < len / PAGE_SIZE; j++) {
			pages[k++] = page + j;
			if (k == ARRAY_SIZE(pages)) {
				ret = ion_heap_clear_pages(pages, k, pgprot);
				if (ret)
					goto end;
				k = 0;
			}
		}
	}
	/* Flush any partial batch left over after all entries are walked. */
	if (k)
		ret = ion_heap_clear_pages(pages, k, pgprot);
end:
	return ret;
}

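/*
 * Queue a buffer on the heap's deferred-free list and wake the free thread.
 */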
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	rt_mutex_lock(&heap->lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	rt_mutex_unlock(&heap->lock);
	wake_up(&heap->waitqueue);
}

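/* Return the number of bytes currently sitting on the deferred-free list. */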
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	rt_mutex_lock(&heap->lock);
	size = heap->free_list_size;
	rt_mutex_unlock(&heap->lock);

	return size;
}

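/*
 * Synchronously free up to @size bytes from the deferred-free list (the whole
 * list when @size is 0) and return the number of bytes actually drained.
 */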
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer, *tmp;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	rt_mutex_lock(&heap->lock);
	if (size == 0)
		size = heap->free_list_size;

	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
		if (total_drained >= size)
			break;
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		ion_buffer_destroy(buffer);
	}
	rt_mutex_unlock(&heap->lock);

	return total_drained;
}

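/*
 * Body of the per-heap kthread: sleep until buffers show up on the free list,
 * then pop and destroy them one at a time, dropping the heap lock before each
 * ion_buffer_destroy() call.
 */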
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		rt_mutex_lock(&heap->lock);
		if (list_empty(&heap->free_list)) {
			rt_mutex_unlock(&heap->lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		rt_mutex_unlock(&heap->lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

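/*
 * Set up the deferred-free machinery for a heap: the free list, its lock and
 * waitqueue, and a low-priority (SCHED_IDLE) kthread that drains the list.
 */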
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	rt_mutex_init(&heap->lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

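/*
 * Instantiate a heap of the type described by the platform data and copy the
 * board-supplied name and id into it.
 */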
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

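/* Tear down a heap created by ion_heap_create(), dispatching on its type. */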
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}