ion_heap.c revision c13bd1c4eb714c08214e897fcbe51b13e0e0f279
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

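/*
 * ion_heap_map_kernel - map a buffer into the kernel address space
 *
 * Gathers every page referenced by the buffer's sg_table into a temporary
 * page array and maps it contiguously with vmap().  Cached buffers are
 * mapped with PAGE_KERNEL, uncached buffers with write-combine protection.
 * Returns the kernel virtual address, or NULL on failure.
 */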
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

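/*
 * ion_heap_unmap_kernel - undo a mapping set up by ion_heap_map_kernel
 */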
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

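/*
 * ion_heap_map_user - map a buffer into a userspace vma
 *
 * Walks the buffer's sg_table, skipping pages that fall before the vma's
 * page offset, and remaps each run of pages with remap_pfn_range() until
 * the vma has been filled.  Returns 0 on success or a negative errno.
 */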
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

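/*
 * ion_heap_buffer_zero - clear a buffer's contents
 *
 * Maps the buffer one page at a time into a temporary single-page vm area,
 * using the buffer's own cacheability, and zeroes each page in turn.
 */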
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;
	struct scatterlist *sg;
	struct vm_struct *vm_struct;
	int i, j, ret = 0;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct)
		return -ENOMEM;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg_dma_len(sg);

		for (j = 0; j < len / PAGE_SIZE; j++) {
			struct page *sub_page = page + j;
			struct page **pages = &sub_page;

			ret = map_vm_area(vm_struct, pgprot, &pages);
			if (ret)
				goto end;
			memset(vm_struct->addr, 0, PAGE_SIZE);
			unmap_kernel_range((unsigned long)vm_struct->addr,
					   PAGE_SIZE);
		}
	}
end:
	free_vm_area(vm_struct);
	return ret;
}

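/*
 * ion_heap_alloc_pages - allocate pages for a buffer
 *
 * Allocates 2^order pages.  If the buffer is faulted into userspace page
 * by page, the allocation is split with split_page() so the pages can be
 * mapped and freed individually.
 */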
struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
				  unsigned int order)
{
	struct page *page = alloc_pages(gfp_flags, order);

	if (!page)
		return page;

	if (ion_buffer_fault_user_mappings(buffer))
		split_page(page, order);

	return page;
}

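/*
 * ion_heap_free_pages - free pages obtained from ion_heap_alloc_pages
 *
 * Non-split allocations are freed in one call to __free_pages(); split
 * allocations are returned page by page.
 */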
void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
			 unsigned int order)
{
	int i;

	if (!ion_buffer_fault_user_mappings(buffer)) {
		__free_pages(page, order);
		return;
	}

	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);
}

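/*
 * ion_heap_freelist_add - queue a buffer for deferred freeing
 *
 * Adds the buffer to the heap's free list under the heap lock, accounts
 * its size, and wakes the deferred-free thread.
 */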
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	rt_mutex_lock(&heap->lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	rt_mutex_unlock(&heap->lock);
	wake_up(&heap->waitqueue);
}

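/*
 * ion_heap_freelist_size - number of bytes currently on the heap's
 * deferred free list
 */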
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	rt_mutex_lock(&heap->lock);
	size = heap->free_list_size;
	rt_mutex_unlock(&heap->lock);

	return size;
}

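/*
 * ion_heap_freelist_drain - free buffers from the deferred free list
 *
 * Destroys queued buffers until at least @size bytes have been drained;
 * a @size of 0 drains the whole list.  Returns the number of bytes freed.
 */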
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer, *tmp;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	rt_mutex_lock(&heap->lock);
	if (size == 0)
		size = heap->free_list_size;

	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
		if (total_drained >= size)
			break;
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		ion_buffer_destroy(buffer);
	}
	rt_mutex_unlock(&heap->lock);

	return total_drained;
}

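/*
 * ion_heap_deferred_free - kthread that services the deferred free list
 *
 * Sleeps (freezably) until buffers are queued, then pops them one at a
 * time, dropping the heap lock before the ion_buffer_destroy() call.
 */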
int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		rt_mutex_lock(&heap->lock);
		if (list_empty(&heap->free_list)) {
			rt_mutex_unlock(&heap->lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		rt_mutex_unlock(&heap->lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

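/*
 * ion_heap_init_deferred_free - set up deferred freeing for a heap
 *
 * Initializes the free list, its lock and waitqueue, and starts the
 * deferred-free kthread at SCHED_IDLE priority.
 */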
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	rt_mutex_init(&heap->lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_RET(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

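/*
 * ion_heap_create - instantiate a heap described by platform data
 *
 * Dispatches on heap_data->type to the matching heap constructor, then
 * copies the name and id from the platform description into the new heap.
 * Returns ERR_PTR(-EINVAL) for an unknown type or a failed constructor.
 */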
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

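/*
 * ion_heap_destroy - tear down a heap created by ion_heap_create
 *
 * Dispatches on heap->type to the matching destructor; a NULL heap is a
 * no-op.
 */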
void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}