ion_heap.c revision b9daf0b60b8a6a5151fca0e8cbb2dab763a3e92a
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

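/*
 * Map a buffer into the kernel virtual address space: collect every page
 * referenced by the buffer's sg_table into a temporary array and vmap() it,
 * using a cached or write-combined mapping depending on ION_FLAG_CACHED.
 */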
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (vaddr == NULL)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

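/*
 * Map a buffer into a userspace vma, one scatterlist entry at a time.
 * vma->vm_pgoff selects the starting offset within the buffer, and the
 * mapping stops once the end of the vma is reached.
 */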
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

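/*
 * Zero a batch of pages through a temporary vm_map_ram() mapping created
 * with the requested page protection.
 */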
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}

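/*
 * Zero every page in a buffer, mapping it with the same cache attributes
 * (cached or write-combined) that ION_FLAG_CACHED implies for the buffer.
 */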
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}

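/*
 * Deferred-free support: freed buffers are parked on heap->free_list under
 * heap->free_lock, and the free thread is woken to destroy them later.
 */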
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}

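/*
 * Drain up to @size bytes of buffers from the free list (all of it if
 * @size is zero), destroying each buffer with the lock dropped.
 */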
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

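/*
 * Per-heap kthread: sleep until the free list is non-empty, then pop and
 * destroy buffers one at a time.
 */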
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

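/*
 * Set up the free list, its lock and waitqueue, and start the deferred-free
 * thread at SCHED_IDLE priority.
 */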
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	spin_lock_init(&heap->free_lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR_OR_ZERO(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}

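/*
 * Shrinker callbacks: report and reclaim pages held on the free list, plus
 * whatever the heap's own ->shrink() operation can give back.
 */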
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int total = 0;

	total = ion_heap_freelist_size(heap) / PAGE_SIZE;
	if (heap->ops->shrink)
		total += heap->ops->shrink(heap, sc->gfp_mask, 0);
	return total;
}

static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int freed = 0;
	int to_scan = sc->nr_to_scan;

	if (to_scan == 0)
		return 0;

	/*
	 * shrink the free list first, no point in zeroing the memory if we're
	 * just going to reclaim it
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_drain(heap, to_scan * PAGE_SIZE) /
				PAGE_SIZE;

	to_scan -= freed;
	if (to_scan <= 0)
		return freed;

	if (heap->ops->shrink)
		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
	return freed;
}

void ion_heap_init_shrinker(struct ion_heap *heap)
{
	heap->shrinker.count_objects = ion_heap_shrink_count;
	heap->shrinker.scan_objects = ion_heap_shrink_scan;
	heap->shrinker.seeks = DEFAULT_SEEKS;
	heap->shrinker.batch = 0;
	register_shrinker(&heap->shrinker);
}

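/*
 * Instantiate a heap of the type described by @heap_data and fill in its
 * name and id.  Returns ERR_PTR(-EINVAL) on failure.
 */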
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}