/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

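/*
 * Allocations are attempted in decreasing page orders (with 4 KiB pages,
 * order 8 = 1 MiB, order 4 = 64 KiB, order 0 = 4 KiB).  High-order
 * attempts are opportunistic: they must neither block on reclaim nor
 * retry, so __GFP_WAIT is cleared and __GFP_NORETRY is set, and a
 * failure simply falls through to the next smaller order.
 */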
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_WAIT;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static inline unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *pools[];
};

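/*
 * Uncached buffers come from the per-order page pools; pooled pages are
 * already zeroed, either at their original allocation (__GFP_ZERO) or in
 * ion_system_heap_free() before being returned to the pool.  Cached
 * buffers bypass the pools: they are allocated fresh and synced for the
 * device once here, with CPU cache maintenance handled explicitly while
 * the buffer is in use.
 */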
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags | __GFP_COMP, order);
		if (!page)
			return NULL;
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	}

	return page;
}

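/*
 * Uncached pages normally go back to their pool for reuse.  If the
 * buffer is being freed on behalf of the shrinker
 * (ION_PRIV_FLAG_SHRINKER_FREE), the page is released to the system
 * immediately rather than recycled.  Cached pages never sat in a pool
 * and are simply freed.
 */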
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page)
{
	unsigned int order = compound_order(page);
	bool cached = ion_buffer_cached(buffer);

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
			ion_page_pool_free_immediate(pool, page);
		else
			ion_page_pool_free(pool, page);
	} else {
		__free_pages(page, order);
	}
}

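/*
 * Return the largest chunk not bigger than the remaining size, starting
 * the search at max_order so that orders which already failed are not
 * retried for the rest of this allocation.
 */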
static struct page *alloc_largest_available(struct ion_system_heap *heap,
					    struct ion_buffer *buffer,
					    unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		return page;
	}

	return NULL;
}

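/*
 * Assemble the buffer from variable-order chunks, biggest first, then
 * describe the result with one scatterlist entry per chunk.  Requests
 * larger than half of system RAM are rejected up front as hopeless
 * rather than being allowed to deplete the pools before failing.
 */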
static int ion_system_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (size / PAGE_SIZE > totalram_pages / 2)
		return -ENOMEM;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		page = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!page)
			goto free_pages;
		list_add_tail(&page->lru, &pages);
		size_remaining -= PAGE_SIZE << compound_order(page);
		max_order = compound_order(page);
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto free_pages;

	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_table;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	buffer->priv_virt = table;
	return 0;

free_table:
	kfree(table);
free_pages:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		free_buffer_page(sys_heap, buffer, page);
	return -ENOMEM;
}

static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_system_heap *sys_heap = container_of(buffer->heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	int i;

	/*
	 * Uncached pages go back to the page pools, so zero them before
	 * returning for security purposes (other allocations are zeroed
	 * at alloc time).  Pages freed on behalf of the shrinker go
	 * straight back to the system and need no zeroing either.
	 */
	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg));
	sg_free_table(table);
	kfree(table);
}

static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
}

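/*
 * Shrinker callback: drain up to nr_to_scan pages from the page pools
 * back to the system.  A zero nr_to_scan is a query; in that case
 * ion_page_pool_shrink() only reports how many pages each pool holds.
 */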
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
{
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
	}

	return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.shrink = ion_system_heap_shrink,
};

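/*
 * debugfs hook: report how many pages of each order sit in the pools,
 * split into highmem and lowmem counts.
 */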
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (PAGE_SIZE << pool->order) * pool->low_count);
	}
	return 0;
}

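/*
 * Create the heap with one page pool per supported order.  Pools for
 * orders above 4 use the non-blocking high-order flags.  Buffer frees
 * are deferred to the heap's free thread via ION_HEAP_FLAG_DEFER_FREE.
 */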
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap) +
		       sizeof(struct ion_page_pool *) * num_orders,
		       GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto destroy_pools;
		heap->pools[i] = pool;
	}

	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

destroy_pools:
	while (i--)
		ion_page_pool_destroy(heap->pools[i]);
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap);
}

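/*
 * The contig heap hands out a single physically contiguous run.  The
 * compound allocation is split into individual pages so that the tail
 * pages beyond the page-aligned length can be returned immediately, and
 * so the remainder can later be freed page by page.
 */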
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	page = alloc_pages(low_order_gfp_flags, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, len, 0);

	buffer->priv_virt = table;

	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

	return 0;

free_table:
	kfree(table);
free_pages:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);

	return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}

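/*
 * The buffer is physically contiguous, so its physical address is simply
 * that of the first page.
 */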
static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);

	*addr = page_to_phys(page);
	*len = buffer->size;
	return 0;
}

static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}