ion.c revision 2540c73a5bf068b8c1c092fc3ce63c62ba949371
1/*
2
3 * drivers/staging/android/ion/ion.c
4 *
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/device.h>
19#include <linux/file.h>
20#include <linux/freezer.h>
21#include <linux/fs.h>
22#include <linux/anon_inodes.h>
23#include <linux/kthread.h>
24#include <linux/list.h>
25#include <linux/memblock.h>
26#include <linux/miscdevice.h>
27#include <linux/export.h>
28#include <linux/mm.h>
29#include <linux/mm_types.h>
30#include <linux/rbtree.h>
31#include <linux/slab.h>
32#include <linux/seq_file.h>
33#include <linux/uaccess.h>
34#include <linux/vmalloc.h>
35#include <linux/debugfs.h>
36#include <linux/dma-buf.h>
37
38#include "ion.h"
39#include "ion_priv.h"
40
41/**
42 * struct ion_device - the metadata of the ion device node
43 * @dev:		the actual misc device
44 * @buffers:		an rb tree of all the existing buffers
45 * @buffer_lock:	lock protecting the tree of buffers
46 * @lock:		rwsem protecting the tree of heaps and clients
47 * @heaps:		priority list of all the heaps in the system
48 * @clients:		an rb tree of all the existing clients
49 */
50struct ion_device {
51	struct miscdevice dev;
52	struct rb_root buffers;
53	struct mutex buffer_lock;
54	struct rw_semaphore lock;
55	struct plist_head heaps;
56	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
57			      unsigned long arg);
58	struct rb_root clients;
59	struct dentry *debug_root;
60};
61
62/**
63 * struct ion_client - a process/hw block local address space
64 * @node:		node in the tree of all clients
65 * @dev:		backpointer to ion device
66 * @handles:		an rb tree of all the handles in this client
67 * @lock:		lock protecting the tree of handles
68 * @name:		used for debugging
69 * @task:		used for debugging
70 *
71 * A client represents a list of buffers this client may access.
72 * The mutex stored here is used to protect both the tree of handles
73 * and the handles themselves, and should be held while modifying either.
74 */
75struct ion_client {
76	struct rb_node node;
77	struct ion_device *dev;
78	struct rb_root handles;
79	struct mutex lock;
80	const char *name;
81	struct task_struct *task;
82	pid_t pid;
83	struct dentry *debug_root;
84};
85
86/**
87 * ion_handle - a client local reference to a buffer
88 * @ref:		reference count
89 * @client:		back pointer to the client the buffer resides in
90 * @buffer:		pointer to the buffer
91 * @node:		node in the client's handle rbtree
92 * @kmap_cnt:		count of times this client has mapped to kernel
93 *
94 * Modifications to node and kmap_cnt should be protected by the
95 * lock in the client.  Other fields are never changed after
96 * initialization.
97 */
98struct ion_handle {
99	struct kref ref;
100	struct ion_client *client;
101	struct ion_buffer *buffer;
102	struct rb_node node;
103	unsigned int kmap_cnt;
104};
105
106bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
107{
108	return ((buffer->flags & ION_FLAG_CACHED) &&
109		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
110}
111
112bool ion_buffer_cached(struct ion_buffer *buffer)
113{
114	return !!(buffer->flags & ION_FLAG_CACHED);
115}
116
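/*
 * For buffers whose user mappings are faulted in page by page, the low bit
 * of each buffer->pages[] entry doubles as a dirty flag: ion_vm_fault()
 * sets it when userspace touches a page and ion_buffer_sync_for_device()
 * clears it again once the page has been synced.  The helpers below pack
 * and unpack that bit.
 */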
117static inline struct page *ion_buffer_page(struct page *page)
118{
119	return (struct page *)((unsigned long)page & ~(1UL));
120}
121
122static inline bool ion_buffer_page_is_dirty(struct page *page)
123{
124	return !!((unsigned long)page & 1UL);
125}
126
127static inline void ion_buffer_page_dirty(struct page **page)
128{
129	*page = (struct page *)((unsigned long)(*page) | 1UL);
130}
131
132static inline void ion_buffer_page_clean(struct page **page)
133{
134	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
135}
136
137/* this function should only be called while dev->lock is held */
138static void ion_buffer_add(struct ion_device *dev,
139			   struct ion_buffer *buffer)
140{
141	struct rb_node **p = &dev->buffers.rb_node;
142	struct rb_node *parent = NULL;
143	struct ion_buffer *entry;
144
145	while (*p) {
146		parent = *p;
147		entry = rb_entry(parent, struct ion_buffer, node);
148
149		if (buffer < entry) {
150			p = &(*p)->rb_left;
151		} else if (buffer > entry) {
152			p = &(*p)->rb_right;
153		} else {
154			pr_err("%s: buffer already found.\n", __func__);
155			BUG();
156		}
157	}
158
159	rb_link_node(&buffer->node, parent, p);
160	rb_insert_color(&buffer->node, &dev->buffers);
161}
162
163/* this function should only be called while dev->lock is held */
164static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
165				     struct ion_device *dev,
166				     unsigned long len,
167				     unsigned long align,
168				     unsigned long flags)
169{
170	struct ion_buffer *buffer;
171	struct sg_table *table;
172	struct scatterlist *sg;
173	int i, ret;
174
175	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
176	if (!buffer)
177		return ERR_PTR(-ENOMEM);
178
179	buffer->heap = heap;
180	buffer->flags = flags;
181	kref_init(&buffer->ref);
182
183	ret = heap->ops->allocate(heap, buffer, len, align, flags);
184
185	if (ret) {
186		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
187			goto err2;
188
189		ion_heap_freelist_drain(heap, 0);
190		ret = heap->ops->allocate(heap, buffer, len, align,
191					  flags);
192		if (ret)
193			goto err2;
194	}
195
196	buffer->dev = dev;
197	buffer->size = len;
198
199	table = heap->ops->map_dma(heap, buffer);
200	if (IS_ERR_OR_NULL(table)) {
201		heap->ops->free(buffer);
202		kfree(buffer);
203		return ERR_CAST(table);
204	}
205	buffer->sg_table = table;
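	/*
	 * For cached buffers that are not marked ION_FLAG_CACHED_NEEDS_SYNC
	 * the user mapping is faulted in one page at a time, so build a flat
	 * array of the buffer's pages that ion_vm_fault() can index by pgoff.
	 */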
206	if (ion_buffer_fault_user_mappings(buffer)) {
207		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
208		struct scatterlist *sg;
209		int i, j, k = 0;
210
211		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
212		if (!buffer->pages) {
213			ret = -ENOMEM;
214			goto err1;
215		}
216
217		for_each_sg(table->sgl, sg, table->nents, i) {
218			struct page *page = sg_page(sg);
219
220			for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
221				buffer->pages[k++] = page++;
222		}
223
224		if (ret)
225			goto err;
226	}
227
228	buffer->dev = dev;
229	buffer->size = len;
230	INIT_LIST_HEAD(&buffer->vmas);
231	mutex_init(&buffer->lock);
232	/* this will set up dma addresses for the sglist -- it is not
233	   technically correct as per the dma api -- a specific
234	   device isn't really taking ownership here.  However, in practice on
235	   our systems the only dma_address space is physical addresses.
236	   Additionally, we can't afford the overhead of invalidating every
237	   allocation via dma_map_sg. The implicit contract here is that
238	   memory coming from the heaps is ready for dma, i.e. if it has a
239	   cached mapping that mapping has been invalidated */
240	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
241		sg_dma_address(sg) = sg_phys(sg);
242	mutex_lock(&dev->buffer_lock);
243	ion_buffer_add(dev, buffer);
244	mutex_unlock(&dev->buffer_lock);
245	return buffer;
246
247err:
248	heap->ops->unmap_dma(heap, buffer);
249	heap->ops->free(buffer);
250err1:
251	if (buffer->pages)
252		vfree(buffer->pages);
253err2:
254	kfree(buffer);
255	return ERR_PTR(ret);
256}
257
258void ion_buffer_destroy(struct ion_buffer *buffer)
259{
260	if (WARN_ON(buffer->kmap_cnt > 0))
261		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
262	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
263	buffer->heap->ops->free(buffer);
264	if (buffer->pages)
265		vfree(buffer->pages);
266	kfree(buffer);
267}
268
269static void _ion_buffer_destroy(struct kref *kref)
270{
271	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
272	struct ion_heap *heap = buffer->heap;
273	struct ion_device *dev = buffer->dev;
274
275	mutex_lock(&dev->buffer_lock);
276	rb_erase(&buffer->node, &dev->buffers);
277	mutex_unlock(&dev->buffer_lock);
278
279	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
280		ion_heap_freelist_add(heap, buffer);
281	else
282		ion_buffer_destroy(buffer);
283}
284
285static void ion_buffer_get(struct ion_buffer *buffer)
286{
287	kref_get(&buffer->ref);
288}
289
290static int ion_buffer_put(struct ion_buffer *buffer)
291{
292	return kref_put(&buffer->ref, _ion_buffer_destroy);
293}
294
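/*
 * handle_count tracks how many client handles reference this buffer.  Once
 * it drops to zero the buffer is only reachable through dma_buf fds, and
 * the debugfs code reports it as orphaned.
 */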
295static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
296{
297	mutex_lock(&buffer->lock);
298	buffer->handle_count++;
299	mutex_unlock(&buffer->lock);
300}
301
302static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
303{
304	/*
305	 * when a buffer is removed from a handle, if it is not in
306	 * any other handles, copy the taskcomm and the pid of the
307	 * process it's being removed from into the buffer.  At this
308	 * point there will be no way to track what processes this buffer is
309	 * being used by, it only exists as a dma_buf file descriptor.
310	 * The taskcomm and pid can provide a debug hint as to where this fd
311	 * is in the system
312	 */
313	mutex_lock(&buffer->lock);
314	buffer->handle_count--;
315	BUG_ON(buffer->handle_count < 0);
316	if (!buffer->handle_count) {
317		struct task_struct *task;
318
319		task = current->group_leader;
320		get_task_comm(buffer->task_comm, task);
321		buffer->pid = task_pid_nr(task);
322	}
323	mutex_unlock(&buffer->lock);
324}
325
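/*
 * Wrap @buffer in a new handle for @client.  The handle takes its own
 * reference on the buffer; it is not inserted into the client's handle
 * tree until ion_handle_add() is called.
 */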
326static struct ion_handle *ion_handle_create(struct ion_client *client,
327				     struct ion_buffer *buffer)
328{
329	struct ion_handle *handle;
330
331	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
332	if (!handle)
333		return ERR_PTR(-ENOMEM);
334	kref_init(&handle->ref);
335	RB_CLEAR_NODE(&handle->node);
336	handle->client = client;
337	ion_buffer_get(buffer);
338	ion_buffer_add_to_handle(buffer);
339	handle->buffer = buffer;
340
341	return handle;
342}
343
344static void ion_handle_kmap_put(struct ion_handle *);
345
346static void ion_handle_destroy(struct kref *kref)
347{
348	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
349	struct ion_client *client = handle->client;
350	struct ion_buffer *buffer = handle->buffer;
351
352	mutex_lock(&buffer->lock);
353	while (handle->kmap_cnt)
354		ion_handle_kmap_put(handle);
355	mutex_unlock(&buffer->lock);
356
357	if (!RB_EMPTY_NODE(&handle->node))
358		rb_erase(&handle->node, &client->handles);
359
360	ion_buffer_remove_from_handle(buffer);
361	ion_buffer_put(buffer);
362
363	kfree(handle);
364}
365
366struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
367{
368	return handle->buffer;
369}
370
371static void ion_handle_get(struct ion_handle *handle)
372{
373	kref_get(&handle->ref);
374}
375
376static int ion_handle_put(struct ion_handle *handle)
377{
378	return kref_put(&handle->ref, ion_handle_destroy);
379}
380
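/*
 * Walk the client's handle tree looking for a handle that already wraps
 * @buffer.  Caller must hold client->lock.
 */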
381static struct ion_handle *ion_handle_lookup(struct ion_client *client,
382					    struct ion_buffer *buffer)
383{
384	struct rb_node *n;
385
386	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
387		struct ion_handle *handle = rb_entry(n, struct ion_handle,
388						     node);
389		if (handle->buffer == buffer)
390			return handle;
391	}
392	return NULL;
393}
394
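/*
 * Handles are keyed by their pointer value in the client's tree, so
 * validating a handle is a plain tree lookup.  Caller must hold
 * client->lock.
 */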
395static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
396{
397	struct rb_node *n = client->handles.rb_node;
398
399	while (n) {
400		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
401							  node);
402		if (handle < handle_node)
403			n = n->rb_left;
404		else if (handle > handle_node)
405			n = n->rb_right;
406		else
407			return true;
408	}
409	return false;
410}
411
412static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
413{
414	struct rb_node **p = &client->handles.rb_node;
415	struct rb_node *parent = NULL;
416	struct ion_handle *entry;
417
418	while (*p) {
419		parent = *p;
420		entry = rb_entry(parent, struct ion_handle, node);
421
422		if (handle < entry)
423			p = &(*p)->rb_left;
424		else if (handle > entry)
425			p = &(*p)->rb_right;
426		else
427			WARN(1, "%s: handle already found.", __func__);
428	}
429
430	rb_link_node(&handle->node, parent, p);
431	rb_insert_color(&handle->node, &client->handles);
432}
433
434struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
435			     size_t align, unsigned int heap_id_mask,
436			     unsigned int flags)
437{
438	struct ion_handle *handle;
439	struct ion_device *dev = client->dev;
440	struct ion_buffer *buffer = NULL;
441	struct ion_heap *heap;
442
443	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
444		 len, align, heap_id_mask, flags);
445	/*
446	 * traverse the list of heaps available in this system in priority
447	 * order.  If the heap type is supported by the client and matches the
448	 * request of the caller, allocate from it.  Repeat until allocation has
449	 * succeeded or all heaps have been tried
450	 */
451	if (WARN_ON(!len))
452		return ERR_PTR(-EINVAL);
453
454	len = PAGE_ALIGN(len);
455
456	down_read(&dev->lock);
457	plist_for_each_entry(heap, &dev->heaps, node) {
458		/* if the caller didn't specify this heap id */
459		if (!((1 << heap->id) & heap_id_mask))
460			continue;
461		buffer = ion_buffer_create(heap, dev, len, align, flags);
462		if (!IS_ERR_OR_NULL(buffer))
463			break;
464	}
465	up_read(&dev->lock);
466
467	if (buffer == NULL)
468		return ERR_PTR(-ENODEV);
469
470	if (IS_ERR(buffer))
471		return ERR_CAST(buffer);
472
473	handle = ion_handle_create(client, buffer);
474
475	/*
476	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
477	 * and ion_handle_create will take a second reference, drop one here
478	 */
479	ion_buffer_put(buffer);
480
481	if (!IS_ERR(handle)) {
482		mutex_lock(&client->lock);
483		ion_handle_add(client, handle);
484		mutex_unlock(&client->lock);
485	}
486
487
488	return handle;
489}
490EXPORT_SYMBOL(ion_alloc);
491
492void ion_free(struct ion_client *client, struct ion_handle *handle)
493{
494	bool valid_handle;
495
496	BUG_ON(client != handle->client);
497
498	mutex_lock(&client->lock);
499	valid_handle = ion_handle_validate(client, handle);
500
501	if (!valid_handle) {
502		WARN(1, "%s: invalid handle passed to free.\n", __func__);
503		mutex_unlock(&client->lock);
504		return;
505	}
506	ion_handle_put(handle);
507	mutex_unlock(&client->lock);
508}
509EXPORT_SYMBOL(ion_free);
510
511int ion_phys(struct ion_client *client, struct ion_handle *handle,
512	     ion_phys_addr_t *addr, size_t *len)
513{
514	struct ion_buffer *buffer;
515	int ret;
516
517	mutex_lock(&client->lock);
518	if (!ion_handle_validate(client, handle)) {
519		mutex_unlock(&client->lock);
520		return -EINVAL;
521	}
522
523	buffer = handle->buffer;
524
525	if (!buffer->heap->ops->phys) {
526		pr_err("%s: ion_phys is not implemented by this heap.\n",
527		       __func__);
528		mutex_unlock(&client->lock);
529		return -ENODEV;
530	}
531	mutex_unlock(&client->lock);
532	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
533	return ret;
534}
535EXPORT_SYMBOL(ion_phys);
536
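/*
 * Kernel mappings are reference counted at both the buffer and the handle
 * level: the buffer is mapped by the heap on the first request and torn
 * down again when the last count is dropped.  All of these helpers expect
 * buffer->lock to be held.
 */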
537static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
538{
539	void *vaddr;
540
541	if (buffer->kmap_cnt) {
542		buffer->kmap_cnt++;
543		return buffer->vaddr;
544	}
545	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
546	if (IS_ERR_OR_NULL(vaddr))
547		return vaddr;
548	buffer->vaddr = vaddr;
549	buffer->kmap_cnt++;
550	return vaddr;
551}
552
553static void *ion_handle_kmap_get(struct ion_handle *handle)
554{
555	struct ion_buffer *buffer = handle->buffer;
556	void *vaddr;
557
558	if (handle->kmap_cnt) {
559		handle->kmap_cnt++;
560		return buffer->vaddr;
561	}
562	vaddr = ion_buffer_kmap_get(buffer);
563	if (IS_ERR_OR_NULL(vaddr))
564		return vaddr;
565	handle->kmap_cnt++;
566	return vaddr;
567}
568
569static void ion_buffer_kmap_put(struct ion_buffer *buffer)
570{
571	buffer->kmap_cnt--;
572	if (!buffer->kmap_cnt) {
573		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
574		buffer->vaddr = NULL;
575	}
576}
577
578static void ion_handle_kmap_put(struct ion_handle *handle)
579{
580	struct ion_buffer *buffer = handle->buffer;
581
582	handle->kmap_cnt--;
583	if (!handle->kmap_cnt)
584		ion_buffer_kmap_put(buffer);
585}
586
587void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
588{
589	struct ion_buffer *buffer;
590	void *vaddr;
591
592	mutex_lock(&client->lock);
593	if (!ion_handle_validate(client, handle)) {
594		pr_err("%s: invalid handle passed to map_kernel.\n",
595		       __func__);
596		mutex_unlock(&client->lock);
597		return ERR_PTR(-EINVAL);
598	}
599
600	buffer = handle->buffer;
601
602	if (!handle->buffer->heap->ops->map_kernel) {
603		pr_err("%s: map_kernel is not implemented by this heap.\n",
604		       __func__);
605		mutex_unlock(&client->lock);
606		return ERR_PTR(-ENODEV);
607	}
608
609	mutex_lock(&buffer->lock);
610	vaddr = ion_handle_kmap_get(handle);
611	mutex_unlock(&buffer->lock);
612	mutex_unlock(&client->lock);
613	return vaddr;
614}
615EXPORT_SYMBOL(ion_map_kernel);
616
617void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
618{
619	struct ion_buffer *buffer;
620
621	mutex_lock(&client->lock);
622	buffer = handle->buffer;
623	mutex_lock(&buffer->lock);
624	ion_handle_kmap_put(handle);
625	mutex_unlock(&buffer->lock);
626	mutex_unlock(&client->lock);
627}
628EXPORT_SYMBOL(ion_unmap_kernel);
629
630static int ion_debug_client_show(struct seq_file *s, void *unused)
631{
632	struct ion_client *client = s->private;
633	struct rb_node *n;
634	size_t sizes[ION_NUM_HEAP_IDS] = {0};
635	const char *names[ION_NUM_HEAP_IDS] = {NULL};
636	int i;
637
638	mutex_lock(&client->lock);
639	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
640		struct ion_handle *handle = rb_entry(n, struct ion_handle,
641						     node);
642		unsigned int id = handle->buffer->heap->id;
643
644		if (!names[id])
645			names[id] = handle->buffer->heap->name;
646		sizes[id] += handle->buffer->size;
647	}
648	mutex_unlock(&client->lock);
649
650	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
651	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
652		if (!names[i])
653			continue;
654		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
655	}
656	return 0;
657}
658
659static int ion_debug_client_open(struct inode *inode, struct file *file)
660{
661	return single_open(file, ion_debug_client_show, inode->i_private);
662}
663
664static const struct file_operations debug_client_fops = {
665	.open = ion_debug_client_open,
666	.read = seq_read,
667	.llseek = seq_lseek,
668	.release = single_release,
669};
670
671struct ion_client *ion_client_create(struct ion_device *dev,
672				     const char *name)
673{
674	struct ion_client *client;
675	struct task_struct *task;
676	struct rb_node **p;
677	struct rb_node *parent = NULL;
678	struct ion_client *entry;
679	char debug_name[64];
680	pid_t pid;
681
682	get_task_struct(current->group_leader);
683	task_lock(current->group_leader);
684	pid = task_pid_nr(current->group_leader);
685	/* don't bother to store task struct for kernel threads,
686	   they can't be killed anyway */
687	if (current->group_leader->flags & PF_KTHREAD) {
688		put_task_struct(current->group_leader);
689		task = NULL;
690	} else {
691		task = current->group_leader;
692	}
693	task_unlock(current->group_leader);
694
695	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
696	if (!client) {
697		if (task)
698			put_task_struct(current->group_leader);
699		return ERR_PTR(-ENOMEM);
700	}
701
702	client->dev = dev;
703	client->handles = RB_ROOT;
704	mutex_init(&client->lock);
705	client->name = name;
706	client->task = task;
707	client->pid = pid;
708
709	down_write(&dev->lock);
710	p = &dev->clients.rb_node;
711	while (*p) {
712		parent = *p;
713		entry = rb_entry(parent, struct ion_client, node);
714
715		if (client < entry)
716			p = &(*p)->rb_left;
717		else if (client > entry)
718			p = &(*p)->rb_right;
719	}
720	rb_link_node(&client->node, parent, p);
721	rb_insert_color(&client->node, &dev->clients);
722
723	snprintf(debug_name, 64, "%u", client->pid);
724	client->debug_root = debugfs_create_file(debug_name, 0664,
725						 dev->debug_root, client,
726						 &debug_client_fops);
727	up_write(&dev->lock);
728
729	return client;
730}
731EXPORT_SYMBOL(ion_client_create);
732
733void ion_client_destroy(struct ion_client *client)
734{
735	struct ion_device *dev = client->dev;
736	struct rb_node *n;
737
738	pr_debug("%s: %d\n", __func__, __LINE__);
739	while ((n = rb_first(&client->handles))) {
740		struct ion_handle *handle = rb_entry(n, struct ion_handle,
741						     node);
742		ion_handle_destroy(&handle->ref);
743	}
744	down_write(&dev->lock);
745	if (client->task)
746		put_task_struct(client->task);
747	rb_erase(&client->node, &dev->clients);
748	debugfs_remove_recursive(client->debug_root);
749	up_write(&dev->lock);
750
751	kfree(client);
752}
753EXPORT_SYMBOL(ion_client_destroy);
754
755struct sg_table *ion_sg_table(struct ion_client *client,
756			      struct ion_handle *handle)
757{
758	struct ion_buffer *buffer;
759	struct sg_table *table;
760
761	mutex_lock(&client->lock);
762	if (!ion_handle_validate(client, handle)) {
763		pr_err("%s: invalid handle passed to map_dma.\n",
764		       __func__);
765		mutex_unlock(&client->lock);
766		return ERR_PTR(-EINVAL);
767	}
768	buffer = handle->buffer;
769	table = buffer->sg_table;
770	mutex_unlock(&client->lock);
771	return table;
772}
773EXPORT_SYMBOL(ion_sg_table);
774
775static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
776				       struct device *dev,
777				       enum dma_data_direction direction);
778
779static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
780					enum dma_data_direction direction)
781{
782	struct dma_buf *dmabuf = attachment->dmabuf;
783	struct ion_buffer *buffer = dmabuf->priv;
784
785	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
786	return buffer->sg_table;
787}
788
789static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
790			      struct sg_table *table,
791			      enum dma_data_direction direction)
792{
793}
794
795struct ion_vma_list {
796	struct list_head list;
797	struct vm_area_struct *vma;
798};
799
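/*
 * Prepare a buffer that uses faulted user mappings for device access:
 * perform CPU-to-device cache maintenance on every page userspace has
 * dirtied, then zap the user mappings so that the next CPU access faults
 * again and re-marks its page as dirty.
 */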
800static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
801				       struct device *dev,
802				       enum dma_data_direction dir)
803{
804	struct ion_vma_list *vma_list;
805	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
806	int i;
807
808	pr_debug("%s: syncing for device %s\n", __func__,
809		 dev ? dev_name(dev) : "null");
810
811	if (!ion_buffer_fault_user_mappings(buffer))
812		return;
813
814	mutex_lock(&buffer->lock);
815	for (i = 0; i < pages; i++) {
816		struct page *page = buffer->pages[i];
817
818		if (ion_buffer_page_is_dirty(page))
819			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
820		ion_buffer_page_clean(buffer->pages + i);
821	}
822	list_for_each_entry(vma_list, &buffer->vmas, list) {
823		struct vm_area_struct *vma = vma_list->vma;
824
825		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
826			       NULL);
827	}
828	mutex_unlock(&buffer->lock);
829}
830
831int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
832{
833	struct ion_buffer *buffer = vma->vm_private_data;
834	int ret;
835
836	mutex_lock(&buffer->lock);
837	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
838
839	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
840	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
841			     ion_buffer_page(buffer->pages[vmf->pgoff]));
842	mutex_unlock(&buffer->lock);
843	if (ret)
844		return VM_FAULT_ERROR;
845
846	return VM_FAULT_NOPAGE;
847}
848
849static void ion_vm_open(struct vm_area_struct *vma)
850{
851	struct ion_buffer *buffer = vma->vm_private_data;
852	struct ion_vma_list *vma_list;
853
854	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
855	if (!vma_list)
856		return;
857	vma_list->vma = vma;
858	mutex_lock(&buffer->lock);
859	list_add(&vma_list->list, &buffer->vmas);
860	mutex_unlock(&buffer->lock);
861	pr_debug("%s: adding %p\n", __func__, vma);
862}
863
864static void ion_vm_close(struct vm_area_struct *vma)
865{
866	struct ion_buffer *buffer = vma->vm_private_data;
867	struct ion_vma_list *vma_list, *tmp;
868
869	pr_debug("%s\n", __func__);
870	mutex_lock(&buffer->lock);
871	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
872		if (vma_list->vma != vma)
873			continue;
874		list_del(&vma_list->list);
875		kfree(vma_list);
876		pr_debug("%s: deleting %p\n", __func__, vma);
877		break;
878	}
879	mutex_unlock(&buffer->lock);
880}
881
882struct vm_operations_struct ion_vma_ops = {
883	.open = ion_vm_open,
884	.close = ion_vm_close,
885	.fault = ion_vm_fault,
886};
887
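/*
 * Two mmap flavours: buffers that need per-page fault handling install
 * ion_vma_ops and are populated lazily by ion_vm_fault(); everything else
 * is mapped up front by the heap's map_user() op, write-combined when the
 * buffer is uncached.
 */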
888static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
889{
890	struct ion_buffer *buffer = dmabuf->priv;
891	int ret = 0;
892
893	if (!buffer->heap->ops->map_user) {
894		pr_err("%s: this heap does not define a method for mapping to userspace\n",
895		       __func__);
896		return -EINVAL;
897	}
898
899	if (ion_buffer_fault_user_mappings(buffer)) {
900		vma->vm_private_data = buffer;
901		vma->vm_ops = &ion_vma_ops;
902		ion_vm_open(vma);
903		return 0;
904	}
905
906	if (!(buffer->flags & ION_FLAG_CACHED))
907		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
908
909	mutex_lock(&buffer->lock);
910	/* now map it to userspace */
911	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
912	mutex_unlock(&buffer->lock);
913
914	if (ret)
915		pr_err("%s: failure mapping buffer to userspace\n",
916		       __func__);
917
918	return ret;
919}
920
921static void ion_dma_buf_release(struct dma_buf *dmabuf)
922{
923	struct ion_buffer *buffer = dmabuf->priv;
924	ion_buffer_put(buffer);
925}
926
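/*
 * kmap on the dma_buf simply offsets into buffer->vaddr, so it relies on
 * ion_dma_buf_begin_cpu_access() having already mapped the buffer into the
 * kernel.
 */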
927static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
928{
929	struct ion_buffer *buffer = dmabuf->priv;
930	return buffer->vaddr + offset * PAGE_SIZE;
931}
932
933static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
934			       void *ptr)
935{
936	return;
937}
938
939static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
940					size_t len,
941					enum dma_data_direction direction)
942{
943	struct ion_buffer *buffer = dmabuf->priv;
944	void *vaddr;
945
946	if (!buffer->heap->ops->map_kernel) {
947		pr_err("%s: map kernel is not implemented by this heap.\n",
948		       __func__);
949		return -ENODEV;
950	}
951
952	mutex_lock(&buffer->lock);
953	vaddr = ion_buffer_kmap_get(buffer);
954	mutex_unlock(&buffer->lock);
955	if (IS_ERR(vaddr))
956		return PTR_ERR(vaddr);
957	if (!vaddr)
958		return -ENOMEM;
959	return 0;
960}
961
962static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
963				       size_t len,
964				       enum dma_data_direction direction)
965{
966	struct ion_buffer *buffer = dmabuf->priv;
967
968	mutex_lock(&buffer->lock);
969	ion_buffer_kmap_put(buffer);
970	mutex_unlock(&buffer->lock);
971}
972
973struct dma_buf_ops dma_buf_ops = {
974	.map_dma_buf = ion_map_dma_buf,
975	.unmap_dma_buf = ion_unmap_dma_buf,
976	.mmap = ion_mmap,
977	.release = ion_dma_buf_release,
978	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
979	.end_cpu_access = ion_dma_buf_end_cpu_access,
980	.kmap_atomic = ion_dma_buf_kmap,
981	.kunmap_atomic = ion_dma_buf_kunmap,
982	.kmap = ion_dma_buf_kmap,
983	.kunmap = ion_dma_buf_kunmap,
984};
985
986struct dma_buf *ion_share_dma_buf(struct ion_client *client,
987						struct ion_handle *handle)
988{
989	struct ion_buffer *buffer;
990	struct dma_buf *dmabuf;
991	bool valid_handle;
992
993	mutex_lock(&client->lock);
994	valid_handle = ion_handle_validate(client, handle);
995	mutex_unlock(&client->lock);
996	if (!valid_handle) {
997		WARN(1, "%s: invalid handle passed to share.\n", __func__);
998		return ERR_PTR(-EINVAL);
999	}
1000
1001	buffer = handle->buffer;
1002	ion_buffer_get(buffer);
1003	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1004	if (IS_ERR(dmabuf)) {
1005		ion_buffer_put(buffer);
1006		return dmabuf;
1007	}
1008
1009	return dmabuf;
1010}
1011EXPORT_SYMBOL(ion_share_dma_buf);
1012
1013int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1014{
1015	struct dma_buf *dmabuf;
1016	int fd;
1017
1018	dmabuf = ion_share_dma_buf(client, handle);
1019	if (IS_ERR(dmabuf))
1020		return PTR_ERR(dmabuf);
1021
1022	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1023	if (fd < 0)
1024		dma_buf_put(dmabuf);
1025
1026	return fd;
1027}
1028EXPORT_SYMBOL(ion_share_dma_buf_fd);
1029
1030struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1031{
1032	struct dma_buf *dmabuf;
1033	struct ion_buffer *buffer;
1034	struct ion_handle *handle;
1035
1036	dmabuf = dma_buf_get(fd);
1037	if (IS_ERR_OR_NULL(dmabuf))
1038		return ERR_CAST(dmabuf);
1039	/* if this memory came from ion */
1040
1041	if (dmabuf->ops != &dma_buf_ops) {
1042		pr_err("%s: can not import dmabuf from another exporter\n",
1043		       __func__);
1044		dma_buf_put(dmabuf);
1045		return ERR_PTR(-EINVAL);
1046	}
1047	buffer = dmabuf->priv;
1048
1049	mutex_lock(&client->lock);
1050	/* if a handle exists for this buffer just take a reference to it */
1051	handle = ion_handle_lookup(client, buffer);
1052	if (!IS_ERR_OR_NULL(handle)) {
1053		ion_handle_get(handle);
1054		goto end;
1055	}
1056	handle = ion_handle_create(client, buffer);
1057	if (IS_ERR_OR_NULL(handle))
1058		goto end;
1059	ion_handle_add(client, handle);
1060end:
1061	mutex_unlock(&client->lock);
1062	dma_buf_put(dmabuf);
1063	return handle;
1064}
1065EXPORT_SYMBOL(ion_import_dma_buf);
1066
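/*
 * Back end of ION_IOC_SYNC: write the buffer's pages back to memory for
 * device access.  A NULL struct device is passed to dma_sync_sg_for_device(),
 * matching the assumption made in ion_buffer_create() that dma addresses
 * here are plain physical addresses.
 */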
1067static int ion_sync_for_device(struct ion_client *client, int fd)
1068{
1069	struct dma_buf *dmabuf;
1070	struct ion_buffer *buffer;
1071
1072	dmabuf = dma_buf_get(fd);
1073	if (IS_ERR_OR_NULL(dmabuf))
1074		return PTR_ERR(dmabuf);
1075
1076	/* if this memory came from ion */
1077	if (dmabuf->ops != &dma_buf_ops) {
1078		pr_err("%s: can not sync dmabuf from another exporter\n",
1079		       __func__);
1080		dma_buf_put(dmabuf);
1081		return -EINVAL;
1082	}
1083	buffer = dmabuf->priv;
1084
1085	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1086			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1087	dma_buf_put(dmabuf);
1088	return 0;
1089}
1090
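/*
 * ioctl handler for /dev/ion: covers allocation and free, exporting a
 * handle as a dma_buf fd (ION_IOC_SHARE/MAP), importing a dma_buf fd,
 * cache sync, and device-specific ION_IOC_CUSTOM commands.
 */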
1091static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1092{
1093	struct ion_client *client = filp->private_data;
1094
1095	switch (cmd) {
1096	case ION_IOC_ALLOC:
1097	{
1098		struct ion_allocation_data data;
1099
1100		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1101			return -EFAULT;
1102		data.handle = ion_alloc(client, data.len, data.align,
1103					     data.heap_id_mask, data.flags);
1104
1105		if (IS_ERR(data.handle))
1106			return PTR_ERR(data.handle);
1107
1108		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1109			ion_free(client, data.handle);
1110			return -EFAULT;
1111		}
1112		break;
1113	}
1114	case ION_IOC_FREE:
1115	{
1116		struct ion_handle_data data;
1117		bool valid;
1118
1119		if (copy_from_user(&data, (void __user *)arg,
1120				   sizeof(struct ion_handle_data)))
1121			return -EFAULT;
1122		mutex_lock(&client->lock);
1123		valid = ion_handle_validate(client, data.handle);
1124		mutex_unlock(&client->lock);
1125		if (!valid)
1126			return -EINVAL;
1127		ion_free(client, data.handle);
1128		break;
1129	}
1130	case ION_IOC_SHARE:
1131	case ION_IOC_MAP:
1132	{
1133		struct ion_fd_data data;
1134
1135		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1136			return -EFAULT;
1137		data.fd = ion_share_dma_buf_fd(client, data.handle);
1138		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1139			return -EFAULT;
1140		if (data.fd < 0)
1141			return data.fd;
1142		break;
1143	}
1144	case ION_IOC_IMPORT:
1145	{
1146		struct ion_fd_data data;
1147		int ret = 0;
1148		if (copy_from_user(&data, (void __user *)arg,
1149				   sizeof(struct ion_fd_data)))
1150			return -EFAULT;
1151		data.handle = ion_import_dma_buf(client, data.fd);
1152		if (IS_ERR(data.handle)) {
1153			ret = PTR_ERR(data.handle);
1154			data.handle = NULL;
1155		}
1156		if (copy_to_user((void __user *)arg, &data,
1157				 sizeof(struct ion_fd_data)))
1158			return -EFAULT;
1159		if (ret < 0)
1160			return ret;
1161		break;
1162	}
1163	case ION_IOC_SYNC:
1164	{
1165		struct ion_fd_data data;
1166		if (copy_from_user(&data, (void __user *)arg,
1167				   sizeof(struct ion_fd_data)))
1168			return -EFAULT;
1169		ion_sync_for_device(client, data.fd);
1170		break;
1171	}
1172	case ION_IOC_CUSTOM:
1173	{
1174		struct ion_device *dev = client->dev;
1175		struct ion_custom_data data;
1176
1177		if (!dev->custom_ioctl)
1178			return -ENOTTY;
1179		if (copy_from_user(&data, (void __user *)arg,
1180				sizeof(struct ion_custom_data)))
1181			return -EFAULT;
1182		return dev->custom_ioctl(client, data.cmd, data.arg);
1183	}
1184	default:
1185		return -ENOTTY;
1186	}
1187	return 0;
1188}
1189
1190static int ion_release(struct inode *inode, struct file *file)
1191{
1192	struct ion_client *client = file->private_data;
1193
1194	pr_debug("%s: %d\n", __func__, __LINE__);
1195	ion_client_destroy(client);
1196	return 0;
1197}
1198
1199static int ion_open(struct inode *inode, struct file *file)
1200{
1201	struct miscdevice *miscdev = file->private_data;
1202	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1203	struct ion_client *client;
1204
1205	pr_debug("%s: %d\n", __func__, __LINE__);
1206	client = ion_client_create(dev, "user");
1207	if (IS_ERR_OR_NULL(client))
1208		return PTR_ERR(client);
1209	file->private_data = client;
1210
1211	return 0;
1212}
1213
1214static const struct file_operations ion_fops = {
1215	.owner          = THIS_MODULE,
1216	.open           = ion_open,
1217	.release        = ion_release,
1218	.unlocked_ioctl = ion_ioctl,
1219};
1220
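/* sum the sizes of all handles this client holds against the given heap id */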
1221static size_t ion_debug_heap_total(struct ion_client *client,
1222				   unsigned int id)
1223{
1224	size_t size = 0;
1225	struct rb_node *n;
1226
1227	mutex_lock(&client->lock);
1228	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1229		struct ion_handle *handle = rb_entry(n,
1230						     struct ion_handle,
1231						     node);
1232		if (handle->buffer->heap->id == id)
1233			size += handle->buffer->size;
1234	}
1235	mutex_unlock(&client->lock);
1236	return size;
1237}
1238
1239static int ion_debug_heap_show(struct seq_file *s, void *unused)
1240{
1241	struct ion_heap *heap = s->private;
1242	struct ion_device *dev = heap->dev;
1243	struct rb_node *n;
1244	size_t total_size = 0;
1245	size_t total_orphaned_size = 0;
1246
1247	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1248	seq_printf(s, "----------------------------------------------------\n");
1249
1250	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1251		struct ion_client *client = rb_entry(n, struct ion_client,
1252						     node);
1253		size_t size = ion_debug_heap_total(client, heap->id);
1254		if (!size)
1255			continue;
1256		if (client->task) {
1257			char task_comm[TASK_COMM_LEN];
1258
1259			get_task_comm(task_comm, client->task);
1260			seq_printf(s, "%16s %16u %16zu\n", task_comm,
1261				   client->pid, size);
1262		} else {
1263			seq_printf(s, "%16s %16u %16zu\n", client->name,
1264				   client->pid, size);
1265		}
1266	}
1267	seq_printf(s, "----------------------------------------------------\n");
1268	seq_printf(s,
1269		   "orphaned allocations (info is from last known client):\n");
1270	mutex_lock(&dev->buffer_lock);
1271	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1272		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1273						     node);
1274		if (buffer->heap->id != heap->id)
1275			continue;
1276		total_size += buffer->size;
1277		if (!buffer->handle_count) {
1278			seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm,
1279				   buffer->pid, buffer->size, buffer->kmap_cnt,
1280				   atomic_read(&buffer->ref.refcount));
1281			total_orphaned_size += buffer->size;
1282		}
1283	}
1284	mutex_unlock(&dev->buffer_lock);
1285	seq_printf(s, "----------------------------------------------------\n");
1286	seq_printf(s, "%16s %16zu\n", "total orphaned",
1287		   total_orphaned_size);
1288	seq_printf(s, "%16s %16zu\n", "total ", total_size);
1289	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1290		seq_printf(s, "%16s %16zu\n", "deferred free",
1291				heap->free_list_size);
1292	seq_printf(s, "----------------------------------------------------\n");
1293
1294	if (heap->debug_show)
1295		heap->debug_show(heap, s, unused);
1296
1297	return 0;
1298}
1299
1300static int ion_debug_heap_open(struct inode *inode, struct file *file)
1301{
1302	return single_open(file, ion_debug_heap_show, inode->i_private);
1303}
1304
1305static const struct file_operations debug_heap_fops = {
1306	.open = ion_debug_heap_open,
1307	.read = seq_read,
1308	.llseek = seq_lseek,
1309	.release = single_release,
1310};
1311
1312#ifdef DEBUG_HEAP_SHRINKER
1313static int debug_shrink_set(void *data, u64 val)
1314{
1315	struct ion_heap *heap = data;
1316	struct shrink_control sc;
1317	int objs;
1318
1319	sc.gfp_mask = -1;
1320	sc.nr_to_scan = 0;
1321
1322	if (!val)
1323		return 0;
1324
1325	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1326	sc.nr_to_scan = objs;
1327
1328	heap->shrinker.shrink(&heap->shrinker, &sc);
1329	return 0;
1330}
1331
1332static int debug_shrink_get(void *data, u64 *val)
1333{
1334	struct ion_heap *heap = data;
1335	struct shrink_control sc;
1336	int objs;
1337
1338	sc.gfp_mask = -1;
1339	sc.nr_to_scan = 0;
1340
1341	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1342	*val = objs;
1343	return 0;
1344}
1345
1346DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1347			debug_shrink_set, "%llu\n");
1348#endif
1349
1350void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1351{
1352	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1353	    !heap->ops->unmap_dma)
1354		pr_err("%s: can not add heap with invalid ops struct.\n",
1355		       __func__);
1356
1357	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1358		ion_heap_init_deferred_free(heap);
1359
1360	heap->dev = dev;
1361	down_write(&dev->lock);
1362	/* use negative heap->id to reverse the priority -- when traversing
1363	   the list later attempt higher id numbers first */
1364	plist_node_init(&heap->node, -heap->id);
1365	plist_add(&heap->node, &dev->heaps);
1366	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1367			    &debug_heap_fops);
1368#ifdef DEBUG_HEAP_SHRINKER
1369	if (heap->shrinker.shrink) {
1370		char debug_name[64];
1371
1372		snprintf(debug_name, 64, "%s_shrink", heap->name);
1373		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
1374				    &debug_shrink_fops);
1375	}
1376#endif
1377	up_write(&dev->lock);
1378}
1379
1380struct ion_device *ion_device_create(long (*custom_ioctl)
1381				     (struct ion_client *client,
1382				      unsigned int cmd,
1383				      unsigned long arg))
1384{
1385	struct ion_device *idev;
1386	int ret;
1387
1388	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1389	if (!idev)
1390		return ERR_PTR(-ENOMEM);
1391
1392	idev->dev.minor = MISC_DYNAMIC_MINOR;
1393	idev->dev.name = "ion";
1394	idev->dev.fops = &ion_fops;
1395	idev->dev.parent = NULL;
1396	ret = misc_register(&idev->dev);
1397	if (ret) {
1398		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
1399		return ERR_PTR(ret);
1400	}
1401
1402	idev->debug_root = debugfs_create_dir("ion", NULL);
1403	if (IS_ERR_OR_NULL(idev->debug_root))
1404		pr_err("ion: failed to create debug files.\n");
1405
1406	idev->custom_ioctl = custom_ioctl;
1407	idev->buffers = RB_ROOT;
1408	mutex_init(&idev->buffer_lock);
1409	init_rwsem(&idev->lock);
1410	plist_head_init(&idev->heaps);
1411	idev->clients = RB_ROOT;
1412	return idev;
1413}
1414
1415void ion_device_destroy(struct ion_device *dev)
1416{
1417	misc_deregister(&dev->dev);
1418	/* XXX need to free the heaps and clients ? */
1419	kfree(dev);
1420}
1421
1422void __init ion_reserve(struct ion_platform_data *data)
1423{
1424	int i;
1425
1426	for (i = 0; i < data->nr; i++) {
1427		if (data->heaps[i].size == 0)
1428			continue;
1429
1430		if (data->heaps[i].base == 0) {
1431			phys_addr_t paddr;
1432			paddr = memblock_alloc_base(data->heaps[i].size,
1433						    data->heaps[i].align,
1434						    MEMBLOCK_ALLOC_ANYWHERE);
1435			if (!paddr) {
1436				pr_err("%s: error allocating memblock for "
1437				       "heap %d\n",
1438					__func__, i);
1439				continue;
1440			}
1441			data->heaps[i].base = paddr;
1442		} else {
1443			int ret = memblock_reserve(data->heaps[i].base,
1444					       data->heaps[i].size);
1445			if (ret)
1446				pr_err("memblock reserve of %zx@%lx failed\n",
1447				       data->heaps[i].size,
1448				       data->heaps[i].base);
1449		}
1450		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1451			data->heaps[i].name,
1452			data->heaps[i].base,
1453			data->heaps[i].size);
1454	}
1455}
1456