/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the list of heaps and the tree of clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook for device-specific ioctls (ION_IOC_CUSTOM)
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		debugfs root directory for ion
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves; it should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * client's lock.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

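/*
 * For buffers that fault in their user mappings (cached buffers without
 * ION_FLAG_CACHED_NEEDS_SYNC), the per-page dirty state is tracked in
 * bit 0 of the corresponding entry in buffer->pages.  The helpers below
 * set, clear and test that bit, and recover the real page pointer.
 */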
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

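/*
 * ion_buffer_create() asks the heap to allocate backing memory, maps it
 * for dma via heap->ops->map_dma, and inserts the buffer into the
 * device's buffer rbtree.  For deferred-free heaps a failed allocation
 * is retried once after draining the heap's freelist.  Buffers that
 * fault in user mappings additionally get a buffer->pages array so the
 * fault and sync paths can track individual pages.
 */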
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by; it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

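/*
 * An ion_handle pins its buffer: ion_handle_create() takes a reference
 * on the buffer and bumps its handle_count; ion_handle_destroy() drops
 * both again and removes the handle from the client's idr and rbtree.
 */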
static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_uhandle_get(struct ion_client *client, int id)
{
	return idr_find(&client->idr, id);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	return (ion_uhandle_get(client, handle->id) == handle);
}

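/*
 * Allocate a client-unique id for the handle (starting at 1, so 0 is
 * never a valid id) and insert the handle into the client's rbtree,
 * which is keyed by buffer pointer.  The old idr_pre_get()/
 * idr_get_new_above() pair is retried while it returns -EAGAIN.
 */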
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int rc;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	do {
		int id;

		rc = idr_pre_get(&client->idr, GFP_KERNEL);
		if (!rc)
			return -ENOMEM;
		rc = idr_get_new_above(&client->idr, handle, 1, &id);
		handle->id = id;
	} while (rc == -EAGAIN);

	if (rc < 0)
		return rc;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

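/*
 * ion_alloc() is the main entry point for in-kernel clients.  A typical
 * sequence (sketch only -- the heap id mask is whatever the platform
 * defines for its heaps) looks roughly like:
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, size, PAGE_SIZE, heap_id_mask, 0);
 *	vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */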
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client and matches
	 * the request of the caller, allocate from it.  Repeat until
	 * allocate has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
	mutex_unlock(&client->lock);

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

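/*
 * Kernel mappings are reference counted at two levels: buffer->kmap_cnt
 * counts mappings of the underlying buffer (the heap's map_kernel() is
 * only called for the first user and unmap_kernel() for the last), while
 * handle->kmap_cnt tracks how many of those references this particular
 * client holds.
 */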
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

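/*
 * Create a client against the given ion device.  For userspace callers a
 * reference to the group leader's task_struct is kept so the client can
 * be attributed to a process in debugfs; kernel threads are tracked by
 * name only.  Each client gets a debugfs file named after its pid.
 */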
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_remove_all(&client->idr);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

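/*
 * Before a buffer is handed to a device, walk its page array and perform
 * cache maintenance (__dma_page_cpu_to_dev) on every page that was
 * dirtied through a faulting user mapping, then zap those user mappings
 * so the pages fault in again and can be re-tracked as dirty.
 */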
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);

	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     ion_buffer_page(buffer->pages[vmf->pgoff]));
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

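/*
 * Two mmap strategies: buffers that fault in their user mappings install
 * ion_vma_ops and let ion_vm_fault() populate the vma page by page;
 * everything else is mapped up front by the heap's map_user(), with the
 * mapping made write-combined when the buffer is not cached.
 */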
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

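/*
 * Exporting takes its own reference on the buffer; it is dropped by
 * ion_dma_buf_release() when the last user of the dma_buf goes away, so
 * a shared buffer outlives the handle (and even the client) it came from.
 */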
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

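/*
 * Importing only works for dma_bufs that were exported by ion itself
 * (checked via dmabuf->ops).  If the client already holds a handle for
 * the underlying buffer, that handle is reused with an extra reference
 * instead of creating a duplicate.
 */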
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

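/*
 * Userspace talks to ion through ioctls on /dev/ion: ION_IOC_ALLOC
 * returns a handle id, ION_IOC_SHARE/ION_IOC_MAP turn a handle into a
 * dma_buf fd that can be mmap()ed or passed to other processes,
 * ION_IOC_IMPORT does the reverse, ION_IOC_SYNC flushes a buffer for
 * device access, and ION_IOC_FREE drops the handle.  Roughly (error
 * handling omitted):
 *
 *	struct ion_allocation_data alloc = { .len = size, ... };
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 */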
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_alloc(client, data.len, data.align,
					     data.heap_id_mask, data.flags);

		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		handle = ion_uhandle_get(client, data.handle);
		mutex_unlock(&client->lock);
		if (!handle)
			return -EINVAL;
		ion_free(client, handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_uhandle_get(client, data.handle);
		data.fd = ion_share_dma_buf_fd(client, handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
				heap->free_list_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}