ion.c revision 483ed03f5eee1d1207e8648e923d615ce0599814
/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	device-specific hook invoked for ION_IOC_CUSTOM
 * @clients:		an rb tree of all the existing clients
 * @debug_root:		debugfs root directory for ion
 * @heaps_debug_root:	debugfs directory holding one entry per heap
 * @clients_debug_root:	debugfs directory holding one entry per client
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client's process, used for debugging
 * @debug_root:		this client's entry in debugfs
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

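/*
 * For buffers that fault in their user mappings, the low bit of each entry
 * in buffer->pages doubles as a per-page dirty flag: ion_buffer_page()
 * masks it off, and the helpers below test, set and clear it.
 */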
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
			"heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_CAST(table);
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

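/*
 * kref release callback for a handle: drops any remaining kernel mappings,
 * removes the handle from the client's idr and rbtree, and drops its
 * reference on the underlying buffer.
 */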
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

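/* caller must hold client->lock; returns this client's handle for @buffer, if any */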
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
						int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

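/* attach a newly created handle to the client; caller must hold client->lock */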
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
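
/*
 * Illustrative in-kernel usage of the allocation API above.  This is only a
 * sketch: error handling is omitted, and the heap mask and flags are
 * assumptions for illustration -- real callers pick values matching the
 * heaps registered on their platform.
 *
 *	struct ion_client *client = ion_client_create(idev, "example");
 *	struct ion_handle *handle = ion_alloc(client, PAGE_SIZE, 0,
 *					      1 << ION_HEAP_TYPE_SYSTEM,
 *					      ION_FLAG_CACHED);
 *	void *vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */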

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

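/* map the buffer into the kernel, refcounted; caller must hold buffer->lock */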
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
			"heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

672
673static int ion_debug_client_show(struct seq_file *s, void *unused)
674{
675	struct ion_client *client = s->private;
676	struct rb_node *n;
677	size_t sizes[ION_NUM_HEAP_IDS] = {0};
678	const char *names[ION_NUM_HEAP_IDS] = {NULL};
679	int i;
680
681	mutex_lock(&client->lock);
682	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
683		struct ion_handle *handle = rb_entry(n, struct ion_handle,
684						     node);
685		unsigned int id = handle->buffer->heap->id;
686
687		if (!names[id])
688			names[id] = handle->buffer->heap->name;
689		sizes[id] += handle->buffer->size;
690	}
691	mutex_unlock(&client->lock);
692
693	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
694	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
695		if (!names[i])
696			continue;
697		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
698	}
699	return 0;
700}
701
702static int ion_debug_client_open(struct inode *inode, struct file *file)
703{
704	return single_open(file, ion_debug_client_show, inode->i_private);
705}
706
707static const struct file_operations debug_client_fops = {
708	.open = ion_debug_client_open,
709	.read = seq_read,
710	.llseek = seq_lseek,
711	.release = single_release,
712};
713
714struct ion_client *ion_client_create(struct ion_device *dev,
715				     const char *name)
716{
717	struct ion_client *client;
718	struct task_struct *task;
719	struct rb_node **p;
720	struct rb_node *parent = NULL;
721	struct ion_client *entry;
722	pid_t pid;
723
724	get_task_struct(current->group_leader);
725	task_lock(current->group_leader);
726	pid = task_pid_nr(current->group_leader);
727	/* don't bother to store task struct for kernel threads,
728	   they can't be killed anyway */
729	if (current->group_leader->flags & PF_KTHREAD) {
730		put_task_struct(current->group_leader);
731		task = NULL;
732	} else {
733		task = current->group_leader;
734	}
735	task_unlock(current->group_leader);
736
737	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
738	if (!client) {
739		if (task)
740			put_task_struct(current->group_leader);
741		return ERR_PTR(-ENOMEM);
742	}
743
744	client->dev = dev;
745	client->handles = RB_ROOT;
746	idr_init(&client->idr);
747	mutex_init(&client->lock);
748	client->name = name;
749	client->task = task;
750	client->pid = pid;
751
752	down_write(&dev->lock);
753	p = &dev->clients.rb_node;
754	while (*p) {
755		parent = *p;
756		entry = rb_entry(parent, struct ion_client, node);
757
758		if (client < entry)
759			p = &(*p)->rb_left;
760		else if (client > entry)
761			p = &(*p)->rb_right;
762	}
763	rb_link_node(&client->node, parent, p);
764	rb_insert_color(&client->node, &dev->clients);
765
766	client->debug_root = debugfs_create_file(name, 0664,
767						dev->clients_debug_root,
768						client, &debug_client_fops);
769	if (!client->debug_root) {
770		char buf[256], *path;
771		path = dentry_path(dev->clients_debug_root, buf, 256);
772		pr_err("Failed to create client debugfs at %s/%s\n",
773			path, name);
774	}
775
776	up_write(&dev->lock);
777
778	return client;
779}
780EXPORT_SYMBOL(ion_client_create);
781
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

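/*
 * Sync any pages dirtied through a faulted user mapping back for device
 * access, then zap those mappings so the next CPU touch refaults (and
 * re-dirties) the pages.
 */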
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
							PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

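/* fault handler for cached buffers: mark the page dirty and map its pfn */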
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

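/* sync a dma-buf's backing pages for device access (backs ION_IOC_SYNC) */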
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
						data.allocation.align,
						data.allocation.heap_id_mask,
						data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
						data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};

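/* total bytes of this client's handles that are backed by heap @id */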
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
				heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					dev->heaps_debug_root, heap,
					&debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
			path, heap->name);
	}

#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
				path, debug_name);
		}
	}
#endif
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}