/* ion.c revision 462be0c616c297776556b24daa1511bcc0e0cd2c */
/*

 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	device-specific hook called for ION_IOC_CUSTOM ioctls
 * @clients:		an rb tree of all the existing clients
 * @debug_root:		root dentry for the device's debugfs entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client's group leader, used for debugging
 * @debug_root:		this client's debugfs entry
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
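
/*
 * For buffers whose user mappings are faulted in on demand, buffer->pages
 * holds one struct page pointer per PAGE_SIZE chunk of the buffer.  Because
 * struct page pointers are at least word aligned, the helpers above borrow
 * bit 0 of each entry as a per-page dirty flag: ion_buffer_page_dirty() sets
 * it when a page is faulted into a userspace mapping, ion_buffer_page()
 * masks it off to recover the real pointer, and ion_buffer_sync_for_device()
 * clears it after flushing the page for the device.
 */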

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_CAST(table);
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle and is not referenced by
	 * any other handle, copy the task comm and the pid of the process
	 * it is being removed from into the buffer.  At this point there
	 * will be no way to track what processes this buffer is being
	 * used by; it only exists as a dma_buf file descriptor.  The task
	 * comm and pid can provide a debug hint as to where this fd is in
	 * the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
						int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.\n", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
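
/*
 * Each handle is indexed twice per client: by id in client->idr, which is
 * what userspace passes across the ioctl interface and what
 * ion_handle_get_by_id() resolves, and by buffer pointer in the
 * client->handles rbtree, which lets ion_handle_lookup() find an existing
 * handle when the same buffer is imported into the client again.  Both
 * structures are protected by client->lock.
 */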

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
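
/*
 * Illustrative in-kernel usage of the allocation API (the device pointer,
 * heap id and size below are hypothetical; heap ids are assigned by the
 * platform when its heaps are registered):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, 1 << my_heap_id,
 *			   ION_FLAG_CACHED);
 *	if (!IS_ERR(handle)) {
 *		...
 *		ion_free(client, handle);
 *	}
 *	ion_client_destroy(client);
 */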

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
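
/*
 * Kernel mappings are reference counted at two levels: handle->kmap_cnt
 * tracks how many times a given client has mapped the buffer through that
 * handle, while buffer->kmap_cnt tracks the total across all handles (and
 * dma-buf begin_cpu_access users).  The heap's map_kernel/unmap_kernel ops
 * are only called on the buffer count's 0 -> 1 and 1 -> 0 transitions, so a
 * single kernel virtual mapping is shared by everyone.  Both counters are
 * modified under buffer->lock.
 */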

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
							PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
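
/*
 * Typical sharing flow (illustrative; client_a, client_b and handle_a are
 * hypothetical): the exporting side wraps a handle in a dma-buf fd, the fd
 * is passed to another process or driver, and the receiving side turns it
 * back into a handle on its own client:
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	...
 *	struct ion_handle *handle_b = ion_import_dma_buf(client_b, fd);
 *
 * If the fd refers to a buffer the importing client already has a handle
 * for, the existing handle is reference counted rather than duplicated.
 */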

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
						data.allocation.align,
						data.allocation.heap_id_mask,
						data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
						data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
				heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}
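
/*
 * Illustrative platform setup (the pdata and loop below are hypothetical;
 * the heap constructor comes from ion_priv.h and the per-heap data from the
 * platform's ion_platform_data):
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */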

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}