/* ion.c — revision ab0c069ab59aba0dd21b3e52e81a88f2c21e4448 */
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
17
18#include <linux/device.h>
19#include <linux/err.h>
20#include <linux/file.h>
21#include <linux/freezer.h>
22#include <linux/fs.h>
23#include <linux/anon_inodes.h>
24#include <linux/kthread.h>
25#include <linux/list.h>
26#include <linux/memblock.h>
27#include <linux/miscdevice.h>
28#include <linux/export.h>
29#include <linux/mm.h>
30#include <linux/mm_types.h>
31#include <linux/rbtree.h>
32#include <linux/slab.h>
33#include <linux/seq_file.h>
34#include <linux/uaccess.h>
35#include <linux/vmalloc.h>
36#include <linux/debugfs.h>
37#include <linux/dma-buf.h>
38#include <linux/idr.h>
39
40#include "ion.h"
41#include "ion_priv.h"
42#include "compat_ion.h"
43
44/**
45 * struct ion_device - the metadata of the ion device node
46 * @dev:		the actual misc device
47 * @buffers:		an rb tree of all the existing buffers
48 * @buffer_lock:	lock protecting the tree of buffers
49 * @lock:		rwsem protecting the tree of heaps and clients
50 * @heaps:		list of all the heaps in the system
51 * @user_clients:	list of all the clients created from userspace
52 */
53struct ion_device {
54	struct miscdevice dev;
55	struct rb_root buffers;
56	struct mutex buffer_lock;
57	struct rw_semaphore lock;
58	struct plist_head heaps;
59	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
60			      unsigned long arg);
61	struct rb_root clients;
62	struct dentry *debug_root;
63};
64
65/**
66 * struct ion_client - a process/hw block local address space
67 * @node:		node in the tree of all clients
68 * @dev:		backpointer to ion device
69 * @handles:		an rb tree of all the handles in this client
70 * @idr:		an idr space for allocating handle ids
71 * @lock:		lock protecting the tree of handles
72 * @name:		used for debugging
73 * @task:		used for debugging
74 *
75 * A client represents a list of buffers this client may access.
76 * The mutex stored here is used to protect both handles tree
77 * as well as the handles themselves, and should be held while modifying either.
78 */
79struct ion_client {
80	struct rb_node node;
81	struct ion_device *dev;
82	struct rb_root handles;
83	struct idr idr;
84	struct mutex lock;
85	const char *name;
86	struct task_struct *task;
87	pid_t pid;
88	struct dentry *debug_root;
89};
90
91/**
92 * ion_handle - a client local reference to a buffer
93 * @ref:		reference count
94 * @client:		back pointer to the client the buffer resides in
95 * @buffer:		pointer to the buffer
96 * @node:		node in the client's handle rbtree
97 * @kmap_cnt:		count of times this client has mapped to kernel
98 * @id:			client-unique id allocated by client->idr
99 *
100 * Modifications to node, map_cnt or mapping should be protected by the
101 * lock in the client.  Other fields are never changed after initialization.
102 */
103struct ion_handle {
104	struct kref ref;
105	struct ion_client *client;
106	struct ion_buffer *buffer;
107	struct rb_node node;
108	unsigned int kmap_cnt;
109	int id;
110};
111
112bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
113{
114	return (buffer->flags & ION_FLAG_CACHED) &&
115		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
116}
117
118bool ion_buffer_cached(struct ion_buffer *buffer)
119{
120	return !!(buffer->flags & ION_FLAG_CACHED);
121}
122
/* Strip the low tag bit to recover the real struct page pointer. */
static inline struct page *ion_buffer_page(struct page *page)
{
	unsigned long addr = (unsigned long)page;

	return (struct page *)(addr & ~1UL);
}
127
128static inline bool ion_buffer_page_is_dirty(struct page *page)
129{
130	return !!((unsigned long)page & 1UL);
131}
132
/* Mark the page dirty by setting the low tag bit in its pointer slot. */
static inline void ion_buffer_page_dirty(struct page **page)
{
	unsigned long tagged = (unsigned long)(*page) | 1UL;

	*page = (struct page *)tagged;
}
137
/* Clear the low-bit dirty tag from the page pointer slot. */
static inline void ion_buffer_page_clean(struct page **page)
{
	unsigned long untagged = (unsigned long)(*page) & ~1UL;

	*page = (struct page *)untagged;
}
142
143/* this function should only be called while dev->lock is held */
144static void ion_buffer_add(struct ion_device *dev,
145			   struct ion_buffer *buffer)
146{
147	struct rb_node **p = &dev->buffers.rb_node;
148	struct rb_node *parent = NULL;
149	struct ion_buffer *entry;
150
151	while (*p) {
152		parent = *p;
153		entry = rb_entry(parent, struct ion_buffer, node);
154
155		if (buffer < entry) {
156			p = &(*p)->rb_left;
157		} else if (buffer > entry) {
158			p = &(*p)->rb_right;
159		} else {
160			pr_err("%s: buffer already found.", __func__);
161			BUG();
162		}
163	}
164
165	rb_link_node(&buffer->node, parent, p);
166	rb_insert_color(&buffer->node, &dev->buffers);
167}
168
169/* this function should only be called while dev->lock is held */
170static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
171				     struct ion_device *dev,
172				     unsigned long len,
173				     unsigned long align,
174				     unsigned long flags)
175{
176	struct ion_buffer *buffer;
177	struct sg_table *table;
178	struct scatterlist *sg;
179	int i, ret;
180
181	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
182	if (!buffer)
183		return ERR_PTR(-ENOMEM);
184
185	buffer->heap = heap;
186	buffer->flags = flags;
187	kref_init(&buffer->ref);
188
189	ret = heap->ops->allocate(heap, buffer, len, align, flags);
190
191	if (ret) {
192		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
193			goto err2;
194
195		ion_heap_freelist_drain(heap, 0);
196		ret = heap->ops->allocate(heap, buffer, len, align,
197					  flags);
198		if (ret)
199			goto err2;
200	}
201
202	buffer->dev = dev;
203	buffer->size = len;
204
205	table = heap->ops->map_dma(heap, buffer);
206	if (WARN_ONCE(table == NULL,
207			"heap->ops->map_dma should return ERR_PTR on error"))
208		table = ERR_PTR(-EINVAL);
209	if (IS_ERR(table)) {
210		heap->ops->free(buffer);
211		kfree(buffer);
212		return ERR_PTR(PTR_ERR(table));
213	}
214	buffer->sg_table = table;
215	if (ion_buffer_fault_user_mappings(buffer)) {
216		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
217		struct scatterlist *sg;
218		int i, j, k = 0;
219
220		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
221		if (!buffer->pages) {
222			ret = -ENOMEM;
223			goto err1;
224		}
225
226		for_each_sg(table->sgl, sg, table->nents, i) {
227			struct page *page = sg_page(sg);
228
229			for (j = 0; j < sg->length / PAGE_SIZE; j++)
230				buffer->pages[k++] = page++;
231		}
232
233		if (ret)
234			goto err;
235	}
236
237	buffer->dev = dev;
238	buffer->size = len;
239	INIT_LIST_HEAD(&buffer->vmas);
240	mutex_init(&buffer->lock);
241	/* this will set up dma addresses for the sglist -- it is not
242	   technically correct as per the dma api -- a specific
243	   device isn't really taking ownership here.  However, in practice on
244	   our systems the only dma_address space is physical addresses.
245	   Additionally, we can't afford the overhead of invalidating every
246	   allocation via dma_map_sg. The implicit contract here is that
247	   memory comming from the heaps is ready for dma, ie if it has a
248	   cached mapping that mapping has been invalidated */
249	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
250		sg_dma_address(sg) = sg_phys(sg);
251	mutex_lock(&dev->buffer_lock);
252	ion_buffer_add(dev, buffer);
253	mutex_unlock(&dev->buffer_lock);
254	return buffer;
255
256err:
257	heap->ops->unmap_dma(heap, buffer);
258	heap->ops->free(buffer);
259err1:
260	if (buffer->pages)
261		vfree(buffer->pages);
262err2:
263	kfree(buffer);
264	return ERR_PTR(ret);
265}
266
267void ion_buffer_destroy(struct ion_buffer *buffer)
268{
269	if (WARN_ON(buffer->kmap_cnt > 0))
270		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
271	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
272	buffer->heap->ops->free(buffer);
273	if (buffer->pages)
274		vfree(buffer->pages);
275	kfree(buffer);
276}
277
/*
 * kref release callback: unlink the buffer from the device tree, then
 * either queue it on the heap's freelist (deferred free) or destroy it
 * immediately.
 */
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
293
/* Take an additional reference on @buffer. */
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}
298
/*
 * Drop a reference on @buffer; destroys it when the count hits zero.
 * Returns 1 if this put released the buffer, 0 otherwise.
 */
static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
303
/* Account one more client handle referencing @buffer (for debug stats). */
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}
310
/* Drop one handle's accounting on @buffer; record the last owner's identity. */
static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
334
335static struct ion_handle *ion_handle_create(struct ion_client *client,
336				     struct ion_buffer *buffer)
337{
338	struct ion_handle *handle;
339
340	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
341	if (!handle)
342		return ERR_PTR(-ENOMEM);
343	kref_init(&handle->ref);
344	RB_CLEAR_NODE(&handle->node);
345	handle->client = client;
346	ion_buffer_get(buffer);
347	ion_buffer_add_to_handle(buffer);
348	handle->buffer = buffer;
349
350	return handle;
351}
352
static void ion_handle_kmap_put(struct ion_handle *);

/*
 * kref release callback for a handle.  Drops any kernel mappings the
 * handle still holds, removes it from the client's idr and rbtree, and
 * releases its buffer reference.  Callers hold client->lock (see
 * ion_handle_put / ion_client_destroy).
 */
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}
375
/* Accessor: the buffer backing @handle. */
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}
380
/* Take an additional reference on @handle. */
static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}
385
/*
 * Drop a reference on @handle under client->lock (ion_handle_destroy
 * mutates the client's idr/rbtree, so the lock must be held around the
 * kref_put).  Returns 1 if this put destroyed the handle.
 */
static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}
397
398static struct ion_handle *ion_handle_lookup(struct ion_client *client,
399					    struct ion_buffer *buffer)
400{
401	struct rb_node *n = client->handles.rb_node;
402
403	while (n) {
404		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
405		if (buffer < entry->buffer)
406			n = n->rb_left;
407		else if (buffer > entry->buffer)
408			n = n->rb_right;
409		else
410			return entry;
411	}
412	return ERR_PTR(-EINVAL);
413}
414
415static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
416						int id)
417{
418	struct ion_handle *handle;
419
420	mutex_lock(&client->lock);
421	handle = idr_find(&client->idr, id);
422	if (handle)
423		ion_handle_get(handle);
424	mutex_unlock(&client->lock);
425
426	return handle ? handle : ERR_PTR(-EINVAL);
427}
428
/*
 * Check that @handle still belongs to @client by confirming its id maps
 * back to the same pointer.  Caller must hold client->lock.
 */
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}
435
/*
 * Assign an idr id to @handle and insert it into @client's rbtree of
 * handles, which is keyed by buffer address.  Returns 0 or the negative
 * errno from idr_alloc.  Callers hold client->lock.
 */
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	/* ids start at 1 so 0 can never be a valid userspace handle */
	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			/* two handles for one buffer should never happen;
			 * warn but keep going rather than crash */
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
466
467struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
468			     size_t align, unsigned int heap_id_mask,
469			     unsigned int flags)
470{
471	struct ion_handle *handle;
472	struct ion_device *dev = client->dev;
473	struct ion_buffer *buffer = NULL;
474	struct ion_heap *heap;
475	int ret;
476
477	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
478		 len, align, heap_id_mask, flags);
479	/*
480	 * traverse the list of heaps available in this system in priority
481	 * order.  If the heap type is supported by the client, and matches the
482	 * request of the caller allocate from it.  Repeat until allocate has
483	 * succeeded or all heaps have been tried
484	 */
485	len = PAGE_ALIGN(len);
486
487	if (!len)
488		return ERR_PTR(-EINVAL);
489
490	down_read(&dev->lock);
491	plist_for_each_entry(heap, &dev->heaps, node) {
492		/* if the caller didn't specify this heap id */
493		if (!((1 << heap->id) & heap_id_mask))
494			continue;
495		buffer = ion_buffer_create(heap, dev, len, align, flags);
496		if (!IS_ERR(buffer))
497			break;
498	}
499	up_read(&dev->lock);
500
501	if (buffer == NULL)
502		return ERR_PTR(-ENODEV);
503
504	if (IS_ERR(buffer))
505		return ERR_PTR(PTR_ERR(buffer));
506
507	handle = ion_handle_create(client, buffer);
508
509	/*
510	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
511	 * and ion_handle_create will take a second reference, drop one here
512	 */
513	ion_buffer_put(buffer);
514
515	if (IS_ERR(handle))
516		return handle;
517
518	mutex_lock(&client->lock);
519	ret = ion_handle_add(client, handle);
520	mutex_unlock(&client->lock);
521	if (ret) {
522		ion_handle_put(handle);
523		handle = ERR_PTR(ret);
524	}
525
526	return handle;
527}
528EXPORT_SYMBOL(ion_alloc);
529
/*
 * Release a client's handle.  Validates the handle against the client's
 * idr before dropping the reference.
 *
 * NOTE(review): the lock is dropped between validation and
 * ion_handle_put(), so two concurrent frees of the same handle could
 * both pass validation — verify whether callers can race here.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
548
/*
 * Query the physical address and length of the buffer behind @handle.
 * Only works for heaps that implement ->phys (i.e. physically
 * contiguous heaps).  Returns 0 on success, -EINVAL for a bad handle,
 * -ENODEV if the heap has no phys op.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	/* the phys op is called outside client->lock; the handle keeps
	 * the buffer alive for the caller */
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
574
/*
 * Get (or create) the buffer-wide kernel mapping and bump its count.
 * Returns the vaddr or an ERR_PTR.  Caller must hold buffer->lock.
 */
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	/* reuse the existing mapping if one is already live */
	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
			"heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}
593
/*
 * Per-handle kernel-mapping counter layered over the per-buffer one:
 * only the handle's first kmap takes a buffer-level reference.
 * Caller must hold buffer->lock.
 */
static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}
609
/*
 * Drop one buffer-level kernel-mapping reference; tear down the mapping
 * when the last reference goes.  Caller must hold buffer->lock.
 */
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}
618
/*
 * Drop one handle-level kmap reference; forwards to the buffer-level
 * counter when the handle's last reference goes.  Caller must hold
 * buffer->lock.
 */
static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
627
/*
 * Map the buffer behind @handle into the kernel address space.
 * Returns the vaddr or an ERR_PTR (-EINVAL for a bad handle, -ENODEV if
 * the heap cannot map to kernel space).  Pair with ion_unmap_kernel().
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	/* lock order: client->lock then buffer->lock */
	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
657
/*
 * Undo one ion_map_kernel() on @handle.
 *
 * NOTE(review): unlike ion_map_kernel(), the handle is not validated
 * here — confirm callers always pass a live handle.
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
670
/*
 * debugfs show: sum this client's buffer sizes per heap id and print a
 * name/size table.
 */
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		/* skip heaps this client never allocated from */
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}
699
/* debugfs open: bind the seq_file to the client stored in i_private. */
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}
704
/* File operations for the per-client debugfs entry. */
static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
711
/*
 * Create a client of @dev named @name, register it in the device's
 * client rbtree, and create its debugfs entry (named after its pid).
 * Returns the client or ERR_PTR(-ENOMEM).
 */
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		/* undo the task reference taken above */
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	/* rbtree keyed by client address; equality cannot occur since
	 * each client is a distinct allocation */
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);
774
/*
 * Tear down a client: destroy all of its handles (dropping their buffer
 * references), release the idr, unlink it from the device, remove its
 * debugfs entry, and free it.
 */
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* no client->lock here: the client is going away, so no new
	 * handles can race with this walk */
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
799
800struct sg_table *ion_sg_table(struct ion_client *client,
801			      struct ion_handle *handle)
802{
803	struct ion_buffer *buffer;
804	struct sg_table *table;
805
806	mutex_lock(&client->lock);
807	if (!ion_handle_validate(client, handle)) {
808		pr_err("%s: invalid handle passed to map_dma.\n",
809		       __func__);
810		mutex_unlock(&client->lock);
811		return ERR_PTR(-EINVAL);
812	}
813	buffer = handle->buffer;
814	table = buffer->sg_table;
815	mutex_unlock(&client->lock);
816	return table;
817}
818EXPORT_SYMBOL(ion_sg_table);
819
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

/*
 * dma-buf map_dma_buf hook: sync any CPU-dirtied pages toward the
 * attaching device, then hand back the buffer's pre-built sg_table
 * (no per-attachment mapping is created).
 */
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}
833
/*
 * dma-buf unmap hook: intentionally empty — the sg_table is owned by
 * the buffer and lives until the buffer is destroyed.
 */
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
839
/*
 * Sync a single physically contiguous range of @size bytes starting at
 * @page toward @dev using a one-entry scatterlist.
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
855
/* One userspace VMA currently mapping a buffer; linked on buffer->vmas. */
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
860
/*
 * For fault-mapped buffers: flush every page the CPU dirtied toward the
 * device, then zap all live user mappings so the next CPU touch faults
 * again and re-marks its page dirty.
 */
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	/* non-faulted buffers are synced at allocation time instead */
	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
							PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}
893
/*
 * Page-fault handler for fault-mapped buffers: mark the faulting page
 * dirty (for the next device sync) and insert its pfn into the VMA.
 *
 * NOTE(review): buffer->pages is dereferenced by ion_buffer_page_dirty
 * before the BUG_ON that checks it, and vmf->pgoff is not range-checked
 * here — presumably the mmap path bounds the VMA; verify.
 */
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}
912
/*
 * VMA open hook: record this VMA on the buffer's list so syncs can zap
 * it later.  On allocation failure the VMA is silently not tracked —
 * NOTE(review): that VMA would then never be zapped on sync; confirm
 * this best-effort behavior is acceptable.
 */
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}
927
928static void ion_vm_close(struct vm_area_struct *vma)
929{
930	struct ion_buffer *buffer = vma->vm_private_data;
931	struct ion_vma_list *vma_list, *tmp;
932
933	pr_debug("%s\n", __func__);
934	mutex_lock(&buffer->lock);
935	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
936		if (vma_list->vma != vma)
937			continue;
938		list_del(&vma_list->list);
939		kfree(vma_list);
940		pr_debug("%s: deleting %p\n", __func__, vma);
941		break;
942	}
943	mutex_unlock(&buffer->lock);
944}
945
946static struct vm_operations_struct ion_vma_ops = {
947	.open = ion_vm_open,
948	.close = ion_vm_close,
949	.fault = ion_vm_fault,
950};
951
952static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
953{
954	struct ion_buffer *buffer = dmabuf->priv;
955	int ret = 0;
956
957	if (!buffer->heap->ops->map_user) {
958		pr_err("%s: this heap does not define a method for mapping "
959		       "to userspace\n", __func__);
960		return -EINVAL;
961	}
962
963	if (ion_buffer_fault_user_mappings(buffer)) {
964		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
965							VM_DONTDUMP;
966		vma->vm_private_data = buffer;
967		vma->vm_ops = &ion_vma_ops;
968		ion_vm_open(vma);
969		return 0;
970	}
971
972	if (!(buffer->flags & ION_FLAG_CACHED))
973		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
974
975	mutex_lock(&buffer->lock);
976	/* now map it to userspace */
977	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
978	mutex_unlock(&buffer->lock);
979
980	if (ret)
981		pr_err("%s: failure mapping buffer to userspace\n",
982		       __func__);
983
984	return ret;
985}
986
/* dma-buf release hook: drop the reference the export took on the buffer. */
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}
992
/*
 * dma-buf kmap hook: page @offset within the buffer's kernel mapping.
 * NOTE(review): assumes begin_cpu_access already populated
 * buffer->vaddr; a NULL vaddr here would yield a bogus pointer —
 * confirm callers obey that ordering.
 */
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}
998
/*
 * dma-buf kunmap hook: nothing to undo — kmap hands out offsets into
 * the persistent mapping owned by the buffer.
 */
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
1004
/*
 * dma-buf begin_cpu_access hook: ensure a kernel mapping exists (and
 * hold a kmap reference) so subsequent kmap calls can return offsets
 * into buffer->vaddr.  Returns 0 or a negative errno.
 */
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}
1023
/*
 * dma-buf end_cpu_access hook: drop the kmap reference taken by
 * begin_cpu_access.
 */
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
1034
/*
 * dma-buf callbacks for ion-exported buffers.  Also used as an identity
 * check in ion_import_dma_buf/ion_sync_for_device to reject buffers
 * exported by other drivers.
 */
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
1047
1048struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1049						struct ion_handle *handle)
1050{
1051	struct ion_buffer *buffer;
1052	struct dma_buf *dmabuf;
1053	bool valid_handle;
1054
1055	mutex_lock(&client->lock);
1056	valid_handle = ion_handle_validate(client, handle);
1057	if (!valid_handle) {
1058		WARN(1, "%s: invalid handle passed to share.\n", __func__);
1059		mutex_unlock(&client->lock);
1060		return ERR_PTR(-EINVAL);
1061	}
1062	buffer = handle->buffer;
1063	ion_buffer_get(buffer);
1064	mutex_unlock(&client->lock);
1065
1066	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1067	if (IS_ERR(dmabuf)) {
1068		ion_buffer_put(buffer);
1069		return dmabuf;
1070	}
1071
1072	return dmabuf;
1073}
1074EXPORT_SYMBOL(ion_share_dma_buf);
1075
1076int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1077{
1078	struct dma_buf *dmabuf;
1079	int fd;
1080
1081	dmabuf = ion_share_dma_buf(client, handle);
1082	if (IS_ERR(dmabuf))
1083		return PTR_ERR(dmabuf);
1084
1085	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1086	if (fd < 0)
1087		dma_buf_put(dmabuf);
1088
1089	return fd;
1090}
1091EXPORT_SYMBOL(ion_share_dma_buf_fd);
1092
/*
 * Turn a dma-buf fd (which must have been exported by ion) back into a
 * client handle.  Reuses an existing handle for the same buffer if the
 * client already has one, otherwise creates and registers a new one.
 * Returns the handle or an ERR_PTR.
 */
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	/* drop the fd's temporary dma-buf reference in all cases */
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
1140
1141static int ion_sync_for_device(struct ion_client *client, int fd)
1142{
1143	struct dma_buf *dmabuf;
1144	struct ion_buffer *buffer;
1145
1146	dmabuf = dma_buf_get(fd);
1147	if (IS_ERR(dmabuf))
1148		return PTR_ERR(dmabuf);
1149
1150	/* if this memory came from ion */
1151	if (dmabuf->ops != &dma_buf_ops) {
1152		pr_err("%s: can not sync dmabuf from another exporter\n",
1153		       __func__);
1154		dma_buf_put(dmabuf);
1155		return -EINVAL;
1156	}
1157	buffer = dmabuf->priv;
1158
1159	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1160			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1161	dma_buf_put(dmabuf);
1162	return 0;
1163}
1164
1165/* fix up the cases where the ioctl direction bits are incorrect */
1166static unsigned int ion_ioctl_dir(unsigned int cmd)
1167{
1168	switch (cmd) {
1169	case ION_IOC_SYNC:
1170	case ION_IOC_FREE:
1171	case ION_IOC_CUSTOM:
1172		return _IOC_WRITE;
1173	default:
1174		return _IOC_DIR(cmd);
1175	}
1176}
1177
/*
 * Main ioctl dispatcher for /dev/ion.
 *
 * The argument payload (at most sizeof(data) bytes) is copied in up
 * front when the fixed-up direction includes _IOC_WRITE, and copied
 * back out at the end when it includes _IOC_READ.  ION_IOC_ALLOC is
 * special: if the copy-out of the new handle id fails, the freshly
 * allocated handle is freed again so it cannot leak.
 */
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	/* single argument buffer, large enough for any ion command */
	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
						data.allocation.align,
						data.allocation.heap_id_mask,
						data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		/* remembered so the handle can be freed if copy-out fails */
		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		/* get_by_id takes a temporary ref; drop it after the free */
		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;
		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		/* device-specific escape hatch registered at device create */
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
						data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}
1280
1281static int ion_release(struct inode *inode, struct file *file)
1282{
1283	struct ion_client *client = file->private_data;
1284
1285	pr_debug("%s: %d\n", __func__, __LINE__);
1286	ion_client_destroy(client);
1287	return 0;
1288}
1289
1290static int ion_open(struct inode *inode, struct file *file)
1291{
1292	struct miscdevice *miscdev = file->private_data;
1293	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1294	struct ion_client *client;
1295
1296	pr_debug("%s: %d\n", __func__, __LINE__);
1297	client = ion_client_create(dev, "user");
1298	if (IS_ERR(client))
1299		return PTR_ERR(client);
1300	file->private_data = client;
1301
1302	return 0;
1303}
1304
/* file operations backing the /dev/ion misc device */
static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};
1312
1313static size_t ion_debug_heap_total(struct ion_client *client,
1314				   unsigned int id)
1315{
1316	size_t size = 0;
1317	struct rb_node *n;
1318
1319	mutex_lock(&client->lock);
1320	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1321		struct ion_handle *handle = rb_entry(n,
1322						     struct ion_handle,
1323						     node);
1324		if (handle->buffer->heap->id == id)
1325			size += handle->buffer->size;
1326	}
1327	mutex_unlock(&client->lock);
1328	return size;
1329}
1330
1331static int ion_debug_heap_show(struct seq_file *s, void *unused)
1332{
1333	struct ion_heap *heap = s->private;
1334	struct ion_device *dev = heap->dev;
1335	struct rb_node *n;
1336	size_t total_size = 0;
1337	size_t total_orphaned_size = 0;
1338
1339	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1340	seq_printf(s, "----------------------------------------------------\n");
1341
1342	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1343		struct ion_client *client = rb_entry(n, struct ion_client,
1344						     node);
1345		size_t size = ion_debug_heap_total(client, heap->id);
1346		if (!size)
1347			continue;
1348		if (client->task) {
1349			char task_comm[TASK_COMM_LEN];
1350
1351			get_task_comm(task_comm, client->task);
1352			seq_printf(s, "%16.s %16u %16zu\n", task_comm,
1353				   client->pid, size);
1354		} else {
1355			seq_printf(s, "%16.s %16u %16zu\n", client->name,
1356				   client->pid, size);
1357		}
1358	}
1359	seq_printf(s, "----------------------------------------------------\n");
1360	seq_printf(s, "orphaned allocations (info is from last known client):"
1361		   "\n");
1362	mutex_lock(&dev->buffer_lock);
1363	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1364		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1365						     node);
1366		if (buffer->heap->id != heap->id)
1367			continue;
1368		total_size += buffer->size;
1369		if (!buffer->handle_count) {
1370			seq_printf(s, "%16.s %16u %16zu %d %d\n",
1371				   buffer->task_comm, buffer->pid,
1372				   buffer->size, buffer->kmap_cnt,
1373				   atomic_read(&buffer->ref.refcount));
1374			total_orphaned_size += buffer->size;
1375		}
1376	}
1377	mutex_unlock(&dev->buffer_lock);
1378	seq_printf(s, "----------------------------------------------------\n");
1379	seq_printf(s, "%16.s %16zu\n", "total orphaned",
1380		   total_orphaned_size);
1381	seq_printf(s, "%16.s %16zu\n", "total ", total_size);
1382	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1383		seq_printf(s, "%16.s %16zu\n", "deferred free",
1384				heap->free_list_size);
1385	seq_printf(s, "----------------------------------------------------\n");
1386
1387	if (heap->debug_show)
1388		heap->debug_show(heap, s, unused);
1389
1390	return 0;
1391}
1392
/* debugfs open: bind the seq_file to the heap stashed in i_private */
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}
1397
/* file operations for each per-heap debugfs statistics file */
static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1404
1405#ifdef DEBUG_HEAP_SHRINKER
1406static int debug_shrink_set(void *data, u64 val)
1407{
1408	struct ion_heap *heap = data;
1409	struct shrink_control sc;
1410	int objs;
1411
1412	sc.gfp_mask = -1;
1413	sc.nr_to_scan = 0;
1414
1415	if (!val)
1416		return 0;
1417
1418	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1419	sc.nr_to_scan = objs;
1420
1421	heap->shrinker.shrink(&heap->shrinker, &sc);
1422	return 0;
1423}
1424
1425static int debug_shrink_get(void *data, u64 *val)
1426{
1427	struct ion_heap *heap = data;
1428	struct shrink_control sc;
1429	int objs;
1430
1431	sc.gfp_mask = -1;
1432	sc.nr_to_scan = 0;
1433
1434	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1435	*val = objs;
1436	return 0;
1437}
1438
1439DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1440			debug_shrink_set, "%llu\n");
1441#endif
1442
/*
 * Register @heap with @dev: validate its mandatory ops, start the
 * deferred-free machinery if the heap requests it, insert the heap
 * into the device's priority-ordered list and expose its statistics
 * in debugfs.
 *
 * NOTE(review): a heap with missing mandatory ops is only reported
 * via pr_err() here; it is still added to the list — confirm whether
 * callers rely on that.
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}
1472
1473struct ion_device *ion_device_create(long (*custom_ioctl)
1474				     (struct ion_client *client,
1475				      unsigned int cmd,
1476				      unsigned long arg))
1477{
1478	struct ion_device *idev;
1479	int ret;
1480
1481	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1482	if (!idev)
1483		return ERR_PTR(-ENOMEM);
1484
1485	idev->dev.minor = MISC_DYNAMIC_MINOR;
1486	idev->dev.name = "ion";
1487	idev->dev.fops = &ion_fops;
1488	idev->dev.parent = NULL;
1489	ret = misc_register(&idev->dev);
1490	if (ret) {
1491		pr_err("ion: failed to register misc device.\n");
1492		return ERR_PTR(ret);
1493	}
1494
1495	idev->debug_root = debugfs_create_dir("ion", NULL);
1496	if (!idev->debug_root)
1497		pr_err("ion: failed to create debug files.\n");
1498
1499	idev->custom_ioctl = custom_ioctl;
1500	idev->buffers = RB_ROOT;
1501	mutex_init(&idev->buffer_lock);
1502	init_rwsem(&idev->lock);
1503	plist_head_init(&idev->heaps);
1504	idev->clients = RB_ROOT;
1505	return idev;
1506}
1507
/*
 * Unregister and free a device created by ion_device_create().
 * Heaps and clients are not torn down here (see the XXX below);
 * callers must ensure they are gone first.
 */
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
1514
1515void __init ion_reserve(struct ion_platform_data *data)
1516{
1517	int i;
1518
1519	for (i = 0; i < data->nr; i++) {
1520		if (data->heaps[i].size == 0)
1521			continue;
1522
1523		if (data->heaps[i].base == 0) {
1524			phys_addr_t paddr;
1525			paddr = memblock_alloc_base(data->heaps[i].size,
1526						    data->heaps[i].align,
1527						    MEMBLOCK_ALLOC_ANYWHERE);
1528			if (!paddr) {
1529				pr_err("%s: error allocating memblock for "
1530				       "heap %d\n",
1531					__func__, i);
1532				continue;
1533			}
1534			data->heaps[i].base = paddr;
1535		} else {
1536			int ret = memblock_reserve(data->heaps[i].base,
1537					       data->heaps[i].size);
1538			if (ret)
1539				pr_err("memblock reserve of %zx@%lx failed\n",
1540				       data->heaps[i].size,
1541				       data->heaps[i].base);
1542		}
1543		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1544			data->heaps[i].name,
1545			data->heaps[i].base,
1546			data->heaps[i].size);
1547	}
1548}
1549