/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* enable pr_debug() in this file; must be defined before the printk headers */
#define DEBUG

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers, heaps, and clients trees
 * @heaps:		an rb tree of all the heaps in the system, indexed by id
 * @custom_ioctl:	optional driver-specific ioctl hook
 * @user_clients:	an rb tree of all the clients created from userspace
 * @kernel_clients:	an rb tree of all the clients created from the kernel
 * @debug_root:		root dentry for this device's debugfs files
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the task this client belongs to, for debugging
 * @debug_root:		this client's entry in debugfs
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 *
 * Modifications to node and the *map_cnt fields should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};
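
/*
 * Reference counting overview: an ion_buffer holds a reference for each
 * handle pointing at it and for each shared fd created from it; an
 * ion_handle holds a reference for its creator and for each vma that maps
 * it; an ion_client holds a reference for the file that created it and for
 * each vma mapped through it.  Dropping the last reference at each level
 * frees the object and releases its reference on the level below.
 */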

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	if (WARN_ON(buffer->dmap_cnt > 0))
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry) {
			p = &(*p)->rb_left;
		} else if (handle > entry) {
			p = &(*p)->rb_right;
		} else {
			WARN(1, "%s: handle already found.\n", __func__);
			return;
		}
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until allocate
	 * has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
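
/*
 * Example (sketch): a kernel driver that already holds an ion_device could
 * allocate and free a buffer roughly like this; "my_idev" and "my_heap_id"
 * are illustrative names, not identifiers defined in this file:
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(my_idev, 1 << ION_HEAP_TYPE_SYSTEM,
 *				   "my_driver");
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, 1 << my_heap_id);
 *	if (!IS_ERR_OR_NULL(handle))
 *		ion_free(client, handle);
 *	ion_client_destroy(client);
 */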

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

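/*
 * _ion_map tracks one mapping at two levels: handle_cnt counts this client's
 * maps of the handle, buffer_cnt counts how many handles currently have the
 * underlying buffer mapped.  Returns true only when the caller must actually
 * perform the map, i.e. when the buffer has no mapping yet.
 */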
static bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	map = !*buffer_cnt;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

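/*
 * _ion_unmap is the inverse of _ion_map: it drops one handle-level map and,
 * when that was the handle's last map, one buffer-level map.  Returns true
 * only when the buffer's final mapping was just dropped and the caller must
 * perform the real unmap.
 */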
static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		else
			buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
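
/*
 * A sketch of typical use: map_kernel/unmap_kernel calls nest per handle,
 * so a caller does something like
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		memset(vaddr, 0, size);		// illustrative access only
 *		ion_unmap_kernel(client, handle);
 *	}
 *
 * The buffer is really mapped only on the first map and really unmapped on
 * the last unmap; intermediate calls just adjust the counts.
 */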

struct sg_table *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}
	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		table = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(table))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		else
			buffer->sg_table = table;
	} else {
		table = buffer->sg_table;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return table;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sg_table = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
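/*
 * Buffer sharing between clients: the exporting client calls ion_share() to
 * get the raw buffer (no extra reference is taken), and the importing client
 * calls ion_import() (or ion_import_fd() when the buffer arrived over a
 * shared fd from ION_IOC_SHARE) to obtain its own handle and reference.
 */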
struct ion_buffer *ion_share(struct ion_client *client,
				 struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	   to make sure the buffer doesn't go away while it's being passed
	   to another client -- ion_free should not be called on this handle
	   until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}

static const struct file_operations ion_share_fops;

struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file is not a shared ion file.\n",
		       __func__);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i],
			   atomic_read(&client->ref.refcount));
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

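/*
 * Create (or, for userspace tasks, find and reuse) a client for the given
 * device.  For a process the client is keyed on the task's group leader so
 * repeated opens share one client; kernel threads always get a fresh client
 * and no task backpointer, since they can't be killed.
 */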
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

static void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

static int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

void ion_client_destroy(struct ion_client *client)
{
	ion_client_put(client);
}

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* drop the shared fd's reference to the buffer -- this reference is
	   what kept the buffer from going away if the client holding it
	   exited while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}

	if (!ion_handle_validate(client, handle)) {
		ion_client_put(client);
		vma->vm_private_data = NULL;
		return;
	}

	ion_handle_get(handle);

	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static const struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no ion client\n",
		       __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				     buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available\n",
		       __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);
	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err1;
	}

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err1:
	/* drop the reference to the handle */
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner		= THIS_MODULE,
	.release	= ion_share_release,
	.mmap		= ion_share_mmap,
};

static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return fd;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;
	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
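
/*
 * Userspace flow (sketch): a process opens /dev/ion, fills a struct
 * ion_allocation_data and issues ION_IOC_ALLOC to get a handle, then
 * ION_IOC_SHARE to turn that handle into an fd it can mmap() or pass over a
 * unix socket; the receiving process hands the fd to ION_IOC_IMPORT.  Error
 * handling is elided and "my_heap_id" is illustrative:
 *
 *	int fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096, .flags = 1 << my_heap_id,
 *	};
 *	ioctl(fd, ION_IOC_ALLOC, &alloc);
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(fd, ION_IOC_SHARE, &share);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       share.fd, 0);
 */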

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->type);

		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16s %16u %16zu\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);

		if (!size)
			continue;
		seq_printf(s, "%16s %16u %16zu\n", client->name, client->pid,
			   size);
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: cannot insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}
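
/*
 * Driver-side setup (sketch): a platform driver typically creates one ion
 * device at probe time and registers its heaps with it.  The heap
 * constructor below stands in for whatever heap-creation helper the
 * platform uses:
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	struct ion_heap *heap = ion_heap_create(&pdata->heaps[0]);
 *	ion_device_add_heap(idev, heap);
 */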

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}