ion.c revision 4d5ca3299fb7b27ceb6c33a62bc10ce4d408dc0b
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * DEBUG must be defined before the headers below (which pull in
 * linux/printk.h) for the pr_debug() calls in this file to be compiled
 * in; defining it after the includes has no effect.
 */
#define DEBUG

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		rb tree of all the heaps in the system, sorted by id
 * @custom_ioctl:	hook for device-specific ioctls, may be NULL
 * @user_clients:	rb tree of all the clients created from userspace
 * @kernel_clients:	rb tree of all the clients created from the kernel
 * @debug_root:		root dentry of this device's debugfs files
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref:		for reference counting the client
 * @node:		node in the device's tree of all clients
 * @dev:		back pointer to the ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the task that created this client, used for
 *			debugging
 * @debug_root:		this client's debugfs entry
 *
 * A client represents the set of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 * @usermap_cnt:	count of times this client has mapped for userspace
 *
 * Modifications to node and the map counts should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry) {
			p = &(*p)->rb_left;
		} else if (handle > entry) {
			p = &(*p)->rb_right;
		} else {
			/* a freshly allocated handle can't already be in the
			   tree; bail out rather than loop forever if this
			   somehow happens */
			WARN(1, "%s: handle already found.", __func__);
			return;
		}
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

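/**
 * ion_alloc - allocate a buffer and return a new handle to it
 * @client:	the client the handle will belong to
 * @len:	requested size of the allocation in bytes
 * @align:	requested alignment of the allocation
 * @flags:	mask of heap ids to try to allocate from
 *
 * Walks the device's heaps in priority order and allocates from the first
 * heap that is both supported by the client and requested via @flags.
 * Returns a handle holding a reference to the new buffer, or an ERR_PTR /
 * NULL value on failure.
 */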
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't request this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create creates the buffer with a refcount of 1, and
	 * ion_handle_create takes a second reference on success.  Drop the
	 * creation reference here; on failure this frees the buffer.
	 */
	ion_buffer_put(buffer);

	if (IS_ERR_OR_NULL(handle))
		return handle;

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;
}

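/**
 * ion_free - free a handle
 * @client:	the client owning the handle
 * @handle:	the handle to free
 *
 * Drops the client's reference to the handle; when the last reference is
 * gone the handle is removed from the client and the underlying buffer
 * reference is dropped.
 */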
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

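/*
 * Ion tracks mappings at two levels: a per-handle count and a per-buffer
 * count (the number of handles with at least one mapping).  _ion_map()
 * bumps the handle count, bumps the buffer count on a handle's first map,
 * and returns true only when the buffer count goes from 0 to 1 -- i.e.
 * when the buffer actually needs to be mapped.  _ion_unmap() mirrors this
 * and returns true when the last mapping goes away.  Callers hold the
 * buffer lock around both.
 */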
static bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	map = !*buffer_cnt;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}

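/**
 * ion_phys - return the physical address and length of a handle's buffer
 * @client:	the client owning the handle
 * @handle:	the handle to look up
 * @addr:	out parameter for the physical address
 * @len:	out parameter for the length of the buffer
 *
 * Only succeeds for heaps that implement the phys op, i.e. physically
 * contiguous heaps.  Returns 0 on success or a negative error code.
 */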
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

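/**
 * ion_map_kernel - create a kernel mapping for a handle's buffer
 * @client:	the client owning the handle
 * @handle:	the handle to map
 *
 * Only the first mapping of a buffer calls into the heap's map_kernel op;
 * subsequent calls return the cached vaddr.  Returns the kernel virtual
 * address or an ERR_PTR on failure.
 */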
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

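/**
 * ion_map_dma - map a handle's buffer for dma and return its sg_table
 * @client:	the client owning the handle
 * @handle:	the handle to map
 *
 * As with ion_map_kernel(), only the first mapping calls into the heap's
 * map_dma op; later calls return the cached sg_table.
 */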
struct sg_table *ion_map_dma(struct ion_client *client,
			     struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}
	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		table = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(table))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sg_table = table;
	} else {
		table = buffer->sg_table;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return table;
}

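/**
 * ion_unmap_kernel - drop one kernel mapping reference for a handle
 * @client:	the client owning the handle
 * @handle:	the handle to unmap
 *
 * When the last kernel mapping of the buffer goes away, the heap's
 * unmap_kernel op is called and the cached vaddr is cleared.
 */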
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

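/**
 * ion_unmap_dma - drop one dma mapping reference for a handle
 * @client:	the client owning the handle
 * @handle:	the handle to unmap
 *
 * When the last dma mapping of the buffer goes away, the heap's
 * unmap_dma op is called and the cached sg_table is cleared.
 */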
void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sg_table = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

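/**
 * ion_share - get the underlying buffer of a handle for sharing
 * @client:	the client owning the handle
 * @handle:	the handle to share
 *
 * Returns the buffer without taking an extra reference; see the comment
 * in the body for the resulting lifetime rules.
 */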
struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's being passed
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

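/**
 * ion_import - import a buffer into a client, creating a handle if needed
 * @client:	the client to import the buffer into
 * @buffer:	the buffer to import
 *
 * If the client already has a handle for this buffer, a reference is taken
 * on the existing handle; otherwise a new handle is created and added to
 * the client.
 */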
struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}

static const struct file_operations ion_share_fops;

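/**
 * ion_import_fd - import the buffer behind a shared ion fd
 * @client:	the client to import the buffer into
 * @fd:		a file descriptor obtained via ION_IOC_SHARE/ION_IOC_MAP
 *
 * Validates that the fd really refers to a shared ion file, then imports
 * its buffer via ion_import().
 */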
struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file is not a shared ion file.\n",
		       __func__);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i],
			   atomic_read(&client->ref.refcount));
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

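/**
 * ion_client_create - create or look up an ion client for the current task
 * @dev:	the ion device
 * @heap_mask:	mask of heap types this client may allocate from
 * @name:	a name for the client, used for debugging
 *
 * For user tasks this returns the existing client of the task's group
 * leader if one already exists; kernel threads always get a fresh client.
 *
 * A minimal usage sketch from kernel code (the heap mask, flags and name
 * here are illustrative values, not something this file defines):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, 1 << ION_HEAP_TYPE_SYSTEM, "demo");
 *	if (IS_ERR_OR_NULL(client))
 *		return;
 *	handle = ion_alloc(client, SZ_4K, PAGE_SIZE, 0x1);
 *	if (!IS_ERR_OR_NULL(handle))
 *		ion_free(client, handle);
 *	ion_client_destroy(client);
 */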
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

732
733static void _ion_client_destroy(struct kref *kref)
734{
735	struct ion_client *client = container_of(kref, struct ion_client, ref);
736	struct ion_device *dev = client->dev;
737	struct rb_node *n;
738
739	pr_debug("%s: %d\n", __func__, __LINE__);
740	while ((n = rb_first(&client->handles))) {
741		struct ion_handle *handle = rb_entry(n, struct ion_handle,
742						     node);
743		ion_handle_destroy(&handle->ref);
744	}
745	mutex_lock(&dev->lock);
746	if (client->task) {
747		rb_erase(&client->node, &dev->user_clients);
748		put_task_struct(client->task);
749	} else {
750		rb_erase(&client->node, &dev->kernel_clients);
751	}
752	debugfs_remove_recursive(client->debug_root);
753	mutex_unlock(&dev->lock);
754
755	kfree(client);
756}
757
758static void ion_client_get(struct ion_client *client)
759{
760	kref_get(&client->ref);
761}
762
763static int ion_client_put(struct ion_client *client)
764{
765	return kref_put(&client->ref, _ion_client_destroy);
766}
767
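/**
 * ion_client_destroy - drop a reference to a client
 * @client:	the client to drop
 *
 * The client (and any handles still in it) is actually freed only when
 * the last reference goes away.
 */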
void ion_client_destroy(struct ion_client *client)
{
	ion_client_put(client);
}

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists; it's possible for the client
	   to have gone away while the map/share fd stayed around.  Take a
	   reference to it so it can't go away while this mapping exists. */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no "
		       "ion client\n", __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				     buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available"
		       "\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);
	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err1;
	}

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err1:
	/* drop the reference to the handle */
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner		= THIS_MODULE,
	.release	= ion_share_release,
	.mmap		= ion_share_mmap,
};

static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;
	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);
		/* a NULL handle means no heap matched the request; don't
		   copy an error pointer back to userspace */
		if (IS_ERR_OR_NULL(data.handle))
			return data.handle ? PTR_ERR(data.handle) : -EINVAL;
		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16.16s %16.16s %16.16s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16.16s %16d %16zu\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		seq_printf(s, "%16.16s %16d %16zu\n", client->name,
			   client->pid, size);
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

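/**
 * ion_device_add_heap - add a heap to an ion device
 * @dev:	the device to add the heap to
 * @heap:	the heap to add
 *
 * Heaps are kept in an rb tree sorted by id; since ion_alloc() walks the
 * tree in order, lower ids are tried first.  Also creates a debugfs file
 * reporting per-client usage of the heap.
 */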
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: cannot insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

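/**
 * ion_device_create - allocate and register an ion device
 * @custom_ioctl:	optional hook called for ION_IOC_CUSTOM ioctls,
 *			may be NULL
 *
 * Registers the /dev/ion misc device and creates the "ion" debugfs
 * directory.  Returns the new device or an ERR_PTR on failure.
 */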
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	return idev;
}

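/**
 * ion_device_destroy - unregister and free an ion device
 * @dev:	the device to destroy
 */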
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}