ion.c revision ce1f147a2ed5ed468ad3a2f8418dddfd205d1fd9
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	optional device-specific ioctl handler
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		debugfs root directory for the device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents the set of buffers it may access through its handles.
 * The mutex stored here protects both the tree of handles and the handles
 * themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};
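
/*
 * Rough in-kernel usage sketch for the objects above (illustrative only,
 * not part of this driver): a caller creates a client against the device,
 * allocates a handle from one of the registered heaps, optionally maps it,
 * and tears everything down in reverse order.  The idev, heap_mask and
 * heap_flags values below are placeholders, and error handling is omitted:
 *
 *	struct ion_client *client = ion_client_create(idev, heap_mask, "demo");
 *	struct ion_handle *handle = ion_alloc(client, SZ_4K, SZ_4K, heap_flags);
 *	void *vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */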

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	while (buffer->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

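/**
 * ion_alloc - allocate an ion buffer and return a handle to it
 * @client:	the client making the allocation
 * @len:	requested size in bytes, rounded up to a page multiple
 * @align:	requested alignment, passed through to the heap
 * @flags:	mask of heap ids the caller is willing to allocate from
 *
 * Walks the registered heaps in id order and allocates from the first heap
 * whose type is enabled in the client's heap_mask and whose id is set in
 * @flags.  Returns a handle on success or an ERR_PTR() on failure.
 */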
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * Traverse the heaps available in this system in priority order.
	 * If the heap type is supported by the client and matches the
	 * caller's request, allocate from it.  Repeat until an allocation
	 * succeeds or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't request this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create() creates the buffer with a refcount of 1, and
	 * ion_handle_create() takes a second reference; drop the extra one here.
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}

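/**
 * ion_free - drop a client's reference to a handle
 * @client:	the client that owns @handle
 * @handle:	the handle to release
 *
 * Drops the handle reference; when the last reference goes away the handle
 * is removed from the client and its buffer reference is released.
 */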
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

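/**
 * ion_phys - return the physical address and size of a buffer
 * @client:	the client that owns @handle
 * @handle:	the handle to query
 * @addr:	out parameter for the physical address
 * @len:	out parameter for the buffer length
 *
 * Only works for heaps that implement the phys op (e.g. physically
 * contiguous heaps); returns -ENODEV otherwise.
 */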
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

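/**
 * ion_map_kernel - map a handle's buffer into the kernel address space
 * @client:	the client that owns @handle
 * @handle:	the handle to map
 *
 * Mappings are reference counted per handle and per buffer, so repeated
 * calls return the same address until a matching ion_unmap_kernel().
 */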
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

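/**
 * ion_client_create - allocate a client and register it with the device
 * @dev:	the ion device
 * @heap_mask:	mask of heap types this client may allocate from
 * @name:	name used for debugging
 *
 * Also creates a per-pid debugfs entry summarising the client's allocations.
 */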
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

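/**
 * ion_client_destroy - free all handles owned by a client and remove it
 * @client:	the client to tear down
 *
 * Destroys every outstanding handle (dropping the buffer references they
 * hold), unlinks the client from the device and frees it.
 */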
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

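/**
 * ion_sg_table - return the sg_table describing a handle's buffer
 * @client:	the client that owns @handle
 * @handle:	the handle to query
 *
 * The table is created once by the heap's map_dma op at allocation time
 * and stays valid for the lifetime of the buffer.
 */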
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

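/**
 * ion_share_dma_buf - export a handle's buffer as a dma-buf file descriptor
 * @client:	the client that owns @handle
 * @handle:	the handle to share
 *
 * Takes an extra reference on the buffer, wraps it in a dma-buf and returns
 * an fd that can be passed to other processes or drivers, or a negative
 * errno on failure.
 */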
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		ion_buffer_put(buffer);
	}
	return fd;
}

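/**
 * ion_import_dma_buf - turn an ion-exported dma-buf fd back into a handle
 * @client:	the client importing the buffer
 * @fd:		a dma-buf fd previously returned by ion_share_dma_buf()
 *
 * Only buffers exported by ion itself can be imported; if the client already
 * holds a handle for the buffer, that handle's reference count is bumped
 * instead of creating a new one.
 */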
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

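/**
 * ion_device_add_heap - register a heap with an ion device
 * @dev:	the ion device
 * @heap:	the heap to add, with a unique id and a complete ops table
 *
 * Heaps are kept in an rb tree ordered by id, which is also the order
 * ion_alloc() tries them in, and each heap gets a debugfs file listing
 * per-client usage.
 */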
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
				"id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

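/**
 * ion_device_create - allocate and register an ion device
 * @custom_ioctl:	optional handler invoked for ION_IOC_CUSTOM
 *
 * Registers the "ion" misc device, creates the debugfs root and initialises
 * the buffer, heap and client trees.  Returns the device or an ERR_PTR().
 */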
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

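/**
 * ion_reserve - reserve the memory regions described in the platform data
 * @data:	platform data listing the heaps and their base/size
 *
 * Intended to be called early at boot; skips heaps with a zero size and
 * logs (but does not propagate) any memblock_reserve() failure.
 */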
void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}
1070