ion.c revision 37bdbf00c618203467a17d01a4dfae324a818022
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		rb tree of all the heaps in the system
 * @custom_ioctl:	device specific ioctl hook, may be NULL
 * @clients:		rb tree of all the clients in the system
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs dentry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED)
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise sg_lists\n",
			       __func__);
			heap->ops->unmap_dma(heap, buffer);
			heap->ops->free(buffer);
			kfree(buffer);
			return ERR_PTR(-EINVAL);
		}

	ret = ion_buffer_alloc_dirty(buffer);
	if (ret) {
		heap->ops->unmap_dma(heap, buffer);
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;
}

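/*
 * kref release callback for a buffer: undoes any leaked kernel mapping,
 * releases the dma mapping and the heap memory, then removes the buffer
 * from the device's buffer tree before freeing it.
 */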
static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

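/*
 * Walk the client's handle tree looking for a handle that references
 * @buffer; returns NULL if the client has no handle for it.  The caller
 * must hold client->lock.
 */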
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

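/*
 * Returns true if @handle is present in the client's handle tree, i.e.
 * it really belongs to this client.  The caller must hold client->lock.
 */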
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

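/**
 * ion_alloc - allocate a buffer and return a handle to it
 * @client:	the client performing the allocation
 * @len:	requested size in bytes, rounded up to a page multiple
 * @align:	requested alignment, passed through to the heap
 * @heap_mask:	mask of heap ids the caller is willing to allocate from
 * @flags:	heap specific flags such as ION_FLAG_CACHED
 *
 * Walks the device's heaps in priority order and allocates from the first
 * heap that is both supported by the client and present in @heap_mask.
 * Returns a new handle on success or an ERR_PTR on failure.
 */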
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client and matches
	 * the request of the caller, allocate from it.  Repeat until
	 * allocate has succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

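/**
 * ion_free - drop a client's reference to a handle
 * @client:	the client the handle belongs to
 * @handle:	the handle to free
 *
 * Validates that @handle really belongs to @client before dropping the
 * reference; the underlying buffer is only released once its last
 * reference (handles and any exported dma-bufs) is gone.
 */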
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

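/**
 * ion_phys - return the physical address and length of a buffer
 * @client:	the client the handle belongs to
 * @handle:	the handle to look up
 * @addr:	returned physical address
 * @len:	returned length of the buffer
 *
 * Only works for heaps that implement the phys op; returns -ENODEV
 * otherwise.
 */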
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

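/**
 * ion_map_kernel - create a kernel mapping for a handle's buffer
 * @client:	the client the handle belongs to
 * @handle:	the handle to map
 *
 * Mappings are reference counted per handle and per buffer, so repeated
 * calls return the same virtual address until ion_unmap_kernel() drops
 * the last reference.
 */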
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

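/**
 * ion_client_create - allocate a client and add it to the device
 * @dev:	the ion device
 * @heap_mask:	mask of heap types this client may allocate from
 * @name:	used for debugging
 *
 * A reference to the group leader's task_struct is kept for debugging
 * (except for kernel threads), and a debugfs file named after the pid is
 * created under the device's debug root.
 */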
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

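/**
 * ion_sg_table - get the scatterlist describing a handle's buffer
 * @client:	the client the handle belongs to
 * @handle:	the handle to look up
 *
 * The table is created once at allocation time by the heap's map_dma op
 * and lives for the lifetime of the buffer, so no unmap call is needed.
 */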
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

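/*
 * For cached buffers, sync any pages the CPU has faulted in and dirtied
 * back for device access, then zap the userspace mappings so future CPU
 * accesses fault again and get re-synced.
 */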
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!(buffer->flags & ION_FLAG_CACHED))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

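/*
 * Fault handler for cached buffers: pages are inserted into userspace
 * mappings on demand, synced for CPU access and marked dirty so that
 * ion_buffer_sync_for_device() knows to sync them back later.
 */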
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
	} else {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mutex_lock(&buffer->lock);
		/* now map it to userspace */
		ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
		mutex_unlock(&buffer->lock);
	}

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

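/**
 * ion_share_dma_buf - export a handle's buffer as a dma-buf fd
 * @client:	the client the handle belongs to
 * @handle:	the handle to share
 *
 * Takes a reference on the buffer on behalf of the dma-buf; the fd can be
 * passed to another process and re-imported with ion_import_dma_buf().
 */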
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

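/**
 * ion_import_dma_buf - get a handle for an ion-exported dma-buf fd
 * @client:	the client to create the handle in
 * @fd:		the dma-buf file descriptor to import
 *
 * Only fds exported by ion itself are accepted.  If the client already
 * has a handle for the underlying buffer, a reference to the existing
 * handle is returned instead of creating a new one.
 */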
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;
	ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

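/*
 * Heaps are kept in an rbtree sorted by id, which is also the order
 * ion_alloc() tries them in; each heap gets a debugfs file showing
 * per-client usage.
 */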
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

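/*
 * Reserve, at boot, the memory regions described in the platform data so
 * heaps backed by a fixed carveout have their memory set aside before the
 * page allocator claims it; entries with size 0 are skipped.
 */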
void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}
