ion.c revision a1c6b996ec1d9c78ab58f189719e1674fea3d2fb
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers and heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	optional device-specific ioctl handler
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		root dentry for the device's debugfs entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the process the client was created for
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED)
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise sg_lists\n",
			       __func__);
			heap->ops->unmap_dma(heap, buffer);
			kfree(buffer);
			return ERR_PTR(-EINVAL);
		}

	ret = ion_buffer_alloc_dirty(buffer);
	if (ret) {
		heap->ops->unmap_dma(heap, buffer);
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.\n", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
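
/*
 * Illustrative sketch, not part of the driver: roughly how an in-kernel user
 * of the API above might allocate a buffer, touch it through a kernel mapping
 * and release it again.  The device pointer "idev", the client name, the heap
 * mask and the sizes are arbitrary examples, not values required by ion; note
 * that the mask passed to ion_alloc() is matched against heap ids, which
 * platform data usually sets equal to the heap types.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, ION_HEAP_SYSTEM_MASK, "example");
 *	if (IS_ERR_OR_NULL(client))
 *		return -ENOMEM;
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   ION_HEAP_SYSTEM_MASK, 0);
 *	if (IS_ERR_OR_NULL(handle)) {
 *		ion_client_destroy(client);
 *		return -ENOMEM;
 *	}
 *
 *	vaddr = ion_map_kernel(client, handle);
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		memset(vaddr, 0, SZ_64K);
 *		ion_unmap_kernel(client, handle);
 *	}
 *
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */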

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1) / BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}
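
/*
 * Example of the sizing above (illustrative only): a buffer described by 256
 * single-page sg entries needs 256 dirty bits; (256 + BITS_PER_LONG - 1) /
 * BITS_PER_LONG rounds up to 4 longs on a 64-bit kernel, so 32 bytes are
 * allocated for the bitmap consumed by ion_vm_fault() and
 * ion_buffer_sync_for_device() below.
 */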

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!(buffer->flags & ION_FLAG_CACHED))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
	} else {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mutex_lock(&buffer->lock);
		/* now map it to userspace */
		ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
		mutex_unlock(&buffer->lock);
	}

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		dma_buf_put(dmabuf);
		ion_buffer_put(buffer);
	}
	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
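
/*
 * Illustrative sketch, not part of the driver: ion_share_dma_buf() installs a
 * dma-buf fd in the calling process, which is normally handed to userspace
 * (see ION_IOC_SHARE below); the same fd can be turned back into a local
 * handle with ion_import_dma_buf().  "producer", "consumer" and "handle" are
 * hypothetical clients and handle assumed to exist already, and the consumer
 * drops its reference with ion_free() when it is done with the buffer.
 *
 *	int fd = ion_share_dma_buf(producer, handle);
 *
 *	if (fd >= 0) {
 *		struct ion_handle *imported;
 *
 *		imported = ion_import_dma_buf(consumer, fd);
 *		if (!IS_ERR_OR_NULL(imported))
 *			ion_free(consumer, imported);
 *	}
 */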

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;
	ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
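
/*
 * Illustrative sketch, not part of the driver: the userspace side of the
 * ioctl interface above.  The length, alignment, heap mask and flags are
 * arbitrary examples, and error handling is omitted for brevity.
 *
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_mask = ION_HEAP_SYSTEM_MASK,
 *		.flags = 0,
 *	};
 *	struct ion_fd_data fd_data;
 *	struct ion_handle_data handle_data;
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	void *ptr;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd_data.fd, 0);
 *
 *	... use the mapping, or pass fd_data.fd to another process ...
 *
 *	munmap(ptr, 4096);
 *	close(fd_data.fd);
 *	handle_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &handle_data);
 *	close(ion_fd);
 */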

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16.16s %16.16s %16.16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16.16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16.16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}
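
/*
 * Illustrative sketch, not part of the driver: how a platform driver might
 * bring up an ion device from its platform data, assuming the generic
 * ion_heap_create() helper from ion_heap.c.  "pdata" is hypothetical and
 * error handling is reduced to the bare minimum.
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	int i;
 *
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */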

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}