ion.c revision 856661d514e8afc452bcda6f4c05a957694fea78
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	driver-specific hook called to handle ION_IOC_CUSTOM
 * @clients:		an rb tree of all the clients, kernel and userspace
 * @debug_root:		debugfs root directory for ion
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client's group leader, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED &&
	    !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise "
			       "sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

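/*
 * Buffer reference counting: a reference is held by each handle that wraps
 * the buffer (taken in ion_handle_create) and by each exported dma_buf
 * (taken in ion_share_dma_buf).  The buffer is freed by ion_buffer_destroy
 * when the last reference is dropped via ion_buffer_put().
 */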
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->dev->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->dev->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->dev->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->dev->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

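/*
 * Check that @handle really is a live handle belonging to @client by
 * walking the client's handle rbtree (which is keyed by handle address).
 * Must be called with client->lock held.
 */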
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

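/*
 * Kernel mappings are reference counted at two levels: buffer->kmap_cnt
 * ensures the heap's map_kernel/unmap_kernel ops run only on the first
 * map and the last unmap of a buffer, while handle->kmap_cnt tracks how
 * many outstanding maps each client handle has.
 */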
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

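/*
 * For cached buffers (without ION_FLAG_CACHED_NEEDS_SYNC) a bitmap with one
 * bit per sg entry (one page each) records which pages have been faulted in
 * by the CPU and therefore need a cache sync before the next device access.
 */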
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

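/*
 * Sync only the pages marked dirty by the fault handler for device access,
 * then zap any userspace mappings of the buffer so the next CPU touch
 * faults again and re-marks the affected page as dirty.
 */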
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!(buffer->flags & ION_FLAG_CACHED) ||
	    (buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

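/*
 * Fault handler used for cached buffers: pages are inserted into the vma
 * one at a time, each one synced for CPU access and marked dirty so it
 * will be flushed before the device touches the buffer again.
 */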
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

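/*
 * mmap of the dma_buf: cached buffers (without ION_FLAG_CACHED_NEEDS_SYNC)
 * are mapped lazily through ion_vma_ops so dirty pages can be tracked;
 * everything else is mapped up front by the heap's map_user op, with
 * uncached buffers getting a writecombine protection.
 */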
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED &&
	    !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

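/*
 * Export a handle's buffer as a dma_buf and return an fd for it.  The
 * dma_buf takes its own reference on the buffer, which is dropped in
 * ion_dma_buf_release when the last fd reference goes away.
 */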
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

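/*
 * Turn a dma_buf fd back into an ion handle.  Only dma_bufs exported by
 * ion can be imported; if this client already has a handle for the buffer
 * the existing handle's refcount is taken instead of creating a new one.
 */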
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

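/*
 * ION_IOC_SYNC: flush the entire buffer backing the given dma_buf fd for
 * device access, independent of the per-page dirty tracking.
 */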
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):"
		   "\n");
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->type == heap->type)
			total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu\n", buffer->task_comm,
				   buffer->pid, buffer->size);
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
				"id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}