/* ion.c, revision df0f6c7634f0856c3eb3d26ddc909d414199f6a7 */
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		rb tree of all the heaps in the system
 * @custom_ioctl:	optional device-specific ioctl hook
 * @clients:		rb tree of all the clients created against this device
 * @debug_root:		debugfs root directory for ion
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client task, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

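/*
 * A buffer gets faulted into userspace mappings page by page (with dirty
 * tracking and lazy sync for device) only when it is cached and the caller
 * did not request explicit sync via ION_FLAG_CACHED_NEEDS_SYNC.
 */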
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in must have pagewise sg_lists\n",
			       __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

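/*
 * handle_count tracks how many client handles currently reference this
 * buffer.  When it drops to zero the buffer only lives on as a dma_buf fd,
 * and the owning task's comm/pid are recorded purely for the "orphaned
 * allocations" debugfs output below.
 */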
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

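/*
 * Walk the client's handle tree looking for a handle that already wraps
 * this buffer (used when importing a dma_buf that this client already
 * holds).  Must be called with client->lock held.
 */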
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

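/*
 * The client's handle tree is keyed by the handle pointer itself, so
 * validating a handle is a plain rbtree search.  Must be called with
 * client->lock held.
 */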
static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.\n", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

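/*
 * Kernel mappings are reference counted at two levels: per buffer
 * (buffer->kmap_cnt, created lazily via the heap's map_kernel op) and per
 * handle (handle->kmap_cnt), so each client can balance its own map/unmap
 * calls.  These helpers expect buffer->lock to be held by the caller.
 */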
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

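/*
 * Create a client against the given device.  heap_mask limits which heap
 * types the client may allocate from.  The group leader's task_struct is
 * pinned (except for kernel threads) so debugfs can later report the task
 * comm, and the client is inserted into the device's client rbtree under
 * dev->lock.
 */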
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

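/*
 * Allocate one dirty bit per sg entry (one entry == one page for buffers
 * that use faulted mappings).  A set bit means the CPU has touched that
 * page since the last sync for device.
 */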
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

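/*
 * Called before handing the buffer to a device (from ion_map_dma_buf).
 * For buffers with faulted user mappings, write back every dirty page for
 * the device and then zap all user mappings so the next CPU access faults
 * again and re-syncs for the CPU.
 */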
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

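/*
 * Fault handler for cached, lazily-synced buffers: mark the faulting page
 * dirty, sync that single page for the CPU and insert it into the vma.
 */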
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

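/*
 * dma-buf mmap op.  Cached buffers without explicit sync get the
 * fault-based mapping above; everything else is mapped up front by the
 * heap, with uncached buffers forced to write-combine page protection.
 */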
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

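/*
 * begin/end_cpu_access bracket CPU access through the dma-buf kmap ops:
 * begin takes a reference-counted kernel mapping (so ion_dma_buf_kmap can
 * simply offset into buffer->vaddr) and end drops it again.
 */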
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

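/*
 * ION_IOC_SYNC backend: resolve the fd, check it really is an ion dma_buf
 * (by comparing ops), then write the whole buffer back for device access.
 */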
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

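/*
 * Userspace entry point: each ioctl copies its argument struct in, calls
 * the corresponding ion_* helper above, and copies the result back out.
 */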
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

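/*
 * Sum the sizes of all of this client's handles whose buffers live on a
 * heap of the given type, for the per-heap debugfs file.
 */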
static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->type != heap->type)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm,
				   buffer->pid, buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

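/*
 * Register a heap with the device: heaps are kept in an rbtree ordered by
 * id (the order ion_alloc traverses them in, i.e. allocation priority),
 * and each heap gets its own debugfs file.
 */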
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	down_write(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;
			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}