ion.c revision 22ba43220b594983c18394442e58ed4988893de6
1/*
2
3 * drivers/staging/android/ion/ion.c
4 *
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/device.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/anon_inodes.h>
22#include <linux/list.h>
23#include <linux/memblock.h>
24#include <linux/miscdevice.h>
25#include <linux/export.h>
26#include <linux/mm.h>
27#include <linux/mm_types.h>
28#include <linux/rbtree.h>
29#include <linux/sched.h>
30#include <linux/slab.h>
31#include <linux/seq_file.h>
32#include <linux/uaccess.h>
33#include <linux/debugfs.h>
34#include <linux/dma-buf.h>
35
36#include "ion.h"
37#include "ion_priv.h"
38
39/**
40 * struct ion_device - the metadata of the ion device node
41 * @dev:		the actual misc device
42 * @buffers:		an rb tree of all the existing buffers
43 * @buffer_lock:	lock protecting the tree of buffers
44 * @lock:		rwsem protecting the tree of heaps and clients
45 * @heaps:		list of all the heaps in the system
46 * @user_clients:	list of all the clients created from userspace
47 */
48struct ion_device {
49	struct miscdevice dev;
50	struct rb_root buffers;
51	struct mutex buffer_lock;
52	struct rw_semaphore lock;
53	struct plist_head heaps;
54	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
55			      unsigned long arg);
56	struct rb_root clients;
57	struct dentry *debug_root;
58};
59
60/**
61 * struct ion_client - a process/hw block local address space
62 * @node:		node in the tree of all clients
63 * @dev:		backpointer to ion device
64 * @handles:		an rb tree of all the handles in this client
65 * @lock:		lock protecting the tree of handles
66 * @name:		used for debugging
67 * @task:		used for debugging
68 *
69 * A client represents a list of buffers this client may access.
70 * The mutex stored here is used to protect both handles tree
71 * as well as the handles themselves, and should be held while modifying either.
72 */
73struct ion_client {
74	struct rb_node node;
75	struct ion_device *dev;
76	struct rb_root handles;
77	struct mutex lock;
78	const char *name;
79	struct task_struct *task;
80	pid_t pid;
81	struct dentry *debug_root;
82};
83
84/**
85 * ion_handle - a client local reference to a buffer
86 * @ref:		reference count
87 * @client:		back pointer to the client the buffer resides in
88 * @buffer:		pointer to the buffer
89 * @node:		node in the client's handle rbtree
90 * @kmap_cnt:		count of times this client has mapped to kernel
91 * @dmap_cnt:		count of times this client has mapped for dma
92 *
93 * Modifications to node, map_cnt or mapping should be protected by the
94 * lock in the client.  Other fields are never changed after initialization.
95 */
96struct ion_handle {
97	struct kref ref;
98	struct ion_client *client;
99	struct ion_buffer *buffer;
100	struct rb_node node;
101	unsigned int kmap_cnt;
102};
103
104bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
105{
106        return ((buffer->flags & ION_FLAG_CACHED) &&
107                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
108}
109
110bool ion_buffer_cached(struct ion_buffer *buffer)
111{
112        return !!(buffer->flags & ION_FLAG_CACHED);
113}
114
115/* this function should only be called while dev->lock is held */
116static void ion_buffer_add(struct ion_device *dev,
117			   struct ion_buffer *buffer)
118{
119	struct rb_node **p = &dev->buffers.rb_node;
120	struct rb_node *parent = NULL;
121	struct ion_buffer *entry;
122
123	while (*p) {
124		parent = *p;
125		entry = rb_entry(parent, struct ion_buffer, node);
126
127		if (buffer < entry) {
128			p = &(*p)->rb_left;
129		} else if (buffer > entry) {
130			p = &(*p)->rb_right;
131		} else {
132			pr_err("%s: buffer already found.", __func__);
133			BUG();
134		}
135	}
136
137	rb_link_node(&buffer->node, parent, p);
138	rb_insert_color(&buffer->node, &dev->buffers);
139}
140
141static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
142
143/* this function should only be called while dev->lock is held */
144static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
145				     struct ion_device *dev,
146				     unsigned long len,
147				     unsigned long align,
148				     unsigned long flags)
149{
150	struct ion_buffer *buffer;
151	struct sg_table *table;
152	struct scatterlist *sg;
153	int i, ret;
154
155	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
156	if (!buffer)
157		return ERR_PTR(-ENOMEM);
158
159	buffer->heap = heap;
160	buffer->flags = flags;
161	kref_init(&buffer->ref);
162
163	ret = heap->ops->allocate(heap, buffer, len, align, flags);
164	if (ret) {
165		kfree(buffer);
166		return ERR_PTR(ret);
167	}
168
169	buffer->dev = dev;
170	buffer->size = len;
171
172	table = heap->ops->map_dma(heap, buffer);
173	if (IS_ERR_OR_NULL(table)) {
174		heap->ops->free(buffer);
175		kfree(buffer);
176		return ERR_PTR(PTR_ERR(table));
177	}
178	buffer->sg_table = table;
179	if (ion_buffer_fault_user_mappings(buffer)) {
180		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
181			    i) {
182			if (sg_dma_len(sg) == PAGE_SIZE)
183				continue;
184			pr_err("%s: cached mappings that will be faulted in "
185			       "must have pagewise sg_lists\n", __func__);
186			ret = -EINVAL;
187			goto err;
188		}
189
190		ret = ion_buffer_alloc_dirty(buffer);
191		if (ret)
192			goto err;
193	}
194
195	buffer->dev = dev;
196	buffer->size = len;
197	INIT_LIST_HEAD(&buffer->vmas);
198	mutex_init(&buffer->lock);
199	/* this will set up dma addresses for the sglist -- it is not
200	   technically correct as per the dma api -- a specific
201	   device isn't really taking ownership here.  However, in practice on
202	   our systems the only dma_address space is physical addresses.
203	   Additionally, we can't afford the overhead of invalidating every
204	   allocation via dma_map_sg. The implicit contract here is that
205	   memory comming from the heaps is ready for dma, ie if it has a
206	   cached mapping that mapping has been invalidated */
207	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
208		sg_dma_address(sg) = sg_phys(sg);
209	mutex_lock(&dev->buffer_lock);
210	ion_buffer_add(dev, buffer);
211	mutex_unlock(&dev->buffer_lock);
212	return buffer;
213
214err:
215	heap->ops->unmap_dma(heap, buffer);
216	heap->ops->free(buffer);
217	kfree(buffer);
218	return ERR_PTR(ret);
219}
220
221static void ion_buffer_destroy(struct kref *kref)
222{
223	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
224	struct ion_device *dev = buffer->dev;
225
226	if (WARN_ON(buffer->kmap_cnt > 0))
227		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
228	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
229	buffer->heap->ops->free(buffer);
230	mutex_lock(&dev->buffer_lock);
231	rb_erase(&buffer->node, &dev->buffers);
232	mutex_unlock(&dev->buffer_lock);
233	if (buffer->flags & ION_FLAG_CACHED)
234		kfree(buffer->dirty);
235	kfree(buffer);
236}
237
238static void ion_buffer_get(struct ion_buffer *buffer)
239{
240	kref_get(&buffer->ref);
241}
242
243static int ion_buffer_put(struct ion_buffer *buffer)
244{
245	return kref_put(&buffer->ref, ion_buffer_destroy);
246}
247
248static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
249{
250	mutex_lock(&buffer->lock);
251	buffer->handle_count++;
252	mutex_unlock(&buffer->lock);
253}
254
255static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
256{
257	/*
258	 * when a buffer is removed from a handle, if it is not in
259	 * any other handles, copy the taskcomm and the pid of the
260	 * process it's being removed from into the buffer.  At this
261	 * point there will be no way to track what processes this buffer is
262	 * being used by, it only exists as a dma_buf file descriptor.
263	 * The taskcomm and pid can provide a debug hint as to where this fd
264	 * is in the system
265	 */
266	mutex_lock(&buffer->lock);
267	buffer->handle_count--;
268	BUG_ON(buffer->handle_count < 0);
269	if (!buffer->handle_count) {
270		struct task_struct *task;
271
272		task = current->group_leader;
273		get_task_comm(buffer->task_comm, task);
274		buffer->pid = task_pid_nr(task);
275	}
276	mutex_unlock(&buffer->lock);
277}
278
279static struct ion_handle *ion_handle_create(struct ion_client *client,
280				     struct ion_buffer *buffer)
281{
282	struct ion_handle *handle;
283
284	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
285	if (!handle)
286		return ERR_PTR(-ENOMEM);
287	kref_init(&handle->ref);
288	RB_CLEAR_NODE(&handle->node);
289	handle->client = client;
290	ion_buffer_get(buffer);
291	ion_buffer_add_to_handle(buffer);
292	handle->buffer = buffer;
293
294	return handle;
295}
296
297static void ion_handle_kmap_put(struct ion_handle *);
298
299static void ion_handle_destroy(struct kref *kref)
300{
301	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
302	struct ion_client *client = handle->client;
303	struct ion_buffer *buffer = handle->buffer;
304
305	mutex_lock(&buffer->lock);
306	while (handle->kmap_cnt)
307		ion_handle_kmap_put(handle);
308	mutex_unlock(&buffer->lock);
309
310	if (!RB_EMPTY_NODE(&handle->node))
311		rb_erase(&handle->node, &client->handles);
312
313	ion_buffer_remove_from_handle(buffer);
314	ion_buffer_put(buffer);
315
316	kfree(handle);
317}
318
319struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
320{
321	return handle->buffer;
322}
323
324static void ion_handle_get(struct ion_handle *handle)
325{
326	kref_get(&handle->ref);
327}
328
329static int ion_handle_put(struct ion_handle *handle)
330{
331	return kref_put(&handle->ref, ion_handle_destroy);
332}
333
334static struct ion_handle *ion_handle_lookup(struct ion_client *client,
335					    struct ion_buffer *buffer)
336{
337	struct rb_node *n;
338
339	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
340		struct ion_handle *handle = rb_entry(n, struct ion_handle,
341						     node);
342		if (handle->buffer == buffer)
343			return handle;
344	}
345	return NULL;
346}
347
348static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
349{
350	struct rb_node *n = client->handles.rb_node;
351
352	while (n) {
353		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
354							  node);
355		if (handle < handle_node)
356			n = n->rb_left;
357		else if (handle > handle_node)
358			n = n->rb_right;
359		else
360			return true;
361	}
362	return false;
363}
364
365static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
366{
367	struct rb_node **p = &client->handles.rb_node;
368	struct rb_node *parent = NULL;
369	struct ion_handle *entry;
370
371	while (*p) {
372		parent = *p;
373		entry = rb_entry(parent, struct ion_handle, node);
374
375		if (handle < entry)
376			p = &(*p)->rb_left;
377		else if (handle > entry)
378			p = &(*p)->rb_right;
379		else
380			WARN(1, "%s: buffer already found.", __func__);
381	}
382
383	rb_link_node(&handle->node, parent, p);
384	rb_insert_color(&handle->node, &client->handles);
385}
386
387struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
388			     size_t align, unsigned int heap_id_mask,
389			     unsigned int flags)
390{
391	struct ion_handle *handle;
392	struct ion_device *dev = client->dev;
393	struct ion_buffer *buffer = NULL;
394	struct ion_heap *heap;
395
396	pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
397		 len, align, heap_id_mask, flags);
398	/*
399	 * traverse the list of heaps available in this system in priority
400	 * order.  If the heap type is supported by the client, and matches the
401	 * request of the caller allocate from it.  Repeat until allocate has
402	 * succeeded or all heaps have been tried
403	 */
404	if (WARN_ON(!len))
405		return ERR_PTR(-EINVAL);
406
407	len = PAGE_ALIGN(len);
408
409	down_read(&dev->lock);
410	plist_for_each_entry(heap, &dev->heaps, node) {
411		/* if the caller didn't specify this heap id */
412		if (!((1 << heap->id) & heap_id_mask))
413			continue;
414		buffer = ion_buffer_create(heap, dev, len, align, flags);
415		if (!IS_ERR_OR_NULL(buffer))
416			break;
417	}
418	up_read(&dev->lock);
419
420	if (buffer == NULL)
421		return ERR_PTR(-ENODEV);
422
423	if (IS_ERR(buffer))
424		return ERR_PTR(PTR_ERR(buffer));
425
426	handle = ion_handle_create(client, buffer);
427
428	/*
429	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
430	 * and ion_handle_create will take a second reference, drop one here
431	 */
432	ion_buffer_put(buffer);
433
434	if (!IS_ERR(handle)) {
435		mutex_lock(&client->lock);
436		ion_handle_add(client, handle);
437		mutex_unlock(&client->lock);
438	}
439
440
441	return handle;
442}
443EXPORT_SYMBOL(ion_alloc);
444
445void ion_free(struct ion_client *client, struct ion_handle *handle)
446{
447	bool valid_handle;
448
449	BUG_ON(client != handle->client);
450
451	mutex_lock(&client->lock);
452	valid_handle = ion_handle_validate(client, handle);
453
454	if (!valid_handle) {
455		WARN(1, "%s: invalid handle passed to free.\n", __func__);
456		mutex_unlock(&client->lock);
457		return;
458	}
459	ion_handle_put(handle);
460	mutex_unlock(&client->lock);
461}
462EXPORT_SYMBOL(ion_free);
463
464int ion_phys(struct ion_client *client, struct ion_handle *handle,
465	     ion_phys_addr_t *addr, size_t *len)
466{
467	struct ion_buffer *buffer;
468	int ret;
469
470	mutex_lock(&client->lock);
471	if (!ion_handle_validate(client, handle)) {
472		mutex_unlock(&client->lock);
473		return -EINVAL;
474	}
475
476	buffer = handle->buffer;
477
478	if (!buffer->heap->ops->phys) {
479		pr_err("%s: ion_phys is not implemented by this heap.\n",
480		       __func__);
481		mutex_unlock(&client->lock);
482		return -ENODEV;
483	}
484	mutex_unlock(&client->lock);
485	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
486	return ret;
487}
488EXPORT_SYMBOL(ion_phys);
489
490static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
491{
492	void *vaddr;
493
494	if (buffer->kmap_cnt) {
495		buffer->kmap_cnt++;
496		return buffer->vaddr;
497	}
498	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
499	if (IS_ERR_OR_NULL(vaddr))
500		return vaddr;
501	buffer->vaddr = vaddr;
502	buffer->kmap_cnt++;
503	return vaddr;
504}
505
506static void *ion_handle_kmap_get(struct ion_handle *handle)
507{
508	struct ion_buffer *buffer = handle->buffer;
509	void *vaddr;
510
511	if (handle->kmap_cnt) {
512		handle->kmap_cnt++;
513		return buffer->vaddr;
514	}
515	vaddr = ion_buffer_kmap_get(buffer);
516	if (IS_ERR_OR_NULL(vaddr))
517		return vaddr;
518	handle->kmap_cnt++;
519	return vaddr;
520}
521
522static void ion_buffer_kmap_put(struct ion_buffer *buffer)
523{
524	buffer->kmap_cnt--;
525	if (!buffer->kmap_cnt) {
526		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
527		buffer->vaddr = NULL;
528	}
529}
530
531static void ion_handle_kmap_put(struct ion_handle *handle)
532{
533	struct ion_buffer *buffer = handle->buffer;
534
535	handle->kmap_cnt--;
536	if (!handle->kmap_cnt)
537		ion_buffer_kmap_put(buffer);
538}
539
540void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
541{
542	struct ion_buffer *buffer;
543	void *vaddr;
544
545	mutex_lock(&client->lock);
546	if (!ion_handle_validate(client, handle)) {
547		pr_err("%s: invalid handle passed to map_kernel.\n",
548		       __func__);
549		mutex_unlock(&client->lock);
550		return ERR_PTR(-EINVAL);
551	}
552
553	buffer = handle->buffer;
554
555	if (!handle->buffer->heap->ops->map_kernel) {
556		pr_err("%s: map_kernel is not implemented by this heap.\n",
557		       __func__);
558		mutex_unlock(&client->lock);
559		return ERR_PTR(-ENODEV);
560	}
561
562	mutex_lock(&buffer->lock);
563	vaddr = ion_handle_kmap_get(handle);
564	mutex_unlock(&buffer->lock);
565	mutex_unlock(&client->lock);
566	return vaddr;
567}
568EXPORT_SYMBOL(ion_map_kernel);
569
570void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
571{
572	struct ion_buffer *buffer;
573
574	mutex_lock(&client->lock);
575	buffer = handle->buffer;
576	mutex_lock(&buffer->lock);
577	ion_handle_kmap_put(handle);
578	mutex_unlock(&buffer->lock);
579	mutex_unlock(&client->lock);
580}
581EXPORT_SYMBOL(ion_unmap_kernel);
582
583static int ion_debug_client_show(struct seq_file *s, void *unused)
584{
585	struct ion_client *client = s->private;
586	struct rb_node *n;
587	size_t sizes[ION_NUM_HEAP_IDS] = {0};
588	const char *names[ION_NUM_HEAP_IDS] = {0};
589	int i;
590
591	mutex_lock(&client->lock);
592	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
593		struct ion_handle *handle = rb_entry(n, struct ion_handle,
594						     node);
595		unsigned int id = handle->buffer->heap->id;
596
597		if (!names[id])
598			names[id] = handle->buffer->heap->name;
599		sizes[id] += handle->buffer->size;
600	}
601	mutex_unlock(&client->lock);
602
603	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
604	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
605		if (!names[i])
606			continue;
607		seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
608	}
609	return 0;
610}
611
612static int ion_debug_client_open(struct inode *inode, struct file *file)
613{
614	return single_open(file, ion_debug_client_show, inode->i_private);
615}
616
617static const struct file_operations debug_client_fops = {
618	.open = ion_debug_client_open,
619	.read = seq_read,
620	.llseek = seq_lseek,
621	.release = single_release,
622};
623
624struct ion_client *ion_client_create(struct ion_device *dev,
625				     const char *name)
626{
627	struct ion_client *client;
628	struct task_struct *task;
629	struct rb_node **p;
630	struct rb_node *parent = NULL;
631	struct ion_client *entry;
632	char debug_name[64];
633	pid_t pid;
634
635	get_task_struct(current->group_leader);
636	task_lock(current->group_leader);
637	pid = task_pid_nr(current->group_leader);
638	/* don't bother to store task struct for kernel threads,
639	   they can't be killed anyway */
640	if (current->group_leader->flags & PF_KTHREAD) {
641		put_task_struct(current->group_leader);
642		task = NULL;
643	} else {
644		task = current->group_leader;
645	}
646	task_unlock(current->group_leader);
647
648	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
649	if (!client) {
650		if (task)
651			put_task_struct(current->group_leader);
652		return ERR_PTR(-ENOMEM);
653	}
654
655	client->dev = dev;
656	client->handles = RB_ROOT;
657	mutex_init(&client->lock);
658	client->name = name;
659	client->task = task;
660	client->pid = pid;
661
662	down_write(&dev->lock);
663	p = &dev->clients.rb_node;
664	while (*p) {
665		parent = *p;
666		entry = rb_entry(parent, struct ion_client, node);
667
668		if (client < entry)
669			p = &(*p)->rb_left;
670		else if (client > entry)
671			p = &(*p)->rb_right;
672	}
673	rb_link_node(&client->node, parent, p);
674	rb_insert_color(&client->node, &dev->clients);
675
676	snprintf(debug_name, 64, "%u", client->pid);
677	client->debug_root = debugfs_create_file(debug_name, 0664,
678						 dev->debug_root, client,
679						 &debug_client_fops);
680	up_write(&dev->lock);
681
682	return client;
683}
684EXPORT_SYMBOL(ion_client_create);
685
686void ion_client_destroy(struct ion_client *client)
687{
688	struct ion_device *dev = client->dev;
689	struct rb_node *n;
690
691	pr_debug("%s: %d\n", __func__, __LINE__);
692	while ((n = rb_first(&client->handles))) {
693		struct ion_handle *handle = rb_entry(n, struct ion_handle,
694						     node);
695		ion_handle_destroy(&handle->ref);
696	}
697	down_write(&dev->lock);
698	if (client->task)
699		put_task_struct(client->task);
700	rb_erase(&client->node, &dev->clients);
701	debugfs_remove_recursive(client->debug_root);
702	up_write(&dev->lock);
703
704	kfree(client);
705}
706EXPORT_SYMBOL(ion_client_destroy);
707
708struct sg_table *ion_sg_table(struct ion_client *client,
709			      struct ion_handle *handle)
710{
711	struct ion_buffer *buffer;
712	struct sg_table *table;
713
714	mutex_lock(&client->lock);
715	if (!ion_handle_validate(client, handle)) {
716		pr_err("%s: invalid handle passed to map_dma.\n",
717		       __func__);
718		mutex_unlock(&client->lock);
719		return ERR_PTR(-EINVAL);
720	}
721	buffer = handle->buffer;
722	table = buffer->sg_table;
723	mutex_unlock(&client->lock);
724	return table;
725}
726EXPORT_SYMBOL(ion_sg_table);
727
728static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
729				       struct device *dev,
730				       enum dma_data_direction direction);
731
732static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
733					enum dma_data_direction direction)
734{
735	struct dma_buf *dmabuf = attachment->dmabuf;
736	struct ion_buffer *buffer = dmabuf->priv;
737
738	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
739	return buffer->sg_table;
740}
741
742static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
743			      struct sg_table *table,
744			      enum dma_data_direction direction)
745{
746}
747
748static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
749{
750	unsigned long pages = buffer->sg_table->nents;
751	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
752
753	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
754	if (!buffer->dirty)
755		return -ENOMEM;
756	return 0;
757}
758
759struct ion_vma_list {
760	struct list_head list;
761	struct vm_area_struct *vma;
762};
763
764static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
765				       struct device *dev,
766				       enum dma_data_direction dir)
767{
768	struct scatterlist *sg;
769	int i;
770	struct ion_vma_list *vma_list;
771
772	pr_debug("%s: syncing for device %s\n", __func__,
773		 dev ? dev_name(dev) : "null");
774
775	if (!ion_buffer_fault_user_mappings(buffer))
776		return;
777
778	mutex_lock(&buffer->lock);
779	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
780		if (!test_bit(i, buffer->dirty))
781			continue;
782		dma_sync_sg_for_device(dev, sg, 1, dir);
783		clear_bit(i, buffer->dirty);
784	}
785	list_for_each_entry(vma_list, &buffer->vmas, list) {
786		struct vm_area_struct *vma = vma_list->vma;
787
788		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
789			       NULL);
790	}
791	mutex_unlock(&buffer->lock);
792}
793
794int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
795{
796	struct ion_buffer *buffer = vma->vm_private_data;
797	struct scatterlist *sg;
798	int i;
799
800	mutex_lock(&buffer->lock);
801	set_bit(vmf->pgoff, buffer->dirty);
802
803	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
804		if (i != vmf->pgoff)
805			continue;
806		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
807		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
808			       sg_page(sg));
809		break;
810	}
811	mutex_unlock(&buffer->lock);
812	return VM_FAULT_NOPAGE;
813}
814
815static void ion_vm_open(struct vm_area_struct *vma)
816{
817	struct ion_buffer *buffer = vma->vm_private_data;
818	struct ion_vma_list *vma_list;
819
820	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
821	if (!vma_list)
822		return;
823	vma_list->vma = vma;
824	mutex_lock(&buffer->lock);
825	list_add(&vma_list->list, &buffer->vmas);
826	mutex_unlock(&buffer->lock);
827	pr_debug("%s: adding %p\n", __func__, vma);
828}
829
830static void ion_vm_close(struct vm_area_struct *vma)
831{
832	struct ion_buffer *buffer = vma->vm_private_data;
833	struct ion_vma_list *vma_list, *tmp;
834
835	pr_debug("%s\n", __func__);
836	mutex_lock(&buffer->lock);
837	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
838		if (vma_list->vma != vma)
839			continue;
840		list_del(&vma_list->list);
841		kfree(vma_list);
842		pr_debug("%s: deleting %p\n", __func__, vma);
843		break;
844	}
845	mutex_unlock(&buffer->lock);
846}
847
848struct vm_operations_struct ion_vma_ops = {
849	.open = ion_vm_open,
850	.close = ion_vm_close,
851	.fault = ion_vm_fault,
852};
853
854static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
855{
856	struct ion_buffer *buffer = dmabuf->priv;
857	int ret = 0;
858
859	if (!buffer->heap->ops->map_user) {
860		pr_err("%s: this heap does not define a method for mapping "
861		       "to userspace\n", __func__);
862		return -EINVAL;
863	}
864
865	if (ion_buffer_fault_user_mappings(buffer)) {
866		vma->vm_private_data = buffer;
867		vma->vm_ops = &ion_vma_ops;
868		ion_vm_open(vma);
869		return 0;
870	}
871
872	if (!(buffer->flags & ION_FLAG_CACHED))
873		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
874
875	mutex_lock(&buffer->lock);
876	/* now map it to userspace */
877	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
878	mutex_unlock(&buffer->lock);
879
880	if (ret)
881		pr_err("%s: failure mapping buffer to userspace\n",
882		       __func__);
883
884	return ret;
885}
886
887static void ion_dma_buf_release(struct dma_buf *dmabuf)
888{
889	struct ion_buffer *buffer = dmabuf->priv;
890	ion_buffer_put(buffer);
891}
892
893static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
894{
895	struct ion_buffer *buffer = dmabuf->priv;
896	return buffer->vaddr + offset * PAGE_SIZE;
897}
898
899static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
900			       void *ptr)
901{
902	return;
903}
904
905static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
906					size_t len,
907					enum dma_data_direction direction)
908{
909	struct ion_buffer *buffer = dmabuf->priv;
910	void *vaddr;
911
912	if (!buffer->heap->ops->map_kernel) {
913		pr_err("%s: map kernel is not implemented by this heap.\n",
914		       __func__);
915		return -ENODEV;
916	}
917
918	mutex_lock(&buffer->lock);
919	vaddr = ion_buffer_kmap_get(buffer);
920	mutex_unlock(&buffer->lock);
921	if (IS_ERR(vaddr))
922		return PTR_ERR(vaddr);
923	if (!vaddr)
924		return -ENOMEM;
925	return 0;
926}
927
928static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
929				       size_t len,
930				       enum dma_data_direction direction)
931{
932	struct ion_buffer *buffer = dmabuf->priv;
933
934	mutex_lock(&buffer->lock);
935	ion_buffer_kmap_put(buffer);
936	mutex_unlock(&buffer->lock);
937}
938
939struct dma_buf_ops dma_buf_ops = {
940	.map_dma_buf = ion_map_dma_buf,
941	.unmap_dma_buf = ion_unmap_dma_buf,
942	.mmap = ion_mmap,
943	.release = ion_dma_buf_release,
944	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
945	.end_cpu_access = ion_dma_buf_end_cpu_access,
946	.kmap_atomic = ion_dma_buf_kmap,
947	.kunmap_atomic = ion_dma_buf_kunmap,
948	.kmap = ion_dma_buf_kmap,
949	.kunmap = ion_dma_buf_kunmap,
950};
951
952struct dma_buf *ion_share_dma_buf(struct ion_client *client,
953						struct ion_handle *handle)
954{
955	struct ion_buffer *buffer;
956	struct dma_buf *dmabuf;
957	bool valid_handle;
958
959	mutex_lock(&client->lock);
960	valid_handle = ion_handle_validate(client, handle);
961	mutex_unlock(&client->lock);
962	if (!valid_handle) {
963		WARN(1, "%s: invalid handle passed to share.\n", __func__);
964		return ERR_PTR(-EINVAL);
965	}
966
967	buffer = handle->buffer;
968	ion_buffer_get(buffer);
969	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
970	if (IS_ERR(dmabuf)) {
971		ion_buffer_put(buffer);
972		return dmabuf;
973	}
974
975	return dmabuf;
976}
977EXPORT_SYMBOL(ion_share_dma_buf);
978
979int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
980{
981	struct dma_buf *dmabuf;
982	int fd;
983
984	dmabuf = ion_share_dma_buf(client, handle);
985	if (IS_ERR(dmabuf))
986		return PTR_ERR(dmabuf);
987
988	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
989	if (fd < 0)
990		dma_buf_put(dmabuf);
991
992	return fd;
993}
994EXPORT_SYMBOL(ion_share_dma_buf_fd);
995
996struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
997{
998	struct dma_buf *dmabuf;
999	struct ion_buffer *buffer;
1000	struct ion_handle *handle;
1001
1002	dmabuf = dma_buf_get(fd);
1003	if (IS_ERR_OR_NULL(dmabuf))
1004		return ERR_PTR(PTR_ERR(dmabuf));
1005	/* if this memory came from ion */
1006
1007	if (dmabuf->ops != &dma_buf_ops) {
1008		pr_err("%s: can not import dmabuf from another exporter\n",
1009		       __func__);
1010		dma_buf_put(dmabuf);
1011		return ERR_PTR(-EINVAL);
1012	}
1013	buffer = dmabuf->priv;
1014
1015	mutex_lock(&client->lock);
1016	/* if a handle exists for this buffer just take a reference to it */
1017	handle = ion_handle_lookup(client, buffer);
1018	if (!IS_ERR_OR_NULL(handle)) {
1019		ion_handle_get(handle);
1020		goto end;
1021	}
1022	handle = ion_handle_create(client, buffer);
1023	if (IS_ERR_OR_NULL(handle))
1024		goto end;
1025	ion_handle_add(client, handle);
1026end:
1027	mutex_unlock(&client->lock);
1028	dma_buf_put(dmabuf);
1029	return handle;
1030}
1031EXPORT_SYMBOL(ion_import_dma_buf);
1032
1033static int ion_sync_for_device(struct ion_client *client, int fd)
1034{
1035	struct dma_buf *dmabuf;
1036	struct ion_buffer *buffer;
1037
1038	dmabuf = dma_buf_get(fd);
1039	if (IS_ERR_OR_NULL(dmabuf))
1040		return PTR_ERR(dmabuf);
1041
1042	/* if this memory came from ion */
1043	if (dmabuf->ops != &dma_buf_ops) {
1044		pr_err("%s: can not sync dmabuf from another exporter\n",
1045		       __func__);
1046		dma_buf_put(dmabuf);
1047		return -EINVAL;
1048	}
1049	buffer = dmabuf->priv;
1050
1051	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1052			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1053	dma_buf_put(dmabuf);
1054	return 0;
1055}
1056
1057static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1058{
1059	struct ion_client *client = filp->private_data;
1060
1061	switch (cmd) {
1062	case ION_IOC_ALLOC:
1063	{
1064		struct ion_allocation_data data;
1065
1066		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1067			return -EFAULT;
1068		data.handle = ion_alloc(client, data.len, data.align,
1069					     data.heap_id_mask, data.flags);
1070
1071		if (IS_ERR(data.handle))
1072			return PTR_ERR(data.handle);
1073
1074		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1075			ion_free(client, data.handle);
1076			return -EFAULT;
1077		}
1078		break;
1079	}
1080	case ION_IOC_FREE:
1081	{
1082		struct ion_handle_data data;
1083		bool valid;
1084
1085		if (copy_from_user(&data, (void __user *)arg,
1086				   sizeof(struct ion_handle_data)))
1087			return -EFAULT;
1088		mutex_lock(&client->lock);
1089		valid = ion_handle_validate(client, data.handle);
1090		mutex_unlock(&client->lock);
1091		if (!valid)
1092			return -EINVAL;
1093		ion_free(client, data.handle);
1094		break;
1095	}
1096	case ION_IOC_SHARE:
1097	case ION_IOC_MAP:
1098	{
1099		struct ion_fd_data data;
1100
1101		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1102			return -EFAULT;
1103		data.fd = ion_share_dma_buf_fd(client, data.handle);
1104		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1105			return -EFAULT;
1106		if (data.fd < 0)
1107			return data.fd;
1108		break;
1109	}
1110	case ION_IOC_IMPORT:
1111	{
1112		struct ion_fd_data data;
1113		int ret = 0;
1114		if (copy_from_user(&data, (void __user *)arg,
1115				   sizeof(struct ion_fd_data)))
1116			return -EFAULT;
1117		data.handle = ion_import_dma_buf(client, data.fd);
1118		if (IS_ERR(data.handle)) {
1119			ret = PTR_ERR(data.handle);
1120			data.handle = NULL;
1121		}
1122		if (copy_to_user((void __user *)arg, &data,
1123				 sizeof(struct ion_fd_data)))
1124			return -EFAULT;
1125		if (ret < 0)
1126			return ret;
1127		break;
1128	}
1129	case ION_IOC_SYNC:
1130	{
1131		struct ion_fd_data data;
1132		if (copy_from_user(&data, (void __user *)arg,
1133				   sizeof(struct ion_fd_data)))
1134			return -EFAULT;
1135		ion_sync_for_device(client, data.fd);
1136		break;
1137	}
1138	case ION_IOC_CUSTOM:
1139	{
1140		struct ion_device *dev = client->dev;
1141		struct ion_custom_data data;
1142
1143		if (!dev->custom_ioctl)
1144			return -ENOTTY;
1145		if (copy_from_user(&data, (void __user *)arg,
1146				sizeof(struct ion_custom_data)))
1147			return -EFAULT;
1148		return dev->custom_ioctl(client, data.cmd, data.arg);
1149	}
1150	default:
1151		return -ENOTTY;
1152	}
1153	return 0;
1154}
1155
1156static int ion_release(struct inode *inode, struct file *file)
1157{
1158	struct ion_client *client = file->private_data;
1159
1160	pr_debug("%s: %d\n", __func__, __LINE__);
1161	ion_client_destroy(client);
1162	return 0;
1163}
1164
1165static int ion_open(struct inode *inode, struct file *file)
1166{
1167	struct miscdevice *miscdev = file->private_data;
1168	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1169	struct ion_client *client;
1170
1171	pr_debug("%s: %d\n", __func__, __LINE__);
1172	client = ion_client_create(dev, "user");
1173	if (IS_ERR_OR_NULL(client))
1174		return PTR_ERR(client);
1175	file->private_data = client;
1176
1177	return 0;
1178}
1179
1180static const struct file_operations ion_fops = {
1181	.owner          = THIS_MODULE,
1182	.open           = ion_open,
1183	.release        = ion_release,
1184	.unlocked_ioctl = ion_ioctl,
1185};
1186
1187static size_t ion_debug_heap_total(struct ion_client *client,
1188				   unsigned int id)
1189{
1190	size_t size = 0;
1191	struct rb_node *n;
1192
1193	mutex_lock(&client->lock);
1194	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1195		struct ion_handle *handle = rb_entry(n,
1196						     struct ion_handle,
1197						     node);
1198		if (handle->buffer->heap->id == id)
1199			size += handle->buffer->size;
1200	}
1201	mutex_unlock(&client->lock);
1202	return size;
1203}
1204
1205static int ion_debug_heap_show(struct seq_file *s, void *unused)
1206{
1207	struct ion_heap *heap = s->private;
1208	struct ion_device *dev = heap->dev;
1209	struct rb_node *n;
1210	size_t total_size = 0;
1211	size_t total_orphaned_size = 0;
1212
1213	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1214	seq_printf(s, "----------------------------------------------------\n");
1215
1216	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1217		struct ion_client *client = rb_entry(n, struct ion_client,
1218						     node);
1219		size_t size = ion_debug_heap_total(client, heap->id);
1220		if (!size)
1221			continue;
1222		if (client->task) {
1223			char task_comm[TASK_COMM_LEN];
1224
1225			get_task_comm(task_comm, client->task);
1226			seq_printf(s, "%16.s %16u %16u\n", task_comm,
1227				   client->pid, size);
1228		} else {
1229			seq_printf(s, "%16.s %16u %16u\n", client->name,
1230				   client->pid, size);
1231		}
1232	}
1233	seq_printf(s, "----------------------------------------------------\n");
1234	seq_printf(s, "orphaned allocations (info is from last known client):"
1235		   "\n");
1236	mutex_lock(&dev->buffer_lock);
1237	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1238		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1239						     node);
1240		if (buffer->heap->id != heap->id)
1241			continue;
1242		total_size += buffer->size;
1243		if (!buffer->handle_count) {
1244			seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
1245				   buffer->pid, buffer->size, buffer->kmap_cnt,
1246				   atomic_read(&buffer->ref.refcount));
1247			total_orphaned_size += buffer->size;
1248		}
1249	}
1250	mutex_unlock(&dev->buffer_lock);
1251	seq_printf(s, "----------------------------------------------------\n");
1252	seq_printf(s, "%16.s %16u\n", "total orphaned",
1253		   total_orphaned_size);
1254	seq_printf(s, "%16.s %16u\n", "total ", total_size);
1255	seq_printf(s, "----------------------------------------------------\n");
1256
1257	if (heap->debug_show)
1258		heap->debug_show(heap, s, unused);
1259
1260	return 0;
1261}
1262
1263static int ion_debug_heap_open(struct inode *inode, struct file *file)
1264{
1265	return single_open(file, ion_debug_heap_show, inode->i_private);
1266}
1267
1268static const struct file_operations debug_heap_fops = {
1269	.open = ion_debug_heap_open,
1270	.read = seq_read,
1271	.llseek = seq_lseek,
1272	.release = single_release,
1273};
1274
1275void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1276{
1277	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1278	    !heap->ops->unmap_dma)
1279		pr_err("%s: can not add heap with invalid ops struct.\n",
1280		       __func__);
1281
1282	heap->dev = dev;
1283	down_write(&dev->lock);
1284	/* use negative heap->id to reverse the priority -- when traversing
1285	   the list later attempt higher id numbers first */
1286	plist_node_init(&heap->node, -heap->id);
1287	plist_add(&heap->node, &dev->heaps);
1288	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1289			    &debug_heap_fops);
1290	up_write(&dev->lock);
1291}
1292
1293struct ion_device *ion_device_create(long (*custom_ioctl)
1294				     (struct ion_client *client,
1295				      unsigned int cmd,
1296				      unsigned long arg))
1297{
1298	struct ion_device *idev;
1299	int ret;
1300
1301	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1302	if (!idev)
1303		return ERR_PTR(-ENOMEM);
1304
1305	idev->dev.minor = MISC_DYNAMIC_MINOR;
1306	idev->dev.name = "ion";
1307	idev->dev.fops = &ion_fops;
1308	idev->dev.parent = NULL;
1309	ret = misc_register(&idev->dev);
1310	if (ret) {
1311		pr_err("ion: failed to register misc device.\n");
1312		return ERR_PTR(ret);
1313	}
1314
1315	idev->debug_root = debugfs_create_dir("ion", NULL);
1316	if (IS_ERR_OR_NULL(idev->debug_root))
1317		pr_err("ion: failed to create debug files.\n");
1318
1319	idev->custom_ioctl = custom_ioctl;
1320	idev->buffers = RB_ROOT;
1321	mutex_init(&idev->buffer_lock);
1322	init_rwsem(&idev->lock);
1323	plist_head_init(&idev->heaps);
1324	idev->clients = RB_ROOT;
1325	return idev;
1326}
1327
1328void ion_device_destroy(struct ion_device *dev)
1329{
1330	misc_deregister(&dev->dev);
1331	/* XXX need to free the heaps and clients ? */
1332	kfree(dev);
1333}
1334
1335void __init ion_reserve(struct ion_platform_data *data)
1336{
1337	int i;
1338
1339	for (i = 0; i < data->nr; i++) {
1340		if (data->heaps[i].size == 0)
1341			continue;
1342
1343		if (data->heaps[i].base == 0) {
1344			phys_addr_t paddr;
1345			paddr = memblock_alloc_base(data->heaps[i].size,
1346						    data->heaps[i].align,
1347						    MEMBLOCK_ALLOC_ANYWHERE);
1348			if (!paddr) {
1349				pr_err("%s: error allocating memblock for "
1350				       "heap %d\n",
1351					__func__, i);
1352				continue;
1353			}
1354			data->heaps[i].base = paddr;
1355		} else {
1356			int ret = memblock_reserve(data->heaps[i].base,
1357					       data->heaps[i].size);
1358			if (ret)
1359				pr_err("memblock reserve of %x@%lx failed\n",
1360				       data->heaps[i].size,
1361				       data->heaps[i].base);
1362		}
1363		pr_info("%s: %s reserved base %lx size %d\n", __func__,
1364			data->heaps[i].name,
1365			data->heaps[i].base,
1366			data->heaps[i].size);
1367	}
1368}
1369