/* ion.c, revision cd69488c7be3abb0767b49f313b12b8591abe5dc */
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook used to service ION_IOC_CUSTOM calls, may be NULL
 * @clients:		an rb tree of all the existing clients
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node or kmap_cnt should be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

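/*
 * A buffer's userspace mappings are set up lazily, page by page, in
 * ion_vm_fault() only when the buffer is cached (ION_FLAG_CACHED) and the
 * caller has not asked for explicit sync (ION_FLAG_CACHED_NEEDS_SYNC).
 * This lets the dirty bitmap allocated in ion_buffer_alloc_dirty() track
 * which pages need a cache sync before the next device access.
 */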
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in must have pagewise sg_lists\n",
			       __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * This will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific device
	 * isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.\n", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
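
/*
 * Example (illustrative sketch only, not part of this file): an in-kernel
 * user would typically pair the exported calls roughly as follows.  "idev"
 * (the ion_device created by platform code) and MY_HEAP_ID (a platform
 * defined heap id) are assumptions made for the example:
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, 1 << ION_HEAP_TYPE_SYSTEM, "mydrv");
 *	handle = ion_alloc(client, 64 * 1024, PAGE_SIZE,
 *			   1 << MY_HEAP_ID, ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */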

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

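/*
 * Kernel mappings are reference counted at two levels: per handle
 * (handle->kmap_cnt) and per buffer (buffer->kmap_cnt).  The buffer is only
 * mapped through the heap's map_kernel op for the first user and unmapped
 * again when the last user drops its mapping, so ion_map_kernel() and
 * ion_unmap_kernel() calls must be balanced per handle.
 */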
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}
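
/*
 * buffer->dirty is a bitmap with one bit per scatterlist entry; since
 * ion_buffer_create() only enables fault-based mappings for page-sized sg
 * entries, each bit effectively covers one page.  ion_vm_fault() sets a
 * page's bit when the CPU touches it and ion_buffer_sync_for_device()
 * clears it again after flushing that page back for device access.
 */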

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

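/*
 * Fault handler for the lazily faulted, cached userspace mappings set up in
 * ion_mmap(): mark the faulting page dirty, sync it back for CPU access and
 * insert just that page into the VMA.  ion_buffer_sync_for_device() later
 * flushes the dirty pages and zaps the mappings so the next CPU access
 * faults again.
 */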
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
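
/*
 * Example (illustrative sketch only, not part of this file): a driver that
 * is handed a dma-buf fd which userspace obtained via ION_IOC_SHARE can turn
 * it back into a handle on its own client; "client" and "fd" are assumptions
 * made for the example:
 *
 *	struct ion_handle *handle;
 *	struct sg_table *table;
 *
 *	handle = ion_import_dma_buf(client, fd);
 *	if (IS_ERR_OR_NULL(handle))
 *		return PTR_ERR(handle);
 *	table = ion_sg_table(client, handle);
 *	...
 *	ion_free(client, handle);
 */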

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
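
/*
 * Example (illustrative sketch only, not part of this file): from userspace
 * the /dev/ion interface above is typically driven like this, with error
 * handling omitted and MY_HEAP_ID being a platform-specific assumption:
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_mask = 1 << MY_HEAP_ID,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *	void *ptr;
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 */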

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->type != heap->type)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
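
/*
 * Example (illustrative sketch only, not part of this file): board or SoC
 * code typically creates one ion device and registers its heaps at init
 * time.  "my_ion_pdata" and the use of the ion_heap_create() helper from
 * ion_priv.h are assumptions made for the example:
 *
 *	struct ion_device *idev;
 *	int i;
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < my_ion_pdata.nr; i++) {
 *		struct ion_heap *heap;
 *
 *		heap = ion_heap_create(&my_ion_pdata.heaps[i]);
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */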

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}