ion.c revision 06e0dcaeb4fd72a010a1f5ad0c03abd8e0a58ef9
1/*
2
3 * drivers/staging/android/ion/ion.c
4 *
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/device.h>
19#include <linux/file.h>
20#include <linux/freezer.h>
21#include <linux/fs.h>
22#include <linux/anon_inodes.h>
23#include <linux/kthread.h>
24#include <linux/list.h>
25#include <linux/memblock.h>
26#include <linux/miscdevice.h>
27#include <linux/export.h>
28#include <linux/mm.h>
29#include <linux/mm_types.h>
30#include <linux/rbtree.h>
31#include <linux/slab.h>
32#include <linux/seq_file.h>
33#include <linux/uaccess.h>
34#include <linux/vmalloc.h>
35#include <linux/debugfs.h>
36#include <linux/dma-buf.h>
37#include <linux/idr.h>
38
39#include "ion.h"
40#include "ion_priv.h"
41#include "compat_ion.h"
42
43/**
44 * struct ion_device - the metadata of the ion device node
45 * @dev:		the actual misc device
46 * @buffers:		an rb tree of all the existing buffers
47 * @buffer_lock:	lock protecting the tree of buffers
48 * @lock:		rwsem protecting the tree of heaps and clients
49 * @heaps:		list of all the heaps in the system
50 * @clients:		an rb tree of all the existing clients
51 */
52struct ion_device {
53	struct miscdevice dev;
54	struct rb_root buffers;
55	struct mutex buffer_lock;
56	struct rw_semaphore lock;
57	struct plist_head heaps;
58	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
59			      unsigned long arg);
60	struct rb_root clients;
61	struct dentry *debug_root;
62};
63
64/**
65 * struct ion_client - a process/hw block local address space
66 * @node:		node in the tree of all clients
67 * @dev:		backpointer to ion device
68 * @handles:		an rb tree of all the handles in this client
69 * @idr:		an idr space for allocating handle ids
70 * @lock:		lock protecting the tree of handles
71 * @name:		used for debugging
72 * @task:		used for debugging
73 *
74 * A client represents a list of buffers this client may access.
75 * The mutex stored here is used to protect both the tree of handles and
76 * the handles themselves, and should be held while modifying either.
77 */
78struct ion_client {
79	struct rb_node node;
80	struct ion_device *dev;
81	struct rb_root handles;
82	struct idr idr;
83	struct mutex lock;
84	const char *name;
85	struct task_struct *task;
86	pid_t pid;
87	struct dentry *debug_root;
88};
89
90/**
91 * ion_handle - a client local reference to a buffer
92 * @ref:		reference count
93 * @client:		back pointer to the client the buffer resides in
94 * @buffer:		pointer to the buffer
95 * @node:		node in the client's handle rbtree
96 * @kmap_cnt:		count of times this client has mapped to kernel
97 * @id:			client-unique id allocated by client->idr
98 *
99 * Modifications to node and kmap_cnt should be protected by the
100 * lock in the client.  Other fields are never changed after initialization.
101 */
102struct ion_handle {
103	struct kref ref;
104	struct ion_client *client;
105	struct ion_buffer *buffer;
106	struct rb_node node;
107	unsigned int kmap_cnt;
108	int id;
109};
110
111bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
112{
113	return ((buffer->flags & ION_FLAG_CACHED) &&
114		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
115}
116
117bool ion_buffer_cached(struct ion_buffer *buffer)
118{
119	return !!(buffer->flags & ION_FLAG_CACHED);
120}
121
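/*
 * The helpers below track per-page dirty state for buffers whose user
 * mappings are faulted in lazily: bit 0 of each struct page pointer stored
 * in buffer->pages[] is borrowed as a dirty flag, so the real pointer must
 * be recovered with ion_buffer_page() before an entry is dereferenced.
 */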
122static inline struct page *ion_buffer_page(struct page *page)
123{
124	return (struct page *)((unsigned long)page & ~(1UL));
125}
126
127static inline bool ion_buffer_page_is_dirty(struct page *page)
128{
129	return !!((unsigned long)page & 1UL);
130}
131
132static inline void ion_buffer_page_dirty(struct page **page)
133{
134	*page = (struct page *)((unsigned long)(*page) | 1UL);
135}
136
137static inline void ion_buffer_page_clean(struct page **page)
138{
139	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
140}
141
142/* this function should only be called while dev->lock is held */
143static void ion_buffer_add(struct ion_device *dev,
144			   struct ion_buffer *buffer)
145{
146	struct rb_node **p = &dev->buffers.rb_node;
147	struct rb_node *parent = NULL;
148	struct ion_buffer *entry;
149
150	while (*p) {
151		parent = *p;
152		entry = rb_entry(parent, struct ion_buffer, node);
153
154		if (buffer < entry) {
155			p = &(*p)->rb_left;
156		} else if (buffer > entry) {
157			p = &(*p)->rb_right;
158		} else {
159			pr_err("%s: buffer already found.\n", __func__);
160			BUG();
161		}
162	}
163
164	rb_link_node(&buffer->node, parent, p);
165	rb_insert_color(&buffer->node, &dev->buffers);
166}
167
168/* this function should only be called while dev->lock is held */
169static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
170				     struct ion_device *dev,
171				     unsigned long len,
172				     unsigned long align,
173				     unsigned long flags)
174{
175	struct ion_buffer *buffer;
176	struct sg_table *table;
177	struct scatterlist *sg;
178	int i, ret;
179
180	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
181	if (!buffer)
182		return ERR_PTR(-ENOMEM);
183
184	buffer->heap = heap;
185	buffer->flags = flags;
186	kref_init(&buffer->ref);
187
188	ret = heap->ops->allocate(heap, buffer, len, align, flags);
189
190	if (ret) {
191		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
192			goto err2;
193
194		ion_heap_freelist_drain(heap, 0);
195		ret = heap->ops->allocate(heap, buffer, len, align,
196					  flags);
197		if (ret)
198			goto err2;
199	}
200
201	buffer->dev = dev;
202	buffer->size = len;
203
204	table = heap->ops->map_dma(heap, buffer);
205	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
206		table = ERR_PTR(-EINVAL);
207	if (IS_ERR(table)) {
208		heap->ops->free(buffer);
209		kfree(buffer);
210		return ERR_CAST(table);
211	}
212	buffer->sg_table = table;
213	if (ion_buffer_fault_user_mappings(buffer)) {
214		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
215		struct scatterlist *sg;
216		int i, j, k = 0;
217
218		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
219		if (!buffer->pages) {
220			ret = -ENOMEM;
221			goto err1;
222		}
223
224		for_each_sg(table->sgl, sg, table->nents, i) {
225			struct page *page = sg_page(sg);
226
227			for (j = 0; j < sg->length / PAGE_SIZE; j++)
228				buffer->pages[k++] = page++;
229		}
230
231		if (ret)
232			goto err;
233	}
234
235	buffer->dev = dev;
236	buffer->size = len;
237	INIT_LIST_HEAD(&buffer->vmas);
238	mutex_init(&buffer->lock);
239	/* this will set up dma addresses for the sglist -- it is not
240	   technically correct as per the dma api -- a specific
241	   device isn't really taking ownership here.  However, in practice on
242	   our systems the only dma_address space is physical addresses.
243	   Additionally, we can't afford the overhead of invalidating every
244	   allocation via dma_map_sg. The implicit contract here is that
245	   memory coming from the heaps is ready for dma, i.e. if it has a
246	   cached mapping that mapping has been invalidated */
247	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
248		sg_dma_address(sg) = sg_phys(sg);
249	mutex_lock(&dev->buffer_lock);
250	ion_buffer_add(dev, buffer);
251	mutex_unlock(&dev->buffer_lock);
252	return buffer;
253
254err:
255	heap->ops->unmap_dma(heap, buffer);
256	heap->ops->free(buffer);
257err1:
258	if (buffer->pages)
259		vfree(buffer->pages);
260err2:
261	kfree(buffer);
262	return ERR_PTR(ret);
263}
264
265void ion_buffer_destroy(struct ion_buffer *buffer)
266{
267	if (WARN_ON(buffer->kmap_cnt > 0))
268		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
269	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
270	buffer->heap->ops->free(buffer);
271	if (buffer->pages)
272		vfree(buffer->pages);
273	kfree(buffer);
274}
275
276static void _ion_buffer_destroy(struct kref *kref)
277{
278	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
279	struct ion_heap *heap = buffer->heap;
280	struct ion_device *dev = buffer->dev;
281
282	mutex_lock(&dev->buffer_lock);
283	rb_erase(&buffer->node, &dev->buffers);
284	mutex_unlock(&dev->buffer_lock);
285
286	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
287		ion_heap_freelist_add(heap, buffer);
288	else
289		ion_buffer_destroy(buffer);
290}
291
292static void ion_buffer_get(struct ion_buffer *buffer)
293{
294	kref_get(&buffer->ref);
295}
296
297static int ion_buffer_put(struct ion_buffer *buffer)
298{
299	return kref_put(&buffer->ref, _ion_buffer_destroy);
300}
301
302static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
303{
304	mutex_lock(&buffer->lock);
305	buffer->handle_count++;
306	mutex_unlock(&buffer->lock);
307}
308
309static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
310{
311	/*
312	 * when a buffer is removed from a handle, if it is not in
313	 * any other handles, copy the taskcomm and the pid of the
314	 * process it's being removed from into the buffer.  At this
315	 * point there will be no way to track what processes this buffer is
316	 * being used by, it only exists as a dma_buf file descriptor.
317	 * The taskcomm and pid can provide a debug hint as to where this fd
318	 * is in the system
319	 */
320	mutex_lock(&buffer->lock);
321	buffer->handle_count--;
322	BUG_ON(buffer->handle_count < 0);
323	if (!buffer->handle_count) {
324		struct task_struct *task;
325
326		task = current->group_leader;
327		get_task_comm(buffer->task_comm, task);
328		buffer->pid = task_pid_nr(task);
329	}
330	mutex_unlock(&buffer->lock);
331}
332
333static struct ion_handle *ion_handle_create(struct ion_client *client,
334				     struct ion_buffer *buffer)
335{
336	struct ion_handle *handle;
337
338	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
339	if (!handle)
340		return ERR_PTR(-ENOMEM);
341	kref_init(&handle->ref);
342	RB_CLEAR_NODE(&handle->node);
343	handle->client = client;
344	ion_buffer_get(buffer);
345	ion_buffer_add_to_handle(buffer);
346	handle->buffer = buffer;
347
348	return handle;
349}
350
351static void ion_handle_kmap_put(struct ion_handle *);
352
353static void ion_handle_destroy(struct kref *kref)
354{
355	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
356	struct ion_client *client = handle->client;
357	struct ion_buffer *buffer = handle->buffer;
358
359	mutex_lock(&buffer->lock);
360	while (handle->kmap_cnt)
361		ion_handle_kmap_put(handle);
362	mutex_unlock(&buffer->lock);
363
364	idr_remove(&client->idr, handle->id);
365	if (!RB_EMPTY_NODE(&handle->node))
366		rb_erase(&handle->node, &client->handles);
367
368	ion_buffer_remove_from_handle(buffer);
369	ion_buffer_put(buffer);
370
371	kfree(handle);
372}
373
374struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
375{
376	return handle->buffer;
377}
378
379static void ion_handle_get(struct ion_handle *handle)
380{
381	kref_get(&handle->ref);
382}
383
384static int ion_handle_put(struct ion_handle *handle)
385{
386	struct ion_client *client = handle->client;
387	int ret;
388
389	mutex_lock(&client->lock);
390	ret = kref_put(&handle->ref, ion_handle_destroy);
391	mutex_unlock(&client->lock);
392
393	return ret;
394}
395
396static struct ion_handle *ion_handle_lookup(struct ion_client *client,
397					    struct ion_buffer *buffer)
398{
399	struct rb_node *n = client->handles.rb_node;
400
401	while (n) {
402		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
403		if (buffer < entry->buffer)
404			n = n->rb_left;
405		else if (buffer > entry->buffer)
406			n = n->rb_right;
407		else
408			return entry;
409	}
410	return ERR_PTR(-EINVAL);
411}
412
413static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
414						int id)
415{
416	struct ion_handle *handle;
417
418	mutex_lock(&client->lock);
419	handle = idr_find(&client->idr, id);
420	if (handle)
421		ion_handle_get(handle);
422	mutex_unlock(&client->lock);
423
424	return handle ? handle : ERR_PTR(-EINVAL);
425}
426
427static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
428{
429	WARN_ON(!mutex_is_locked(&client->lock));
430	return (idr_find(&client->idr, handle->id) == handle);
431}
432
433static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
434{
435	int rc;
436	struct rb_node **p = &client->handles.rb_node;
437	struct rb_node *parent = NULL;
438	struct ion_handle *entry;
439
440	do {
441		int id;
442		rc = idr_pre_get(&client->idr, GFP_KERNEL);
443		if (!rc)
444			return -ENOMEM;
445		rc = idr_get_new_above(&client->idr, handle, 1, &id);
446		handle->id = id;
447	} while (rc == -EAGAIN);
448
449	if (rc < 0)
450		return rc;
451
452	while (*p) {
453		parent = *p;
454		entry = rb_entry(parent, struct ion_handle, node);
455
456		if (handle->buffer < entry->buffer)
457			p = &(*p)->rb_left;
458		else if (handle->buffer > entry->buffer)
459			p = &(*p)->rb_right;
460		else
461			WARN(1, "%s: buffer already found.\n", __func__);
462	}
463
464	rb_link_node(&handle->node, parent, p);
465	rb_insert_color(&handle->node, &client->handles);
466
467	return 0;
468}
469
470struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
471			     size_t align, unsigned int heap_id_mask,
472			     unsigned int flags)
473{
474	struct ion_handle *handle;
475	struct ion_device *dev = client->dev;
476	struct ion_buffer *buffer = NULL;
477	struct ion_heap *heap;
478	int ret;
479
480	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
481		 len, align, heap_id_mask, flags);
482	/*
483	 * traverse the list of heaps available in this system in priority
484	 * order.  If the heap id is present in the caller's heap_id_mask,
485	 * attempt to allocate from it.  Repeat until an allocation has
486	 * succeeded or all heaps have been tried
487	 */
488	len = PAGE_ALIGN(len);
489
490	if (!len)
491		return ERR_PTR(-EINVAL);
492
493	down_read(&dev->lock);
494	plist_for_each_entry(heap, &dev->heaps, node) {
495		/* if the caller didn't specify this heap id */
496		if (!((1 << heap->id) & heap_id_mask))
497			continue;
498		buffer = ion_buffer_create(heap, dev, len, align, flags);
499		if (!IS_ERR(buffer))
500			break;
501	}
502	up_read(&dev->lock);
503
504	if (buffer == NULL)
505		return ERR_PTR(-ENODEV);
506
507	if (IS_ERR(buffer))
508		return ERR_CAST(buffer);
509
510	handle = ion_handle_create(client, buffer);
511
512	/*
513	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
514	 * and ion_handle_create will take a second reference, drop one here
515	 */
516	ion_buffer_put(buffer);
517
518	if (IS_ERR(handle))
519		return handle;
520
521	mutex_lock(&client->lock);
522	ret = ion_handle_add(client, handle);
523	mutex_unlock(&client->lock);
524	if (ret) {
525		ion_handle_put(handle);
526		handle = ERR_PTR(ret);
527	}
528
529	return handle;
530}
531EXPORT_SYMBOL(ion_alloc);
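/*
 * Illustrative in-kernel use of the allocation API above (a sketch, not part
 * of this file; my_heap_id_mask is a platform-specific placeholder):
 *
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	handle = ion_alloc(client, PAGE_SIZE, 0, my_heap_id_mask,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	vaddr = ion_map_kernel(client, handle);
 *	if (!IS_ERR(vaddr)) {
 *		...
 *		ion_unmap_kernel(client, handle);
 *	}
 *	ion_free(client, handle);
 */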
532
533void ion_free(struct ion_client *client, struct ion_handle *handle)
534{
535	bool valid_handle;
536
537	BUG_ON(client != handle->client);
538
539	mutex_lock(&client->lock);
540	valid_handle = ion_handle_validate(client, handle);
541
542	if (!valid_handle) {
543		WARN(1, "%s: invalid handle passed to free.\n", __func__);
544		mutex_unlock(&client->lock);
545		return;
546	}
547	mutex_unlock(&client->lock);
548	ion_handle_put(handle);
549}
550EXPORT_SYMBOL(ion_free);
551
552int ion_phys(struct ion_client *client, struct ion_handle *handle,
553	     ion_phys_addr_t *addr, size_t *len)
554{
555	struct ion_buffer *buffer;
556	int ret;
557
558	mutex_lock(&client->lock);
559	if (!ion_handle_validate(client, handle)) {
560		mutex_unlock(&client->lock);
561		return -EINVAL;
562	}
563
564	buffer = handle->buffer;
565
566	if (!buffer->heap->ops->phys) {
567		pr_err("%s: ion_phys is not implemented by this heap.\n",
568		       __func__);
569		mutex_unlock(&client->lock);
570		return -ENODEV;
571	}
572	mutex_unlock(&client->lock);
573	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
574	return ret;
575}
576EXPORT_SYMBOL(ion_phys);
577
578static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
579{
580	void *vaddr;
581
582	if (buffer->kmap_cnt) {
583		buffer->kmap_cnt++;
584		return buffer->vaddr;
585	}
586	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
587	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
588		return ERR_PTR(-EINVAL);
589	if (IS_ERR(vaddr))
590		return vaddr;
591	buffer->vaddr = vaddr;
592	buffer->kmap_cnt++;
593	return vaddr;
594}
595
596static void *ion_handle_kmap_get(struct ion_handle *handle)
597{
598	struct ion_buffer *buffer = handle->buffer;
599	void *vaddr;
600
601	if (handle->kmap_cnt) {
602		handle->kmap_cnt++;
603		return buffer->vaddr;
604	}
605	vaddr = ion_buffer_kmap_get(buffer);
606	if (IS_ERR(vaddr))
607		return vaddr;
608	handle->kmap_cnt++;
609	return vaddr;
610}
611
612static void ion_buffer_kmap_put(struct ion_buffer *buffer)
613{
614	buffer->kmap_cnt--;
615	if (!buffer->kmap_cnt) {
616		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
617		buffer->vaddr = NULL;
618	}
619}
620
621static void ion_handle_kmap_put(struct ion_handle *handle)
622{
623	struct ion_buffer *buffer = handle->buffer;
624
625	handle->kmap_cnt--;
626	if (!handle->kmap_cnt)
627		ion_buffer_kmap_put(buffer);
628}
629
630void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
631{
632	struct ion_buffer *buffer;
633	void *vaddr;
634
635	mutex_lock(&client->lock);
636	if (!ion_handle_validate(client, handle)) {
637		pr_err("%s: invalid handle passed to map_kernel.\n",
638		       __func__);
639		mutex_unlock(&client->lock);
640		return ERR_PTR(-EINVAL);
641	}
642
643	buffer = handle->buffer;
644
645	if (!handle->buffer->heap->ops->map_kernel) {
646		pr_err("%s: map_kernel is not implemented by this heap.\n",
647		       __func__);
648		mutex_unlock(&client->lock);
649		return ERR_PTR(-ENODEV);
650	}
651
652	mutex_lock(&buffer->lock);
653	vaddr = ion_handle_kmap_get(handle);
654	mutex_unlock(&buffer->lock);
655	mutex_unlock(&client->lock);
656	return vaddr;
657}
658EXPORT_SYMBOL(ion_map_kernel);
659
660void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
661{
662	struct ion_buffer *buffer;
663
664	mutex_lock(&client->lock);
665	buffer = handle->buffer;
666	mutex_lock(&buffer->lock);
667	ion_handle_kmap_put(handle);
668	mutex_unlock(&buffer->lock);
669	mutex_unlock(&client->lock);
670}
671EXPORT_SYMBOL(ion_unmap_kernel);
672
673static int ion_debug_client_show(struct seq_file *s, void *unused)
674{
675	struct ion_client *client = s->private;
676	struct rb_node *n;
677	size_t sizes[ION_NUM_HEAP_IDS] = {0};
678	const char *names[ION_NUM_HEAP_IDS] = {0};
679	int i;
680
681	mutex_lock(&client->lock);
682	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
683		struct ion_handle *handle = rb_entry(n, struct ion_handle,
684						     node);
685		unsigned int id = handle->buffer->heap->id;
686
687		if (!names[id])
688			names[id] = handle->buffer->heap->name;
689		sizes[id] += handle->buffer->size;
690	}
691	mutex_unlock(&client->lock);
692
693	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
694	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
695		if (!names[i])
696			continue;
697		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
698	}
699	return 0;
700}
701
702static int ion_debug_client_open(struct inode *inode, struct file *file)
703{
704	return single_open(file, ion_debug_client_show, inode->i_private);
705}
706
707static const struct file_operations debug_client_fops = {
708	.open = ion_debug_client_open,
709	.read = seq_read,
710	.llseek = seq_lseek,
711	.release = single_release,
712};
713
714struct ion_client *ion_client_create(struct ion_device *dev,
715				     const char *name)
716{
717	struct ion_client *client;
718	struct task_struct *task;
719	struct rb_node **p;
720	struct rb_node *parent = NULL;
721	struct ion_client *entry;
722	char debug_name[64];
723	pid_t pid;
724
725	get_task_struct(current->group_leader);
726	task_lock(current->group_leader);
727	pid = task_pid_nr(current->group_leader);
728	/* don't bother to store task struct for kernel threads,
729	   they can't be killed anyway */
730	if (current->group_leader->flags & PF_KTHREAD) {
731		put_task_struct(current->group_leader);
732		task = NULL;
733	} else {
734		task = current->group_leader;
735	}
736	task_unlock(current->group_leader);
737
738	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
739	if (!client) {
740		if (task)
741			put_task_struct(current->group_leader);
742		return ERR_PTR(-ENOMEM);
743	}
744
745	client->dev = dev;
746	client->handles = RB_ROOT;
747	idr_init(&client->idr);
748	mutex_init(&client->lock);
749	client->name = name;
750	client->task = task;
751	client->pid = pid;
752
753	down_write(&dev->lock);
754	p = &dev->clients.rb_node;
755	while (*p) {
756		parent = *p;
757		entry = rb_entry(parent, struct ion_client, node);
758
759		if (client < entry)
760			p = &(*p)->rb_left;
761		else if (client > entry)
762			p = &(*p)->rb_right;
763	}
764	rb_link_node(&client->node, parent, p);
765	rb_insert_color(&client->node, &dev->clients);
766
767	snprintf(debug_name, 64, "%u", client->pid);
768	client->debug_root = debugfs_create_file(debug_name, 0664,
769						 dev->debug_root, client,
770						 &debug_client_fops);
771	up_write(&dev->lock);
772
773	return client;
774}
775EXPORT_SYMBOL(ion_client_create);
776
777void ion_client_destroy(struct ion_client *client)
778{
779	struct ion_device *dev = client->dev;
780	struct rb_node *n;
781
782	pr_debug("%s: %d\n", __func__, __LINE__);
783	while ((n = rb_first(&client->handles))) {
784		struct ion_handle *handle = rb_entry(n, struct ion_handle,
785						     node);
786		ion_handle_destroy(&handle->ref);
787	}
788
789	idr_remove_all(&client->idr);
790	idr_destroy(&client->idr);
791
792	down_write(&dev->lock);
793	if (client->task)
794		put_task_struct(client->task);
795	rb_erase(&client->node, &dev->clients);
796	debugfs_remove_recursive(client->debug_root);
797	up_write(&dev->lock);
798
799	kfree(client);
800}
801EXPORT_SYMBOL(ion_client_destroy);
802
803struct sg_table *ion_sg_table(struct ion_client *client,
804			      struct ion_handle *handle)
805{
806	struct ion_buffer *buffer;
807	struct sg_table *table;
808
809	mutex_lock(&client->lock);
810	if (!ion_handle_validate(client, handle)) {
811		pr_err("%s: invalid handle passed to map_dma.\n",
812		       __func__);
813		mutex_unlock(&client->lock);
814		return ERR_PTR(-EINVAL);
815	}
816	buffer = handle->buffer;
817	table = buffer->sg_table;
818	mutex_unlock(&client->lock);
819	return table;
820}
821EXPORT_SYMBOL(ion_sg_table);
822
823static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
824				       struct device *dev,
825				       enum dma_data_direction direction);
826
827static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
828					enum dma_data_direction direction)
829{
830	struct dma_buf *dmabuf = attachment->dmabuf;
831	struct ion_buffer *buffer = dmabuf->priv;
832
833	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
834	return buffer->sg_table;
835}
836
837static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
838			      struct sg_table *table,
839			      enum dma_data_direction direction)
840{
841}
842
843void ion_pages_sync_for_device(struct device *dev, struct page *page,
844		size_t size, enum dma_data_direction dir)
845{
846	struct scatterlist sg;
847
848	sg_init_table(&sg, 1);
849	sg_set_page(&sg, page, size, 0);
850	/*
851	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
852	 * for the targeted device, but this works on the currently targeted
853	 * hardware.
854	 */
855	sg_dma_address(&sg) = page_to_phys(page);
856	dma_sync_sg_for_device(dev, &sg, 1, dir);
857}
858
859struct ion_vma_list {
860	struct list_head list;
861	struct vm_area_struct *vma;
862};
863
864static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
865				       struct device *dev,
866				       enum dma_data_direction dir)
867{
868	struct ion_vma_list *vma_list;
869	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
870	int i;
871
872	pr_debug("%s: syncing for device %s\n", __func__,
873		 dev ? dev_name(dev) : "null");
874
875	if (!ion_buffer_fault_user_mappings(buffer))
876		return;
877
878	mutex_lock(&buffer->lock);
879	for (i = 0; i < pages; i++) {
880		struct page *page = buffer->pages[i];
881
882		if (ion_buffer_page_is_dirty(page))
883			ion_pages_sync_for_device(dev, ion_buffer_page(page),
884							PAGE_SIZE, dir);
885
886		ion_buffer_page_clean(buffer->pages + i);
887	}
888	list_for_each_entry(vma_list, &buffer->vmas, list) {
889		struct vm_area_struct *vma = vma_list->vma;
890
891		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
892			       NULL);
893	}
894	mutex_unlock(&buffer->lock);
895}
896
897int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
898{
899	struct ion_buffer *buffer = vma->vm_private_data;
900	int ret;
901
902	mutex_lock(&buffer->lock);
903	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
904
905	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
906	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
907			     ion_buffer_page(buffer->pages[vmf->pgoff]));
908	mutex_unlock(&buffer->lock);
909	if (ret)
910		return VM_FAULT_ERROR;
911
912	return VM_FAULT_NOPAGE;
913}
914
915static void ion_vm_open(struct vm_area_struct *vma)
916{
917	struct ion_buffer *buffer = vma->vm_private_data;
918	struct ion_vma_list *vma_list;
919
920	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
921	if (!vma_list)
922		return;
923	vma_list->vma = vma;
924	mutex_lock(&buffer->lock);
925	list_add(&vma_list->list, &buffer->vmas);
926	mutex_unlock(&buffer->lock);
927	pr_debug("%s: adding %p\n", __func__, vma);
928}
929
930static void ion_vm_close(struct vm_area_struct *vma)
931{
932	struct ion_buffer *buffer = vma->vm_private_data;
933	struct ion_vma_list *vma_list, *tmp;
934
935	pr_debug("%s\n", __func__);
936	mutex_lock(&buffer->lock);
937	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
938		if (vma_list->vma != vma)
939			continue;
940		list_del(&vma_list->list);
941		kfree(vma_list);
942		pr_debug("%s: deleting %p\n", __func__, vma);
943		break;
944	}
945	mutex_unlock(&buffer->lock);
946}
947
948struct vm_operations_struct ion_vma_ops = {
949	.open = ion_vm_open,
950	.close = ion_vm_close,
951	.fault = ion_vm_fault,
952};
953
954static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
955{
956	struct ion_buffer *buffer = dmabuf->priv;
957	int ret = 0;
958
959	if (!buffer->heap->ops->map_user) {
960		pr_err("%s: this heap does not define a method for mapping "
961		       "to userspace\n", __func__);
962		return -EINVAL;
963	}
964
965	if (ion_buffer_fault_user_mappings(buffer)) {
966		vma->vm_private_data = buffer;
967		vma->vm_ops = &ion_vma_ops;
968		ion_vm_open(vma);
969		return 0;
970	}
971
972	if (!(buffer->flags & ION_FLAG_CACHED))
973		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
974
975	mutex_lock(&buffer->lock);
976	/* now map it to userspace */
977	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
978	mutex_unlock(&buffer->lock);
979
980	if (ret)
981		pr_err("%s: failure mapping buffer to userspace\n",
982		       __func__);
983
984	return ret;
985}
986
987static void ion_dma_buf_release(struct dma_buf *dmabuf)
988{
989	struct ion_buffer *buffer = dmabuf->priv;
990	ion_buffer_put(buffer);
991}
992
993static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
994{
995	struct ion_buffer *buffer = dmabuf->priv;
996	return buffer->vaddr + offset * PAGE_SIZE;
997}
998
999static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1000			       void *ptr)
1001{
1002	return;
1003}
1004
1005static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1006					size_t len,
1007					enum dma_data_direction direction)
1008{
1009	struct ion_buffer *buffer = dmabuf->priv;
1010	void *vaddr;
1011
1012	if (!buffer->heap->ops->map_kernel) {
1013		pr_err("%s: map kernel is not implemented by this heap.\n",
1014		       __func__);
1015		return -ENODEV;
1016	}
1017
1018	mutex_lock(&buffer->lock);
1019	vaddr = ion_buffer_kmap_get(buffer);
1020	mutex_unlock(&buffer->lock);
1021	if (IS_ERR(vaddr))
1022		return PTR_ERR(vaddr);
1023	return 0;
1024}
1025
1026static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1027				       size_t len,
1028				       enum dma_data_direction direction)
1029{
1030	struct ion_buffer *buffer = dmabuf->priv;
1031
1032	mutex_lock(&buffer->lock);
1033	ion_buffer_kmap_put(buffer);
1034	mutex_unlock(&buffer->lock);
1035}
1036
1037struct dma_buf_ops dma_buf_ops = {
1038	.map_dma_buf = ion_map_dma_buf,
1039	.unmap_dma_buf = ion_unmap_dma_buf,
1040	.mmap = ion_mmap,
1041	.release = ion_dma_buf_release,
1042	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
1043	.end_cpu_access = ion_dma_buf_end_cpu_access,
1044	.kmap_atomic = ion_dma_buf_kmap,
1045	.kunmap_atomic = ion_dma_buf_kunmap,
1046	.kmap = ion_dma_buf_kmap,
1047	.kunmap = ion_dma_buf_kunmap,
1048};
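/*
 * These ops back every dma_buf exported by ion_share_dma_buf(); the import
 * and sync paths below compare dmabuf->ops against &dma_buf_ops to recognize
 * buffers that originated from ion.
 */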
1049
1050struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1051						struct ion_handle *handle)
1052{
1053	struct ion_buffer *buffer;
1054	struct dma_buf *dmabuf;
1055	bool valid_handle;
1056
1057	mutex_lock(&client->lock);
1058	valid_handle = ion_handle_validate(client, handle);
1059	if (!valid_handle) {
1060		WARN(1, "%s: invalid handle passed to share.\n", __func__);
1061		mutex_unlock(&client->lock);
1062		return ERR_PTR(-EINVAL);
1063	}
1064	buffer = handle->buffer;
1065	ion_buffer_get(buffer);
1066	mutex_unlock(&client->lock);
1067
1068	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1069	if (IS_ERR(dmabuf)) {
1070		ion_buffer_put(buffer);
1071		return dmabuf;
1072	}
1073
1074	return dmabuf;
1075}
1076EXPORT_SYMBOL(ion_share_dma_buf);
1077
1078int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1079{
1080	struct dma_buf *dmabuf;
1081	int fd;
1082
1083	dmabuf = ion_share_dma_buf(client, handle);
1084	if (IS_ERR(dmabuf))
1085		return PTR_ERR(dmabuf);
1086
1087	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1088	if (fd < 0)
1089		dma_buf_put(dmabuf);
1090
1091	return fd;
1092}
1093EXPORT_SYMBOL(ion_share_dma_buf_fd);
1094
1095struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1096{
1097	struct dma_buf *dmabuf;
1098	struct ion_buffer *buffer;
1099	struct ion_handle *handle;
1100	int ret;
1101
1102	dmabuf = dma_buf_get(fd);
1103	if (IS_ERR(dmabuf))
1104		return ERR_CAST(dmabuf);
1105	/* if this memory came from ion */
1106
1107	if (dmabuf->ops != &dma_buf_ops) {
1108		pr_err("%s: can not import dmabuf from another exporter\n",
1109		       __func__);
1110		dma_buf_put(dmabuf);
1111		return ERR_PTR(-EINVAL);
1112	}
1113	buffer = dmabuf->priv;
1114
1115	mutex_lock(&client->lock);
1116	/* if a handle exists for this buffer just take a reference to it */
1117	handle = ion_handle_lookup(client, buffer);
1118	if (!IS_ERR(handle)) {
1119		ion_handle_get(handle);
1120		mutex_unlock(&client->lock);
1121		goto end;
1122	}
1123	mutex_unlock(&client->lock);
1124
1125	handle = ion_handle_create(client, buffer);
1126	if (IS_ERR(handle))
1127		goto end;
1128
1129	mutex_lock(&client->lock);
1130	ret = ion_handle_add(client, handle);
1131	mutex_unlock(&client->lock);
1132	if (ret) {
1133		ion_handle_put(handle);
1134		handle = ERR_PTR(ret);
1135	}
1136
1137end:
1138	dma_buf_put(dmabuf);
1139	return handle;
1140}
1141EXPORT_SYMBOL(ion_import_dma_buf);
1142
1143static int ion_sync_for_device(struct ion_client *client, int fd)
1144{
1145	struct dma_buf *dmabuf;
1146	struct ion_buffer *buffer;
1147
1148	dmabuf = dma_buf_get(fd);
1149	if (IS_ERR(dmabuf))
1150		return PTR_ERR(dmabuf);
1151
1152	/* if this memory came from ion */
1153	if (dmabuf->ops != &dma_buf_ops) {
1154		pr_err("%s: can not sync dmabuf from another exporter\n",
1155		       __func__);
1156		dma_buf_put(dmabuf);
1157		return -EINVAL;
1158	}
1159	buffer = dmabuf->priv;
1160
1161	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1162			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1163	dma_buf_put(dmabuf);
1164	return 0;
1165}
1166
1167static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1168{
1169	struct ion_client *client = filp->private_data;
1170
1171	switch (cmd) {
1172	case ION_IOC_ALLOC:
1173	{
1174		struct ion_allocation_data data;
1175		struct ion_handle *handle;
1176
1177		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1178			return -EFAULT;
1179		handle = ion_alloc(client, data.len, data.align,
1180					     data.heap_id_mask, data.flags);
1181
1182		if (IS_ERR(handle))
1183			return PTR_ERR(handle);
1184
1185		data.handle = handle->id;
1186
1187		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1188			ion_free(client, handle);
1189			return -EFAULT;
1190		}
1191		break;
1192	}
1193	case ION_IOC_FREE:
1194	{
1195		struct ion_handle_data data;
1196		struct ion_handle *handle;
1197
1198		if (copy_from_user(&data, (void __user *)arg,
1199				   sizeof(struct ion_handle_data)))
1200			return -EFAULT;
1201		handle = ion_handle_get_by_id(client, data.handle);
1202		if (IS_ERR(handle))
1203			return PTR_ERR(handle);
1204		ion_free(client, handle);
1205		ion_handle_put(handle);
1206		break;
1207	}
1208	case ION_IOC_SHARE:
1209	case ION_IOC_MAP:
1210	{
1211		struct ion_fd_data data;
1212		struct ion_handle *handle;
1213
1214		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1215			return -EFAULT;
1216		handle = ion_handle_get_by_id(client, data.handle);
1217		if (IS_ERR(handle))
1218			return PTR_ERR(handle);
1219		data.fd = ion_share_dma_buf_fd(client, handle);
1220		ion_handle_put(handle);
1221		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1222			return -EFAULT;
1223		if (data.fd < 0)
1224			return data.fd;
1225		break;
1226	}
1227	case ION_IOC_IMPORT:
1228	{
1229		struct ion_fd_data data;
1230		struct ion_handle *handle;
1231		int ret = 0;
1232		if (copy_from_user(&data, (void __user *)arg,
1233				   sizeof(struct ion_fd_data)))
1234			return -EFAULT;
1235		handle = ion_import_dma_buf(client, data.fd);
1236		if (IS_ERR(handle))
1237			ret = PTR_ERR(handle);
1238		else
1239			data.handle = handle->id;
1240
1241		if (copy_to_user((void __user *)arg, &data,
1242				 sizeof(struct ion_fd_data)))
1243			return -EFAULT;
1244		if (ret < 0)
1245			return ret;
1246		break;
1247	}
1248	case ION_IOC_SYNC:
1249	{
1250		struct ion_fd_data data;
1251		if (copy_from_user(&data, (void __user *)arg,
1252				   sizeof(struct ion_fd_data)))
1253			return -EFAULT;
1254		ion_sync_for_device(client, data.fd);
1255		break;
1256	}
1257	case ION_IOC_CUSTOM:
1258	{
1259		struct ion_device *dev = client->dev;
1260		struct ion_custom_data data;
1261
1262		if (!dev->custom_ioctl)
1263			return -ENOTTY;
1264		if (copy_from_user(&data, (void __user *)arg,
1265				sizeof(struct ion_custom_data)))
1266			return -EFAULT;
1267		return dev->custom_ioctl(client, data.cmd, data.arg);
1268	}
1269	default:
1270		return -ENOTTY;
1271	}
1272	return 0;
1273}
1274
1275static int ion_release(struct inode *inode, struct file *file)
1276{
1277	struct ion_client *client = file->private_data;
1278
1279	pr_debug("%s: %d\n", __func__, __LINE__);
1280	ion_client_destroy(client);
1281	return 0;
1282}
1283
1284static int ion_open(struct inode *inode, struct file *file)
1285{
1286	struct miscdevice *miscdev = file->private_data;
1287	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1288	struct ion_client *client;
1289
1290	pr_debug("%s: %d\n", __func__, __LINE__);
1291	client = ion_client_create(dev, "user");
1292	if (IS_ERR(client))
1293		return PTR_ERR(client);
1294	file->private_data = client;
1295
1296	return 0;
1297}
1298
1299static const struct file_operations ion_fops = {
1300	.owner          = THIS_MODULE,
1301	.open           = ion_open,
1302	.release        = ion_release,
1303	.unlocked_ioctl = ion_ioctl,
1304	.compat_ioctl   = compat_ion_ioctl,
1305};
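/*
 * Userspace flow driven by the ioctls above (illustrative sketch only, error
 * handling omitted; the heap mask value is platform-specific):
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = my_heap_id_mask,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       share.fd, 0);
 *
 *	struct ion_handle_data free_data = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */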
1306
1307static size_t ion_debug_heap_total(struct ion_client *client,
1308				   unsigned int id)
1309{
1310	size_t size = 0;
1311	struct rb_node *n;
1312
1313	mutex_lock(&client->lock);
1314	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1315		struct ion_handle *handle = rb_entry(n,
1316						     struct ion_handle,
1317						     node);
1318		if (handle->buffer->heap->id == id)
1319			size += handle->buffer->size;
1320	}
1321	mutex_unlock(&client->lock);
1322	return size;
1323}
1324
1325static int ion_debug_heap_show(struct seq_file *s, void *unused)
1326{
1327	struct ion_heap *heap = s->private;
1328	struct ion_device *dev = heap->dev;
1329	struct rb_node *n;
1330	size_t total_size = 0;
1331	size_t total_orphaned_size = 0;
1332
1333	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1334	seq_printf(s, "----------------------------------------------------\n");
1335
1336	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1337		struct ion_client *client = rb_entry(n, struct ion_client,
1338						     node);
1339		size_t size = ion_debug_heap_total(client, heap->id);
1340		if (!size)
1341			continue;
1342		if (client->task) {
1343			char task_comm[TASK_COMM_LEN];
1344
1345			get_task_comm(task_comm, client->task);
1346			seq_printf(s, "%16s %16u %16zu\n", task_comm,
1347				   client->pid, size);
1348		} else {
1349			seq_printf(s, "%16s %16u %16zu\n", client->name,
1350				   client->pid, size);
1351		}
1352	}
1353	seq_printf(s, "----------------------------------------------------\n");
1354	seq_printf(s, "orphaned allocations (info is from last known client):"
1355		   "\n");
1356	mutex_lock(&dev->buffer_lock);
1357	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1358		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1359						     node);
1360		if (buffer->heap->id != heap->id)
1361			continue;
1362		total_size += buffer->size;
1363		if (!buffer->handle_count) {
1364			seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm,
1365				   buffer->pid, buffer->size, buffer->kmap_cnt,
1366				   atomic_read(&buffer->ref.refcount));
1367			total_orphaned_size += buffer->size;
1368		}
1369	}
1370	mutex_unlock(&dev->buffer_lock);
1371	seq_printf(s, "----------------------------------------------------\n");
1372	seq_printf(s, "%16s %16zu\n", "total orphaned",
1373		   total_orphaned_size);
1374	seq_printf(s, "%16s %16zu\n", "total ", total_size);
1375	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1376		seq_printf(s, "%16s %16zu\n", "deferred free",
1377				heap->free_list_size);
1378	seq_printf(s, "----------------------------------------------------\n");
1379
1380	if (heap->debug_show)
1381		heap->debug_show(heap, s, unused);
1382
1383	return 0;
1384}
1385
1386static int ion_debug_heap_open(struct inode *inode, struct file *file)
1387{
1388	return single_open(file, ion_debug_heap_show, inode->i_private);
1389}
1390
1391static const struct file_operations debug_heap_fops = {
1392	.open = ion_debug_heap_open,
1393	.read = seq_read,
1394	.llseek = seq_lseek,
1395	.release = single_release,
1396};
1397
1398#ifdef DEBUG_HEAP_SHRINKER
1399static int debug_shrink_set(void *data, u64 val)
1400{
1401        struct ion_heap *heap = data;
1402        struct shrink_control sc;
1403        int objs;
1404
1405        sc.gfp_mask = -1;
1406        sc.nr_to_scan = 0;
1407
1408        if (!val)
1409                return 0;
1410
1411        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1412        sc.nr_to_scan = objs;
1413
1414        heap->shrinker.shrink(&heap->shrinker, &sc);
1415        return 0;
1416}
1417
1418static int debug_shrink_get(void *data, u64 *val)
1419{
1420        struct ion_heap *heap = data;
1421        struct shrink_control sc;
1422        int objs;
1423
1424        sc.gfp_mask = -1;
1425        sc.nr_to_scan = 0;
1426
1427        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1428        *val = objs;
1429        return 0;
1430}
1431
1432DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1433                        debug_shrink_set, "%llu\n");
1434#endif
1435
1436void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1437{
1438	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1439	    !heap->ops->unmap_dma)
1440		pr_err("%s: can not add heap with invalid ops struct.\n",
1441		       __func__);
1442
1443	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1444		ion_heap_init_deferred_free(heap);
1445
1446	heap->dev = dev;
1447	down_write(&dev->lock);
1448	/* use negative heap->id to reverse the priority -- when traversing
1449	   the list later attempt higher id numbers first */
1450	plist_node_init(&heap->node, -heap->id);
1451	plist_add(&heap->node, &dev->heaps);
1452	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1453			    &debug_heap_fops);
1454#ifdef DEBUG_HEAP_SHRINKER
1455	if (heap->shrinker.shrink) {
1456		char debug_name[64];
1457
1458		snprintf(debug_name, 64, "%s_shrink", heap->name);
1459		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
1460				    &debug_shrink_fops);
1461	}
1462#endif
1463	up_write(&dev->lock);
1464}
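/*
 * Typical heap registration by a platform driver (a sketch, assuming heaps
 * are described by ion_platform_data and built with the ion_heap_create()
 * helper from ion_heap.c):
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */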
1465
1466struct ion_device *ion_device_create(long (*custom_ioctl)
1467				     (struct ion_client *client,
1468				      unsigned int cmd,
1469				      unsigned long arg))
1470{
1471	struct ion_device *idev;
1472	int ret;
1473
1474	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1475	if (!idev)
1476		return ERR_PTR(-ENOMEM);
1477
1478	idev->dev.minor = MISC_DYNAMIC_MINOR;
1479	idev->dev.name = "ion";
1480	idev->dev.fops = &ion_fops;
1481	idev->dev.parent = NULL;
1482	ret = misc_register(&idev->dev);
1483	if (ret) {
1484		pr_err("ion: failed to register misc device.\n");
1485		return ERR_PTR(ret);
1486	}
1487
1488	idev->debug_root = debugfs_create_dir("ion", NULL);
1489	if (!idev->debug_root)
1490		pr_err("ion: failed to create debug files.\n");
1491
1492	idev->custom_ioctl = custom_ioctl;
1493	idev->buffers = RB_ROOT;
1494	mutex_init(&idev->buffer_lock);
1495	init_rwsem(&idev->lock);
1496	plist_head_init(&idev->heaps);
1497	idev->clients = RB_ROOT;
1498	return idev;
1499}
1500
1501void ion_device_destroy(struct ion_device *dev)
1502{
1503	misc_deregister(&dev->dev);
1504	/* XXX need to free the heaps and clients ? */
1505	kfree(dev);
1506}
1507
1508void __init ion_reserve(struct ion_platform_data *data)
1509{
1510	int i;
1511
1512	for (i = 0; i < data->nr; i++) {
1513		if (data->heaps[i].size == 0)
1514			continue;
1515
1516		if (data->heaps[i].base == 0) {
1517			phys_addr_t paddr;
1518			paddr = memblock_alloc_base(data->heaps[i].size,
1519						    data->heaps[i].align,
1520						    MEMBLOCK_ALLOC_ANYWHERE);
1521			if (!paddr) {
1522				pr_err("%s: error allocating memblock for "
1523				       "heap %d\n",
1524					__func__, i);
1525				continue;
1526			}
1527			data->heaps[i].base = paddr;
1528		} else {
1529			int ret = memblock_reserve(data->heaps[i].base,
1530					       data->heaps[i].size);
1531			if (ret)
1532				pr_err("memblock reserve of %zx@%lx failed\n",
1533				       data->heaps[i].size,
1534				       data->heaps[i].base);
1535		}
1536		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1537			data->heaps[i].name,
1538			data->heaps[i].base,
1539			data->heaps[i].size);
1540	}
1541}
1542