ion.c revision e61fc915fa8c6991f0ed14ce70a0a3c139012684
/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook for device specific ioctls (ION_IOC_CUSTOM)
 * @clients:		an rb tree of all the existing clients
 * @debug_root:		root dentry for the device's debugfs entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client's group leader, used for debugging
 * @debug_root:		this client's debugfs entry
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves; it should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the lock
 * in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

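/*
 * For buffers that fault in their user mappings, the low bit of each
 * struct page pointer in buffer->pages is used as a per-page dirty flag.
 * The helpers below mask off, test, set and clear that flag.
 */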
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_CAST(table);
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

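/*
 * kref release callback: remove the buffer from the device's rb tree and
 * either queue it on the heap's free list (deferred free) or destroy it now.
 */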
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by; it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

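/* Look up a client's handle by buffer; the caller must hold client->lock. */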
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
						int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

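/*
 * Allocate an id for the handle from the client's idr and insert the handle
 * into the client's rb tree, which is keyed by buffer address.  Called with
 * client->lock held.
 */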
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

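/*
 * Kernel mappings are reference counted at two levels: buffer->kmap_cnt
 * counts all kernel mappings of the buffer, handle->kmap_cnt counts the
 * mappings taken through one handle.  Both are manipulated with
 * buffer->lock held.
 */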
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

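/*
 * For buffers that fault in their user mappings, sync any pages that were
 * dirtied through those mappings and then zap the mappings so the pages
 * fault in (and are tracked) again on the next access.
 */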
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
							PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

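/*
 * Fault handler for faulted user mappings: mark the page dirty in
 * buffer->pages and insert it into the faulting vma.
 */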
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);

	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     ion_buffer_page(buffer->pages[vmf->pgoff]));
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

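/*
 * Export a handle's buffer as a dma-buf.  A buffer reference is taken here
 * and dropped from ion_dma_buf_release() when the dma-buf is freed.
 */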
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

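/*
 * Import a dma-buf fd that was exported by ion.  If this client already has
 * a handle for the underlying buffer, reuse it; otherwise create a new one.
 */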
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

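/*
 * ioctl interface: allocate/free handles, share or map them as dma-buf fds,
 * import dma-buf fds, sync a buffer for device access and dispatch
 * device-specific ioctls via custom_ioctl.
 */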
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_alloc(client, data.len, data.align,
					     data.heap_id_mask, data.flags);

		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		handle = ion_handle_get_by_id(client, data.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_handle_get_by_id(client, data.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);

		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
				heap->free_list_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

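/*
 * Register a heap with an ion device.  A heap is expected to implement
 * allocate, free, map_dma and unmap_dma (the ops are checked here); heaps
 * with ION_HEAP_FLAG_DEFER_FREE have their deferred free machinery set up.
 */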
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

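/*
 * Reserve memory at boot for the heaps described in the platform data:
 * regions with a fixed base are reserved with memblock_reserve(), regions
 * without one are allocated from memblock.
 */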
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}