ion.c revision 9a7e2ac9a7cf827b865b9e616a9549f6972d1140
1/*
2
3 * drivers/staging/android/ion/ion.c
4 *
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/device.h>
19#include <linux/err.h>
20#include <linux/file.h>
21#include <linux/freezer.h>
22#include <linux/fs.h>
23#include <linux/anon_inodes.h>
24#include <linux/kthread.h>
25#include <linux/list.h>
26#include <linux/memblock.h>
27#include <linux/miscdevice.h>
28#include <linux/export.h>
29#include <linux/mm.h>
30#include <linux/mm_types.h>
31#include <linux/rbtree.h>
32#include <linux/slab.h>
33#include <linux/seq_file.h>
34#include <linux/uaccess.h>
35#include <linux/vmalloc.h>
36#include <linux/debugfs.h>
37#include <linux/dma-buf.h>
38#include <linux/idr.h>
39
40#include "ion.h"
41#include "ion_priv.h"
42#include "compat_ion.h"
43
44/**
45 * struct ion_device - the metadata of the ion device node
46 * @dev:		the actual misc device
47 * @buffers:		an rb tree of all the existing buffers
48 * @buffer_lock:	lock protecting the tree of buffers
49 * @lock:		rwsem protecting the tree of heaps and clients
50 * @heaps:		list of all the heaps in the system
51 * @user_clients:	list of all the clients created from userspace
52 */
53struct ion_device {
54	struct miscdevice dev;
55	struct rb_root buffers;
56	struct mutex buffer_lock;
57	struct rw_semaphore lock;
58	struct plist_head heaps;
59	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
60			     unsigned long arg);
61	struct rb_root clients;
62	struct dentry *debug_root;
63	struct dentry *heaps_debug_root;
64	struct dentry *clients_debug_root;
65};
66
67/**
68 * struct ion_client - a process/hw block local address space
69 * @node:		node in the tree of all clients
70 * @dev:		backpointer to ion device
71 * @handles:		an rb tree of all the handles in this client
72 * @idr:		an idr space for allocating handle ids
73 * @lock:		lock protecting the tree of handles
74 * @name:		used for debugging
75 * @display_name:	used for debugging (unique version of @name)
76 * @display_serial:	used for debugging (to make display_name unique)
77 * @task:		used for debugging
78 *
79 * A client represents a list of buffers this client may access.
80 * The mutex stored here is used to protect both handles tree
81 * as well as the handles themselves, and should be held while modifying either.
82 */
83struct ion_client {
84	struct rb_node node;
85	struct ion_device *dev;
86	struct rb_root handles;
87	struct idr idr;
88	struct mutex lock;
89	const char *name;
90	char *display_name;
91	int display_serial;
92	struct task_struct *task;
93	pid_t pid;
94	struct dentry *debug_root;
95};
96
97/**
98 * ion_handle - a client local reference to a buffer
99 * @ref:		reference count
100 * @client:		back pointer to the client the buffer resides in
101 * @buffer:		pointer to the buffer
102 * @node:		node in the client's handle rbtree
103 * @kmap_cnt:		count of times this client has mapped to kernel
104 * @id:			client-unique id allocated by client->idr
105 *
106 * Modifications to node, map_cnt or mapping should be protected by the
107 * lock in the client.  Other fields are never changed after initialization.
108 */
109struct ion_handle {
110	struct kref ref;
111	struct ion_client *client;
112	struct ion_buffer *buffer;
113	struct rb_node node;
114	unsigned int kmap_cnt;
115	int id;
116};
117
118bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
119{
120	return (buffer->flags & ION_FLAG_CACHED) &&
121		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
122}
123
124bool ion_buffer_cached(struct ion_buffer *buffer)
125{
126	return !!(buffer->flags & ION_FLAG_CACHED);
127}
128
129static inline struct page *ion_buffer_page(struct page *page)
130{
131	return (struct page *)((unsigned long)page & ~(1UL));
132}
133
134static inline bool ion_buffer_page_is_dirty(struct page *page)
135{
136	return !!((unsigned long)page & 1UL);
137}
138
139static inline void ion_buffer_page_dirty(struct page **page)
140{
141	*page = (struct page *)((unsigned long)(*page) | 1UL);
142}
143
144static inline void ion_buffer_page_clean(struct page **page)
145{
146	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
147}
148
149/* this function should only be called while dev->lock is held */
150static void ion_buffer_add(struct ion_device *dev,
151			   struct ion_buffer *buffer)
152{
153	struct rb_node **p = &dev->buffers.rb_node;
154	struct rb_node *parent = NULL;
155	struct ion_buffer *entry;
156
157	while (*p) {
158		parent = *p;
159		entry = rb_entry(parent, struct ion_buffer, node);
160
161		if (buffer < entry) {
162			p = &(*p)->rb_left;
163		} else if (buffer > entry) {
164			p = &(*p)->rb_right;
165		} else {
166			pr_err("%s: buffer already found.", __func__);
167			BUG();
168		}
169	}
170
171	rb_link_node(&buffer->node, parent, p);
172	rb_insert_color(&buffer->node, &dev->buffers);
173}
174
175/* this function should only be called while dev->lock is held */
176static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
177				     struct ion_device *dev,
178				     unsigned long len,
179				     unsigned long align,
180				     unsigned long flags)
181{
182	struct ion_buffer *buffer;
183	struct sg_table *table;
184	struct scatterlist *sg;
185	int i, ret;
186
187	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
188	if (!buffer)
189		return ERR_PTR(-ENOMEM);
190
191	buffer->heap = heap;
192	buffer->flags = flags;
193	kref_init(&buffer->ref);
194
195	ret = heap->ops->allocate(heap, buffer, len, align, flags);
196
197	if (ret) {
198		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
199			goto err2;
200
201		ion_heap_freelist_drain(heap, 0);
202		ret = heap->ops->allocate(heap, buffer, len, align,
203					  flags);
204		if (ret)
205			goto err2;
206	}
207
208	buffer->dev = dev;
209	buffer->size = len;
210
211	table = heap->ops->map_dma(heap, buffer);
212	if (WARN_ONCE(table == NULL,
213			"heap->ops->map_dma should return ERR_PTR on error"))
214		table = ERR_PTR(-EINVAL);
215	if (IS_ERR(table)) {
216		heap->ops->free(buffer);
217		kfree(buffer);
218		return ERR_CAST(table);
219	}
220	buffer->sg_table = table;
221	if (ion_buffer_fault_user_mappings(buffer)) {
222		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
223		struct scatterlist *sg;
224		int i, j, k = 0;
225
226		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
227		if (!buffer->pages) {
228			ret = -ENOMEM;
229			goto err1;
230		}
231
232		for_each_sg(table->sgl, sg, table->nents, i) {
233			struct page *page = sg_page(sg);
234
235			for (j = 0; j < sg->length / PAGE_SIZE; j++)
236				buffer->pages[k++] = page++;
237		}
238
239		if (ret)
240			goto err;
241	}
242
243	buffer->dev = dev;
244	buffer->size = len;
245	INIT_LIST_HEAD(&buffer->vmas);
246	mutex_init(&buffer->lock);
247	/* this will set up dma addresses for the sglist -- it is not
248	   technically correct as per the dma api -- a specific
249	   device isn't really taking ownership here.  However, in practice on
250	   our systems the only dma_address space is physical addresses.
251	   Additionally, we can't afford the overhead of invalidating every
252	   allocation via dma_map_sg. The implicit contract here is that
253	   memory comming from the heaps is ready for dma, ie if it has a
254	   cached mapping that mapping has been invalidated */
255	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
256		sg_dma_address(sg) = sg_phys(sg);
257	mutex_lock(&dev->buffer_lock);
258	ion_buffer_add(dev, buffer);
259	mutex_unlock(&dev->buffer_lock);
260	return buffer;
261
262err:
263	heap->ops->unmap_dma(heap, buffer);
264	heap->ops->free(buffer);
265err1:
266	if (buffer->pages)
267		vfree(buffer->pages);
268err2:
269	kfree(buffer);
270	return ERR_PTR(ret);
271}
272
273void ion_buffer_destroy(struct ion_buffer *buffer)
274{
275	if (WARN_ON(buffer->kmap_cnt > 0))
276		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
277	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
278	buffer->heap->ops->free(buffer);
279	if (buffer->pages)
280		vfree(buffer->pages);
281	kfree(buffer);
282}
283
284static void _ion_buffer_destroy(struct kref *kref)
285{
286	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
287	struct ion_heap *heap = buffer->heap;
288	struct ion_device *dev = buffer->dev;
289
290	mutex_lock(&dev->buffer_lock);
291	rb_erase(&buffer->node, &dev->buffers);
292	mutex_unlock(&dev->buffer_lock);
293
294	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
295		ion_heap_freelist_add(heap, buffer);
296	else
297		ion_buffer_destroy(buffer);
298}
299
300static void ion_buffer_get(struct ion_buffer *buffer)
301{
302	kref_get(&buffer->ref);
303}
304
305static int ion_buffer_put(struct ion_buffer *buffer)
306{
307	return kref_put(&buffer->ref, _ion_buffer_destroy);
308}
309
310static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
311{
312	mutex_lock(&buffer->lock);
313	buffer->handle_count++;
314	mutex_unlock(&buffer->lock);
315}
316
317static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
318{
319	/*
320	 * when a buffer is removed from a handle, if it is not in
321	 * any other handles, copy the taskcomm and the pid of the
322	 * process it's being removed from into the buffer.  At this
323	 * point there will be no way to track what processes this buffer is
324	 * being used by, it only exists as a dma_buf file descriptor.
325	 * The taskcomm and pid can provide a debug hint as to where this fd
326	 * is in the system
327	 */
328	mutex_lock(&buffer->lock);
329	buffer->handle_count--;
330	BUG_ON(buffer->handle_count < 0);
331	if (!buffer->handle_count) {
332		struct task_struct *task;
333
334		task = current->group_leader;
335		get_task_comm(buffer->task_comm, task);
336		buffer->pid = task_pid_nr(task);
337	}
338	mutex_unlock(&buffer->lock);
339}
340
341static struct ion_handle *ion_handle_create(struct ion_client *client,
342				     struct ion_buffer *buffer)
343{
344	struct ion_handle *handle;
345
346	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
347	if (!handle)
348		return ERR_PTR(-ENOMEM);
349	kref_init(&handle->ref);
350	RB_CLEAR_NODE(&handle->node);
351	handle->client = client;
352	ion_buffer_get(buffer);
353	ion_buffer_add_to_handle(buffer);
354	handle->buffer = buffer;
355
356	return handle;
357}
358
359static void ion_handle_kmap_put(struct ion_handle *);
360
361static void ion_handle_destroy(struct kref *kref)
362{
363	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
364	struct ion_client *client = handle->client;
365	struct ion_buffer *buffer = handle->buffer;
366
367	mutex_lock(&buffer->lock);
368	while (handle->kmap_cnt)
369		ion_handle_kmap_put(handle);
370	mutex_unlock(&buffer->lock);
371
372	idr_remove(&client->idr, handle->id);
373	if (!RB_EMPTY_NODE(&handle->node))
374		rb_erase(&handle->node, &client->handles);
375
376	ion_buffer_remove_from_handle(buffer);
377	ion_buffer_put(buffer);
378
379	kfree(handle);
380}
381
382struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
383{
384	return handle->buffer;
385}
386
387static void ion_handle_get(struct ion_handle *handle)
388{
389	kref_get(&handle->ref);
390}
391
392static int ion_handle_put(struct ion_handle *handle)
393{
394	struct ion_client *client = handle->client;
395	int ret;
396
397	mutex_lock(&client->lock);
398	ret = kref_put(&handle->ref, ion_handle_destroy);
399	mutex_unlock(&client->lock);
400
401	return ret;
402}
403
404static struct ion_handle *ion_handle_lookup(struct ion_client *client,
405					    struct ion_buffer *buffer)
406{
407	struct rb_node *n = client->handles.rb_node;
408
409	while (n) {
410		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
411
412		if (buffer < entry->buffer)
413			n = n->rb_left;
414		else if (buffer > entry->buffer)
415			n = n->rb_right;
416		else
417			return entry;
418	}
419	return ERR_PTR(-EINVAL);
420}
421
422static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
423						int id)
424{
425	struct ion_handle *handle;
426
427	mutex_lock(&client->lock);
428	handle = idr_find(&client->idr, id);
429	if (handle)
430		ion_handle_get(handle);
431	mutex_unlock(&client->lock);
432
433	return handle ? handle : ERR_PTR(-EINVAL);
434}
435
436static bool ion_handle_validate(struct ion_client *client,
437				struct ion_handle *handle)
438{
439	WARN_ON(!mutex_is_locked(&client->lock));
440	return idr_find(&client->idr, handle->id) == handle;
441}
442
443static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
444{
445	int id;
446	struct rb_node **p = &client->handles.rb_node;
447	struct rb_node *parent = NULL;
448	struct ion_handle *entry;
449
450	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
451	if (id < 0)
452		return id;
453
454	handle->id = id;
455
456	while (*p) {
457		parent = *p;
458		entry = rb_entry(parent, struct ion_handle, node);
459
460		if (handle->buffer < entry->buffer)
461			p = &(*p)->rb_left;
462		else if (handle->buffer > entry->buffer)
463			p = &(*p)->rb_right;
464		else
465			WARN(1, "%s: buffer already found.", __func__);
466	}
467
468	rb_link_node(&handle->node, parent, p);
469	rb_insert_color(&handle->node, &client->handles);
470
471	return 0;
472}
473
474struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
475			     size_t align, unsigned int heap_id_mask,
476			     unsigned int flags)
477{
478	struct ion_handle *handle;
479	struct ion_device *dev = client->dev;
480	struct ion_buffer *buffer = NULL;
481	struct ion_heap *heap;
482	int ret;
483
484	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
485		 len, align, heap_id_mask, flags);
486	/*
487	 * traverse the list of heaps available in this system in priority
488	 * order.  If the heap type is supported by the client, and matches the
489	 * request of the caller allocate from it.  Repeat until allocate has
490	 * succeeded or all heaps have been tried
491	 */
492	len = PAGE_ALIGN(len);
493
494	if (!len)
495		return ERR_PTR(-EINVAL);
496
497	down_read(&dev->lock);
498	plist_for_each_entry(heap, &dev->heaps, node) {
499		/* if the caller didn't specify this heap id */
500		if (!((1 << heap->id) & heap_id_mask))
501			continue;
502		buffer = ion_buffer_create(heap, dev, len, align, flags);
503		if (!IS_ERR(buffer))
504			break;
505	}
506	up_read(&dev->lock);
507
508	if (buffer == NULL)
509		return ERR_PTR(-ENODEV);
510
511	if (IS_ERR(buffer))
512		return ERR_CAST(buffer);
513
514	handle = ion_handle_create(client, buffer);
515
516	/*
517	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
518	 * and ion_handle_create will take a second reference, drop one here
519	 */
520	ion_buffer_put(buffer);
521
522	if (IS_ERR(handle))
523		return handle;
524
525	mutex_lock(&client->lock);
526	ret = ion_handle_add(client, handle);
527	mutex_unlock(&client->lock);
528	if (ret) {
529		ion_handle_put(handle);
530		handle = ERR_PTR(ret);
531	}
532
533	return handle;
534}
535EXPORT_SYMBOL(ion_alloc);
536
537void ion_free(struct ion_client *client, struct ion_handle *handle)
538{
539	bool valid_handle;
540
541	BUG_ON(client != handle->client);
542
543	mutex_lock(&client->lock);
544	valid_handle = ion_handle_validate(client, handle);
545
546	if (!valid_handle) {
547		WARN(1, "%s: invalid handle passed to free.\n", __func__);
548		mutex_unlock(&client->lock);
549		return;
550	}
551	mutex_unlock(&client->lock);
552	ion_handle_put(handle);
553}
554EXPORT_SYMBOL(ion_free);
555
556int ion_phys(struct ion_client *client, struct ion_handle *handle,
557	     ion_phys_addr_t *addr, size_t *len)
558{
559	struct ion_buffer *buffer;
560	int ret;
561
562	mutex_lock(&client->lock);
563	if (!ion_handle_validate(client, handle)) {
564		mutex_unlock(&client->lock);
565		return -EINVAL;
566	}
567
568	buffer = handle->buffer;
569
570	if (!buffer->heap->ops->phys) {
571		pr_err("%s: ion_phys is not implemented by this heap.\n",
572		       __func__);
573		mutex_unlock(&client->lock);
574		return -ENODEV;
575	}
576	mutex_unlock(&client->lock);
577	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
578	return ret;
579}
580EXPORT_SYMBOL(ion_phys);
581
582static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
583{
584	void *vaddr;
585
586	if (buffer->kmap_cnt) {
587		buffer->kmap_cnt++;
588		return buffer->vaddr;
589	}
590	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
591	if (WARN_ONCE(vaddr == NULL,
592			"heap->ops->map_kernel should return ERR_PTR on error"))
593		return ERR_PTR(-EINVAL);
594	if (IS_ERR(vaddr))
595		return vaddr;
596	buffer->vaddr = vaddr;
597	buffer->kmap_cnt++;
598	return vaddr;
599}
600
601static void *ion_handle_kmap_get(struct ion_handle *handle)
602{
603	struct ion_buffer *buffer = handle->buffer;
604	void *vaddr;
605
606	if (handle->kmap_cnt) {
607		handle->kmap_cnt++;
608		return buffer->vaddr;
609	}
610	vaddr = ion_buffer_kmap_get(buffer);
611	if (IS_ERR(vaddr))
612		return vaddr;
613	handle->kmap_cnt++;
614	return vaddr;
615}
616
617static void ion_buffer_kmap_put(struct ion_buffer *buffer)
618{
619	buffer->kmap_cnt--;
620	if (!buffer->kmap_cnt) {
621		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
622		buffer->vaddr = NULL;
623	}
624}
625
626static void ion_handle_kmap_put(struct ion_handle *handle)
627{
628	struct ion_buffer *buffer = handle->buffer;
629
630	if (!handle->kmap_cnt) {
631		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
632		return;
633	}
634	handle->kmap_cnt--;
635	if (!handle->kmap_cnt)
636		ion_buffer_kmap_put(buffer);
637}
638
639void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
640{
641	struct ion_buffer *buffer;
642	void *vaddr;
643
644	mutex_lock(&client->lock);
645	if (!ion_handle_validate(client, handle)) {
646		pr_err("%s: invalid handle passed to map_kernel.\n",
647		       __func__);
648		mutex_unlock(&client->lock);
649		return ERR_PTR(-EINVAL);
650	}
651
652	buffer = handle->buffer;
653
654	if (!handle->buffer->heap->ops->map_kernel) {
655		pr_err("%s: map_kernel is not implemented by this heap.\n",
656		       __func__);
657		mutex_unlock(&client->lock);
658		return ERR_PTR(-ENODEV);
659	}
660
661	mutex_lock(&buffer->lock);
662	vaddr = ion_handle_kmap_get(handle);
663	mutex_unlock(&buffer->lock);
664	mutex_unlock(&client->lock);
665	return vaddr;
666}
667EXPORT_SYMBOL(ion_map_kernel);
668
669void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
670{
671	struct ion_buffer *buffer;
672
673	mutex_lock(&client->lock);
674	buffer = handle->buffer;
675	mutex_lock(&buffer->lock);
676	ion_handle_kmap_put(handle);
677	mutex_unlock(&buffer->lock);
678	mutex_unlock(&client->lock);
679}
680EXPORT_SYMBOL(ion_unmap_kernel);
681
682static int ion_debug_client_show(struct seq_file *s, void *unused)
683{
684	struct ion_client *client = s->private;
685	struct rb_node *n;
686	size_t sizes[ION_NUM_HEAP_IDS] = {0};
687	const char *names[ION_NUM_HEAP_IDS] = {NULL};
688	int i;
689
690	mutex_lock(&client->lock);
691	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
692		struct ion_handle *handle = rb_entry(n, struct ion_handle,
693						     node);
694		unsigned int id = handle->buffer->heap->id;
695
696		if (!names[id])
697			names[id] = handle->buffer->heap->name;
698		sizes[id] += handle->buffer->size;
699	}
700	mutex_unlock(&client->lock);
701
702	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
703	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
704		if (!names[i])
705			continue;
706		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
707	}
708	return 0;
709}
710
711static int ion_debug_client_open(struct inode *inode, struct file *file)
712{
713	return single_open(file, ion_debug_client_show, inode->i_private);
714}
715
716static const struct file_operations debug_client_fops = {
717	.open = ion_debug_client_open,
718	.read = seq_read,
719	.llseek = seq_lseek,
720	.release = single_release,
721};
722
723static int ion_get_client_serial(const struct rb_root *root,
724					const unsigned char *name)
725{
726	int serial = -1;
727	struct rb_node *node;
728
729	for (node = rb_first(root); node; node = rb_next(node)) {
730		struct ion_client *client = rb_entry(node, struct ion_client,
731						node);
732
733		if (strcmp(client->name, name))
734			continue;
735		serial = max(serial, client->display_serial);
736	}
737	return serial + 1;
738}
739
740struct ion_client *ion_client_create(struct ion_device *dev,
741				     const char *name)
742{
743	struct ion_client *client;
744	struct task_struct *task;
745	struct rb_node **p;
746	struct rb_node *parent = NULL;
747	struct ion_client *entry;
748	pid_t pid;
749
750	if (!name) {
751		pr_err("%s: Name cannot be null\n", __func__);
752		return ERR_PTR(-EINVAL);
753	}
754
755	get_task_struct(current->group_leader);
756	task_lock(current->group_leader);
757	pid = task_pid_nr(current->group_leader);
758	/* don't bother to store task struct for kernel threads,
759	   they can't be killed anyway */
760	if (current->group_leader->flags & PF_KTHREAD) {
761		put_task_struct(current->group_leader);
762		task = NULL;
763	} else {
764		task = current->group_leader;
765	}
766	task_unlock(current->group_leader);
767
768	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
769	if (!client)
770		goto err_put_task_struct;
771
772	client->dev = dev;
773	client->handles = RB_ROOT;
774	idr_init(&client->idr);
775	mutex_init(&client->lock);
776	client->task = task;
777	client->pid = pid;
778	client->name = kstrdup(name, GFP_KERNEL);
779	if (!client->name)
780		goto err_free_client;
781
782	down_write(&dev->lock);
783	client->display_serial = ion_get_client_serial(&dev->clients, name);
784	client->display_name = kasprintf(
785		GFP_KERNEL, "%s-%d", name, client->display_serial);
786	if (!client->display_name) {
787		up_write(&dev->lock);
788		goto err_free_client_name;
789	}
790	p = &dev->clients.rb_node;
791	while (*p) {
792		parent = *p;
793		entry = rb_entry(parent, struct ion_client, node);
794
795		if (client < entry)
796			p = &(*p)->rb_left;
797		else if (client > entry)
798			p = &(*p)->rb_right;
799	}
800	rb_link_node(&client->node, parent, p);
801	rb_insert_color(&client->node, &dev->clients);
802
803	client->debug_root = debugfs_create_file(client->display_name, 0664,
804						dev->clients_debug_root,
805						client, &debug_client_fops);
806	if (!client->debug_root) {
807		char buf[256], *path;
808
809		path = dentry_path(dev->clients_debug_root, buf, 256);
810		pr_err("Failed to create client debugfs at %s/%s\n",
811			path, client->display_name);
812	}
813
814	up_write(&dev->lock);
815
816	return client;
817
818err_free_client_name:
819	kfree(client->name);
820err_free_client:
821	kfree(client);
822err_put_task_struct:
823	if (task)
824		put_task_struct(current->group_leader);
825	return ERR_PTR(-ENOMEM);
826}
827EXPORT_SYMBOL(ion_client_create);
828
829void ion_client_destroy(struct ion_client *client)
830{
831	struct ion_device *dev = client->dev;
832	struct rb_node *n;
833
834	pr_debug("%s: %d\n", __func__, __LINE__);
835	while ((n = rb_first(&client->handles))) {
836		struct ion_handle *handle = rb_entry(n, struct ion_handle,
837						     node);
838		ion_handle_destroy(&handle->ref);
839	}
840
841	idr_destroy(&client->idr);
842
843	down_write(&dev->lock);
844	if (client->task)
845		put_task_struct(client->task);
846	rb_erase(&client->node, &dev->clients);
847	debugfs_remove_recursive(client->debug_root);
848	up_write(&dev->lock);
849
850	kfree(client->display_name);
851	kfree(client->name);
852	kfree(client);
853}
854EXPORT_SYMBOL(ion_client_destroy);
855
856struct sg_table *ion_sg_table(struct ion_client *client,
857			      struct ion_handle *handle)
858{
859	struct ion_buffer *buffer;
860	struct sg_table *table;
861
862	mutex_lock(&client->lock);
863	if (!ion_handle_validate(client, handle)) {
864		pr_err("%s: invalid handle passed to map_dma.\n",
865		       __func__);
866		mutex_unlock(&client->lock);
867		return ERR_PTR(-EINVAL);
868	}
869	buffer = handle->buffer;
870	table = buffer->sg_table;
871	mutex_unlock(&client->lock);
872	return table;
873}
874EXPORT_SYMBOL(ion_sg_table);
875
876static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
877				       struct device *dev,
878				       enum dma_data_direction direction);
879
880static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
881					enum dma_data_direction direction)
882{
883	struct dma_buf *dmabuf = attachment->dmabuf;
884	struct ion_buffer *buffer = dmabuf->priv;
885
886	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
887	return buffer->sg_table;
888}
889
890static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
891			      struct sg_table *table,
892			      enum dma_data_direction direction)
893{
894}
895
896void ion_pages_sync_for_device(struct device *dev, struct page *page,
897		size_t size, enum dma_data_direction dir)
898{
899	struct scatterlist sg;
900
901	sg_init_table(&sg, 1);
902	sg_set_page(&sg, page, size, 0);
903	/*
904	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
905	 * for the targeted device, but this works on the currently targeted
906	 * hardware.
907	 */
908	sg_dma_address(&sg) = page_to_phys(page);
909	dma_sync_sg_for_device(dev, &sg, 1, dir);
910}
911
912struct ion_vma_list {
913	struct list_head list;
914	struct vm_area_struct *vma;
915};
916
917static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
918				       struct device *dev,
919				       enum dma_data_direction dir)
920{
921	struct ion_vma_list *vma_list;
922	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
923	int i;
924
925	pr_debug("%s: syncing for device %s\n", __func__,
926		 dev ? dev_name(dev) : "null");
927
928	if (!ion_buffer_fault_user_mappings(buffer))
929		return;
930
931	mutex_lock(&buffer->lock);
932	for (i = 0; i < pages; i++) {
933		struct page *page = buffer->pages[i];
934
935		if (ion_buffer_page_is_dirty(page))
936			ion_pages_sync_for_device(dev, ion_buffer_page(page),
937							PAGE_SIZE, dir);
938
939		ion_buffer_page_clean(buffer->pages + i);
940	}
941	list_for_each_entry(vma_list, &buffer->vmas, list) {
942		struct vm_area_struct *vma = vma_list->vma;
943
944		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
945			       NULL);
946	}
947	mutex_unlock(&buffer->lock);
948}
949
950static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
951{
952	struct ion_buffer *buffer = vma->vm_private_data;
953	unsigned long pfn;
954	int ret;
955
956	mutex_lock(&buffer->lock);
957	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
958	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
959
960	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
961	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
962	mutex_unlock(&buffer->lock);
963	if (ret)
964		return VM_FAULT_ERROR;
965
966	return VM_FAULT_NOPAGE;
967}
968
969static void ion_vm_open(struct vm_area_struct *vma)
970{
971	struct ion_buffer *buffer = vma->vm_private_data;
972	struct ion_vma_list *vma_list;
973
974	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
975	if (!vma_list)
976		return;
977	vma_list->vma = vma;
978	mutex_lock(&buffer->lock);
979	list_add(&vma_list->list, &buffer->vmas);
980	mutex_unlock(&buffer->lock);
981	pr_debug("%s: adding %p\n", __func__, vma);
982}
983
984static void ion_vm_close(struct vm_area_struct *vma)
985{
986	struct ion_buffer *buffer = vma->vm_private_data;
987	struct ion_vma_list *vma_list, *tmp;
988
989	pr_debug("%s\n", __func__);
990	mutex_lock(&buffer->lock);
991	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
992		if (vma_list->vma != vma)
993			continue;
994		list_del(&vma_list->list);
995		kfree(vma_list);
996		pr_debug("%s: deleting %p\n", __func__, vma);
997		break;
998	}
999	mutex_unlock(&buffer->lock);
1000}
1001
1002static struct vm_operations_struct ion_vma_ops = {
1003	.open = ion_vm_open,
1004	.close = ion_vm_close,
1005	.fault = ion_vm_fault,
1006};
1007
1008static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1009{
1010	struct ion_buffer *buffer = dmabuf->priv;
1011	int ret = 0;
1012
1013	if (!buffer->heap->ops->map_user) {
1014		pr_err("%s: this heap does not define a method for mapping to userspace\n",
1015			__func__);
1016		return -EINVAL;
1017	}
1018
1019	if (ion_buffer_fault_user_mappings(buffer)) {
1020		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1021							VM_DONTDUMP;
1022		vma->vm_private_data = buffer;
1023		vma->vm_ops = &ion_vma_ops;
1024		ion_vm_open(vma);
1025		return 0;
1026	}
1027
1028	if (!(buffer->flags & ION_FLAG_CACHED))
1029		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1030
1031	mutex_lock(&buffer->lock);
1032	/* now map it to userspace */
1033	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1034	mutex_unlock(&buffer->lock);
1035
1036	if (ret)
1037		pr_err("%s: failure mapping buffer to userspace\n",
1038		       __func__);
1039
1040	return ret;
1041}
1042
1043static void ion_dma_buf_release(struct dma_buf *dmabuf)
1044{
1045	struct ion_buffer *buffer = dmabuf->priv;
1046
1047	ion_buffer_put(buffer);
1048}
1049
1050static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1051{
1052	struct ion_buffer *buffer = dmabuf->priv;
1053
1054	return buffer->vaddr + offset * PAGE_SIZE;
1055}
1056
1057static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1058			       void *ptr)
1059{
1060}
1061
1062static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1063					size_t len,
1064					enum dma_data_direction direction)
1065{
1066	struct ion_buffer *buffer = dmabuf->priv;
1067	void *vaddr;
1068
1069	if (!buffer->heap->ops->map_kernel) {
1070		pr_err("%s: map kernel is not implemented by this heap.\n",
1071		       __func__);
1072		return -ENODEV;
1073	}
1074
1075	mutex_lock(&buffer->lock);
1076	vaddr = ion_buffer_kmap_get(buffer);
1077	mutex_unlock(&buffer->lock);
1078	return PTR_ERR_OR_ZERO(vaddr);
1079}
1080
1081static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1082				       size_t len,
1083				       enum dma_data_direction direction)
1084{
1085	struct ion_buffer *buffer = dmabuf->priv;
1086
1087	mutex_lock(&buffer->lock);
1088	ion_buffer_kmap_put(buffer);
1089	mutex_unlock(&buffer->lock);
1090}
1091
1092static struct dma_buf_ops dma_buf_ops = {
1093	.map_dma_buf = ion_map_dma_buf,
1094	.unmap_dma_buf = ion_unmap_dma_buf,
1095	.mmap = ion_mmap,
1096	.release = ion_dma_buf_release,
1097	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
1098	.end_cpu_access = ion_dma_buf_end_cpu_access,
1099	.kmap_atomic = ion_dma_buf_kmap,
1100	.kunmap_atomic = ion_dma_buf_kunmap,
1101	.kmap = ion_dma_buf_kmap,
1102	.kunmap = ion_dma_buf_kunmap,
1103};
1104
1105struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1106						struct ion_handle *handle)
1107{
1108	struct ion_buffer *buffer;
1109	struct dma_buf *dmabuf;
1110	bool valid_handle;
1111
1112	mutex_lock(&client->lock);
1113	valid_handle = ion_handle_validate(client, handle);
1114	if (!valid_handle) {
1115		WARN(1, "%s: invalid handle passed to share.\n", __func__);
1116		mutex_unlock(&client->lock);
1117		return ERR_PTR(-EINVAL);
1118	}
1119	buffer = handle->buffer;
1120	ion_buffer_get(buffer);
1121	mutex_unlock(&client->lock);
1122
1123	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR,
1124				NULL);
1125	if (IS_ERR(dmabuf)) {
1126		ion_buffer_put(buffer);
1127		return dmabuf;
1128	}
1129
1130	return dmabuf;
1131}
1132EXPORT_SYMBOL(ion_share_dma_buf);
1133
1134int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1135{
1136	struct dma_buf *dmabuf;
1137	int fd;
1138
1139	dmabuf = ion_share_dma_buf(client, handle);
1140	if (IS_ERR(dmabuf))
1141		return PTR_ERR(dmabuf);
1142
1143	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1144	if (fd < 0)
1145		dma_buf_put(dmabuf);
1146
1147	return fd;
1148}
1149EXPORT_SYMBOL(ion_share_dma_buf_fd);
1150
1151struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1152{
1153	struct dma_buf *dmabuf;
1154	struct ion_buffer *buffer;
1155	struct ion_handle *handle;
1156	int ret;
1157
1158	dmabuf = dma_buf_get(fd);
1159	if (IS_ERR(dmabuf))
1160		return ERR_CAST(dmabuf);
1161	/* if this memory came from ion */
1162
1163	if (dmabuf->ops != &dma_buf_ops) {
1164		pr_err("%s: can not import dmabuf from another exporter\n",
1165		       __func__);
1166		dma_buf_put(dmabuf);
1167		return ERR_PTR(-EINVAL);
1168	}
1169	buffer = dmabuf->priv;
1170
1171	mutex_lock(&client->lock);
1172	/* if a handle exists for this buffer just take a reference to it */
1173	handle = ion_handle_lookup(client, buffer);
1174	if (!IS_ERR(handle)) {
1175		ion_handle_get(handle);
1176		mutex_unlock(&client->lock);
1177		goto end;
1178	}
1179	mutex_unlock(&client->lock);
1180
1181	handle = ion_handle_create(client, buffer);
1182	if (IS_ERR(handle))
1183		goto end;
1184
1185	mutex_lock(&client->lock);
1186	ret = ion_handle_add(client, handle);
1187	mutex_unlock(&client->lock);
1188	if (ret) {
1189		ion_handle_put(handle);
1190		handle = ERR_PTR(ret);
1191	}
1192
1193end:
1194	dma_buf_put(dmabuf);
1195	return handle;
1196}
1197EXPORT_SYMBOL(ion_import_dma_buf);
1198
1199static int ion_sync_for_device(struct ion_client *client, int fd)
1200{
1201	struct dma_buf *dmabuf;
1202	struct ion_buffer *buffer;
1203
1204	dmabuf = dma_buf_get(fd);
1205	if (IS_ERR(dmabuf))
1206		return PTR_ERR(dmabuf);
1207
1208	/* if this memory came from ion */
1209	if (dmabuf->ops != &dma_buf_ops) {
1210		pr_err("%s: can not sync dmabuf from another exporter\n",
1211		       __func__);
1212		dma_buf_put(dmabuf);
1213		return -EINVAL;
1214	}
1215	buffer = dmabuf->priv;
1216
1217	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1218			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1219	dma_buf_put(dmabuf);
1220	return 0;
1221}
1222
1223/* fix up the cases where the ioctl direction bits are incorrect */
1224static unsigned int ion_ioctl_dir(unsigned int cmd)
1225{
1226	switch (cmd) {
1227	case ION_IOC_SYNC:
1228	case ION_IOC_FREE:
1229	case ION_IOC_CUSTOM:
1230		return _IOC_WRITE;
1231	default:
1232		return _IOC_DIR(cmd);
1233	}
1234}
1235
1236static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1237{
1238	struct ion_client *client = filp->private_data;
1239	struct ion_device *dev = client->dev;
1240	struct ion_handle *cleanup_handle = NULL;
1241	int ret = 0;
1242	unsigned int dir;
1243
1244	union {
1245		struct ion_fd_data fd;
1246		struct ion_allocation_data allocation;
1247		struct ion_handle_data handle;
1248		struct ion_custom_data custom;
1249	} data;
1250
1251	dir = ion_ioctl_dir(cmd);
1252
1253	if (_IOC_SIZE(cmd) > sizeof(data))
1254		return -EINVAL;
1255
1256	if (dir & _IOC_WRITE)
1257		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1258			return -EFAULT;
1259
1260	switch (cmd) {
1261	case ION_IOC_ALLOC:
1262	{
1263		struct ion_handle *handle;
1264
1265		handle = ion_alloc(client, data.allocation.len,
1266						data.allocation.align,
1267						data.allocation.heap_id_mask,
1268						data.allocation.flags);
1269		if (IS_ERR(handle))
1270			return PTR_ERR(handle);
1271
1272		data.allocation.handle = handle->id;
1273
1274		cleanup_handle = handle;
1275		break;
1276	}
1277	case ION_IOC_FREE:
1278	{
1279		struct ion_handle *handle;
1280
1281		handle = ion_handle_get_by_id(client, data.handle.handle);
1282		if (IS_ERR(handle))
1283			return PTR_ERR(handle);
1284		ion_free(client, handle);
1285		ion_handle_put(handle);
1286		break;
1287	}
1288	case ION_IOC_SHARE:
1289	case ION_IOC_MAP:
1290	{
1291		struct ion_handle *handle;
1292
1293		handle = ion_handle_get_by_id(client, data.handle.handle);
1294		if (IS_ERR(handle))
1295			return PTR_ERR(handle);
1296		data.fd.fd = ion_share_dma_buf_fd(client, handle);
1297		ion_handle_put(handle);
1298		if (data.fd.fd < 0)
1299			ret = data.fd.fd;
1300		break;
1301	}
1302	case ION_IOC_IMPORT:
1303	{
1304		struct ion_handle *handle;
1305
1306		handle = ion_import_dma_buf(client, data.fd.fd);
1307		if (IS_ERR(handle))
1308			ret = PTR_ERR(handle);
1309		else
1310			data.handle.handle = handle->id;
1311		break;
1312	}
1313	case ION_IOC_SYNC:
1314	{
1315		ret = ion_sync_for_device(client, data.fd.fd);
1316		break;
1317	}
1318	case ION_IOC_CUSTOM:
1319	{
1320		if (!dev->custom_ioctl)
1321			return -ENOTTY;
1322		ret = dev->custom_ioctl(client, data.custom.cmd,
1323						data.custom.arg);
1324		break;
1325	}
1326	default:
1327		return -ENOTTY;
1328	}
1329
1330	if (dir & _IOC_READ) {
1331		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1332			if (cleanup_handle)
1333				ion_free(client, cleanup_handle);
1334			return -EFAULT;
1335		}
1336	}
1337	return ret;
1338}
1339
1340static int ion_release(struct inode *inode, struct file *file)
1341{
1342	struct ion_client *client = file->private_data;
1343
1344	pr_debug("%s: %d\n", __func__, __LINE__);
1345	ion_client_destroy(client);
1346	return 0;
1347}
1348
1349static int ion_open(struct inode *inode, struct file *file)
1350{
1351	struct miscdevice *miscdev = file->private_data;
1352	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1353	struct ion_client *client;
1354	char debug_name[64];
1355
1356	pr_debug("%s: %d\n", __func__, __LINE__);
1357	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1358	client = ion_client_create(dev, debug_name);
1359	if (IS_ERR(client))
1360		return PTR_ERR(client);
1361	file->private_data = client;
1362
1363	return 0;
1364}
1365
1366static const struct file_operations ion_fops = {
1367	.owner          = THIS_MODULE,
1368	.open           = ion_open,
1369	.release        = ion_release,
1370	.unlocked_ioctl = ion_ioctl,
1371	.compat_ioctl   = compat_ion_ioctl,
1372};
1373
1374static size_t ion_debug_heap_total(struct ion_client *client,
1375				   unsigned int id)
1376{
1377	size_t size = 0;
1378	struct rb_node *n;
1379
1380	mutex_lock(&client->lock);
1381	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1382		struct ion_handle *handle = rb_entry(n,
1383						     struct ion_handle,
1384						     node);
1385		if (handle->buffer->heap->id == id)
1386			size += handle->buffer->size;
1387	}
1388	mutex_unlock(&client->lock);
1389	return size;
1390}
1391
1392static int ion_debug_heap_show(struct seq_file *s, void *unused)
1393{
1394	struct ion_heap *heap = s->private;
1395	struct ion_device *dev = heap->dev;
1396	struct rb_node *n;
1397	size_t total_size = 0;
1398	size_t total_orphaned_size = 0;
1399
1400	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1401	seq_puts(s, "----------------------------------------------------\n");
1402
1403	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1404		struct ion_client *client = rb_entry(n, struct ion_client,
1405						     node);
1406		size_t size = ion_debug_heap_total(client, heap->id);
1407
1408		if (!size)
1409			continue;
1410		if (client->task) {
1411			char task_comm[TASK_COMM_LEN];
1412
1413			get_task_comm(task_comm, client->task);
1414			seq_printf(s, "%16.s %16u %16zu\n", task_comm,
1415				   client->pid, size);
1416		} else {
1417			seq_printf(s, "%16.s %16u %16zu\n", client->name,
1418				   client->pid, size);
1419		}
1420	}
1421	seq_puts(s, "----------------------------------------------------\n");
1422	seq_puts(s, "orphaned allocations (info is from last known client):\n");
1423	mutex_lock(&dev->buffer_lock);
1424	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1425		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1426						     node);
1427		if (buffer->heap->id != heap->id)
1428			continue;
1429		total_size += buffer->size;
1430		if (!buffer->handle_count) {
1431			seq_printf(s, "%16.s %16u %16zu %d %d\n",
1432				   buffer->task_comm, buffer->pid,
1433				   buffer->size, buffer->kmap_cnt,
1434				   atomic_read(&buffer->ref.refcount));
1435			total_orphaned_size += buffer->size;
1436		}
1437	}
1438	mutex_unlock(&dev->buffer_lock);
1439	seq_puts(s, "----------------------------------------------------\n");
1440	seq_printf(s, "%16.s %16zu\n", "total orphaned",
1441		   total_orphaned_size);
1442	seq_printf(s, "%16.s %16zu\n", "total ", total_size);
1443	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1444		seq_printf(s, "%16.s %16zu\n", "deferred free",
1445				heap->free_list_size);
1446	seq_puts(s, "----------------------------------------------------\n");
1447
1448	if (heap->debug_show)
1449		heap->debug_show(heap, s, unused);
1450
1451	return 0;
1452}
1453
1454static int ion_debug_heap_open(struct inode *inode, struct file *file)
1455{
1456	return single_open(file, ion_debug_heap_show, inode->i_private);
1457}
1458
1459static const struct file_operations debug_heap_fops = {
1460	.open = ion_debug_heap_open,
1461	.read = seq_read,
1462	.llseek = seq_lseek,
1463	.release = single_release,
1464};
1465
1466#ifdef DEBUG_HEAP_SHRINKER
1467static int debug_shrink_set(void *data, u64 val)
1468{
1469	struct ion_heap *heap = data;
1470	struct shrink_control sc;
1471	int objs;
1472
1473	sc.gfp_mask = -1;
1474	sc.nr_to_scan = 0;
1475
1476	if (!val)
1477		return 0;
1478
1479	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1480	sc.nr_to_scan = objs;
1481
1482	heap->shrinker.shrink(&heap->shrinker, &sc);
1483	return 0;
1484}
1485
1486static int debug_shrink_get(void *data, u64 *val)
1487{
1488	struct ion_heap *heap = data;
1489	struct shrink_control sc;
1490	int objs;
1491
1492	sc.gfp_mask = -1;
1493	sc.nr_to_scan = 0;
1494
1495	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1496	*val = objs;
1497	return 0;
1498}
1499
1500DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1501			debug_shrink_set, "%llu\n");
1502#endif
1503
1504void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1505{
1506	struct dentry *debug_file;
1507
1508	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1509	    !heap->ops->unmap_dma)
1510		pr_err("%s: can not add heap with invalid ops struct.\n",
1511		       __func__);
1512
1513	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1514		ion_heap_init_deferred_free(heap);
1515
1516	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1517		ion_heap_init_shrinker(heap);
1518
1519	heap->dev = dev;
1520	down_write(&dev->lock);
1521	/* use negative heap->id to reverse the priority -- when traversing
1522	   the list later attempt higher id numbers first */
1523	plist_node_init(&heap->node, -heap->id);
1524	plist_add(&heap->node, &dev->heaps);
1525	debug_file = debugfs_create_file(heap->name, 0664,
1526					dev->heaps_debug_root, heap,
1527					&debug_heap_fops);
1528
1529	if (!debug_file) {
1530		char buf[256], *path;
1531
1532		path = dentry_path(dev->heaps_debug_root, buf, 256);
1533		pr_err("Failed to create heap debugfs at %s/%s\n",
1534			path, heap->name);
1535	}
1536
1537#ifdef DEBUG_HEAP_SHRINKER
1538	if (heap->shrinker.shrink) {
1539		char debug_name[64];
1540
1541		snprintf(debug_name, 64, "%s_shrink", heap->name);
1542		debug_file = debugfs_create_file(
1543			debug_name, 0644, dev->heaps_debug_root, heap,
1544			&debug_shrink_fops);
1545		if (!debug_file) {
1546			char buf[256], *path;
1547
1548			path = dentry_path(dev->heaps_debug_root, buf, 256);
1549			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1550				path, debug_name);
1551		}
1552	}
1553#endif
1554	up_write(&dev->lock);
1555}
1556
1557struct ion_device *ion_device_create(long (*custom_ioctl)
1558				     (struct ion_client *client,
1559				      unsigned int cmd,
1560				      unsigned long arg))
1561{
1562	struct ion_device *idev;
1563	int ret;
1564
1565	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1566	if (!idev)
1567		return ERR_PTR(-ENOMEM);
1568
1569	idev->dev.minor = MISC_DYNAMIC_MINOR;
1570	idev->dev.name = "ion";
1571	idev->dev.fops = &ion_fops;
1572	idev->dev.parent = NULL;
1573	ret = misc_register(&idev->dev);
1574	if (ret) {
1575		pr_err("ion: failed to register misc device.\n");
1576		return ERR_PTR(ret);
1577	}
1578
1579	idev->debug_root = debugfs_create_dir("ion", NULL);
1580	if (!idev->debug_root) {
1581		pr_err("ion: failed to create debugfs root directory.\n");
1582		goto debugfs_done;
1583	}
1584	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1585	if (!idev->heaps_debug_root) {
1586		pr_err("ion: failed to create debugfs heaps directory.\n");
1587		goto debugfs_done;
1588	}
1589	idev->clients_debug_root = debugfs_create_dir("clients",
1590						idev->debug_root);
1591	if (!idev->clients_debug_root)
1592		pr_err("ion: failed to create debugfs clients directory.\n");
1593
1594debugfs_done:
1595
1596	idev->custom_ioctl = custom_ioctl;
1597	idev->buffers = RB_ROOT;
1598	mutex_init(&idev->buffer_lock);
1599	init_rwsem(&idev->lock);
1600	plist_head_init(&idev->heaps);
1601	idev->clients = RB_ROOT;
1602	return idev;
1603}
1604
1605void ion_device_destroy(struct ion_device *dev)
1606{
1607	misc_deregister(&dev->dev);
1608	debugfs_remove_recursive(dev->debug_root);
1609	/* XXX need to free the heaps and clients ? */
1610	kfree(dev);
1611}
1612
1613void __init ion_reserve(struct ion_platform_data *data)
1614{
1615	int i;
1616
1617	for (i = 0; i < data->nr; i++) {
1618		if (data->heaps[i].size == 0)
1619			continue;
1620
1621		if (data->heaps[i].base == 0) {
1622			phys_addr_t paddr;
1623
1624			paddr = memblock_alloc_base(data->heaps[i].size,
1625						    data->heaps[i].align,
1626						    MEMBLOCK_ALLOC_ANYWHERE);
1627			if (!paddr) {
1628				pr_err("%s: error allocating memblock for heap %d\n",
1629					__func__, i);
1630				continue;
1631			}
1632			data->heaps[i].base = paddr;
1633		} else {
1634			int ret = memblock_reserve(data->heaps[i].base,
1635					       data->heaps[i].size);
1636			if (ret)
1637				pr_err("memblock reserve of %zx@%lx failed\n",
1638				       data->heaps[i].size,
1639				       data->heaps[i].base);
1640		}
1641		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1642			data->heaps[i].name,
1643			data->heaps[i].base,
1644			data->heaps[i].size);
1645	}
1646}
1647