/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook for device-specific ioctls (ION_IOC_CUSTOM), may be NULL
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client task, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here protects both the handle tree and the handles
 * themselves; it should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

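/*
 * For buffers that are faulted into userspace on demand, the low bit of
 * each entry in buffer->pages doubles as a "dirty" flag: it is set when
 * the page is faulted in and cleared once the page has been synced for
 * device access.  The helpers below pack and unpack that bit.
 */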
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

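/*
 * kref release callback for ion_buffer: remove the buffer from the
 * device's buffer rbtree and either free it immediately or hand it to
 * the heap's deferred-free list.
 */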
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

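/*
 * kref release callback for ion_handle: drop any kernel mappings held
 * through this handle, remove it from the client's idr and rbtree, and
 * release its reference on the underlying buffer.
 */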
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

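/*
 * Find the client's handle for a given buffer, if one exists.  The
 * caller must hold client->lock.
 */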
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

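/*
 * Look up a handle by the user-visible id handed out by client->idr.
 * ion_handle_validate() uses this to confirm that a handle pointer still
 * belongs to the client; callers are expected to hold client->lock.
 */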
static struct ion_handle *ion_uhandle_get(struct ion_client *client, int id)
{
	return idr_find(&client->idr, id);
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	return (ion_uhandle_get(client, handle->id) == handle);
}

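/*
 * Assign the handle an id from the client's idr and insert it into the
 * client's rbtree of handles, which is keyed by buffer address.  Called
 * with client->lock held.
 */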
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int rc;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	do {
		int id;
		rc = idr_pre_get(&client->idr, GFP_KERNEL);
		if (!rc)
			return -ENOMEM;
		rc = idr_get_new_above(&client->idr, handle, 1, &id);
		handle->id = id;
	} while (rc == -EAGAIN);

	if (rc < 0)
		return rc;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
	mutex_unlock(&client->lock);

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

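/*
 * Kernel mappings are refcounted at two levels: per handle and per buffer.
 * The first ion_buffer_kmap_get() maps the buffer through the heap's
 * map_kernel op; the last ion_buffer_kmap_put() unmaps it.  buffer->lock
 * must be held around these helpers.
 */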
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_remove_all(&client->idr);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

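/*
 * Sync a buffer that uses faulted user mappings before a device accesses
 * it: write back every page the CPU has dirtied, then zap the existing
 * userspace mappings so the next CPU access faults in again and re-marks
 * its page dirty.
 */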
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

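/*
 * Fault handler used for cached buffers without explicit sync: mark the
 * faulting page dirty and install it in the user's VMA.
 */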
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);

	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     ion_buffer_page(buffer->pages[vmf->pgoff]));
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

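/*
 * dma-buf mmap op.  Buffers that rely on fault-time syncing get mapped
 * lazily through ion_vma_ops; everything else is mapped up front by the
 * heap's map_user op, write-combined if the buffer is uncached.
 */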
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

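/*
 * dma-buf exporter ops for ion buffers.  ion_import_dma_buf() and
 * ion_sync_for_device() compare a dmabuf's ops against this table to
 * recognize buffers that ion itself exported.
 */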
struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

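/*
 * Back end for ION_IOC_SYNC: flush the CPU view of the buffer behind the
 * given dma-buf fd so a device sees up-to-date data.
 */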
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_alloc(client, data.len, data.align,
					     data.heap_id_mask, data.flags);

		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		handle = ion_uhandle_get(client, data.handle);
		mutex_unlock(&client->lock);
		if (!handle)
			return -EINVAL;
		ion_free(client, handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_uhandle_get(client, data.handle);
		if (!handle)
			return -EINVAL;
		data.fd = ion_share_dma_buf_fd(client, handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

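/*
 * debugfs: per-heap summary of usage by client, plus orphaned buffers
 * that are only kept alive by dma-buf file descriptors.
 */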
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):"
		   "\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm,
				   buffer->pid, buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
				heap->free_list_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;
			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for "
				       "heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}