ashmem.c revision c810a399798022d545191e2daaca0368623c15d3
/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
	struct list_head unpinned_list;	 /* this area's unpinned ranges */
	struct file *file;		 /* the shmem-based backing file */
	size_t size;			 /* size of the mapping, in bytes */
	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;	/* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of, and each individual, ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)
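
/*
 * For illustration: given a range covering pages [4, 9],
 * page_in_range(range, 8) holds, so page_range_in_range(range, 8, 12)
 * is true even though [8, 12] is only partially covered, while
 * page_range_subsumes_range(range, 8, 12) is false and
 * page_range_subsumes_range(range, 2, 12) is true.
 */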

static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
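
/*
 * For instance, shrinking a range from pages [2, 9] down to [2, 3]
 * removes six pages, so lru_count drops by six if the range is still
 * on the LRU (i.e. it has not been purged).
 */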

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update backing file pos, since f_op->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_op->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;
	vma->vm_flags |= VM_CAN_NONLINEAR;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
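
/*
 * Sketch of the userspace sequence this expects (illustrative only;
 * the device node is typically /dev/ashmem, and error handling is
 * omitted):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");	name is optional
 *	ioctl(fd, ASHMEM_SET_SIZE, len);		size is mandatory
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * Note that mappings do not pin the ashmem_area: the area (and its
 * name and pin state) dies when the last reference to the fd is closed.
 */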

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		do_fallocate(range->asma->file,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};
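
/*
 * Note: a 'seeks' value of DEFAULT_SEEKS * 4 advertises ashmem pages as
 * roughly four times costlier to recreate than a default slab object,
 * so shrink_slab() should apply proportionally less pressure here.
 */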

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
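
/*
 * Example: once a caller has dropped to PROT_READ via
 * ASHMEM_SET_PROT_MASK, a later request for PROT_READ | PROT_WRITE
 * fails with -EINVAL above, and an mmap() asking for PROT_WRITE is
 * refused with -EPERM in ashmem_mmap(); the mask only ever shrinks.
 */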

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart-1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
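
/*
 * Example of case #4 above: with a single unpinned range spanning pages
 * [2, 9], pinning [4, 6] allocates a new unpinned range [7, 9], shrinks
 * the original to [2, 3], and leaves the hole [4, 6] pinned.
 */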

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
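
/*
 * Example: with unpinned ranges [8, 9] and [2, 3] already present,
 * unpinning [3, 8] deletes both existing ranges (widening pgstart and
 * pgend to 2 and 9 along the way) and then inserts a single coalesced
 * range [2, 9] via range_alloc().
 */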

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
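
/*
 * Userspace sketch of the pin protocol (illustrative; 'fd' is an open
 * ashmem file descriptor that has already been sized and mapped):
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	zero len == "everything onward"
 *	... the kernel may now purge these pages under memory pressure ...
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		... contents were discarded; caller must regenerate ...
 */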

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					  sizeof(struct ashmem_area),
					  0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
					  sizeof(struct ashmem_range),
					  0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");