/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	         The entry in the LRU list
 * @unpinned:	         The entry in its area's unpinned list
 * @asma:	         The associated anonymous shared memory area
 * @pgstart:	         The starting page (inclusive)
 * @pgend:	         The ending page (inclusive)
 * @purged:	         The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/**
 * lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/**
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:     The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:     The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	   The associated ashmem_area
 * @prev_range:	   The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	   Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	   The starting page (inclusive)
 * @end:	   The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	 The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	    The associated ashmem_range being shrunk
 * @start:	    The starting page (inclusive) of the new range
 * @end:	    The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	   The inode of the backing file
 * @file:	   The backing file
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	      The inode of the backing file, ignored here
 * @file:	      The backing file
 *
 * Return: 0. This function never fails.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file:	   The associated backing file.
 * @buf:	   The user buffer that the data is read into
 * @len:	   The maximum number of bytes to read
 * @pos:	   The position of the first byte to read.
 *
 * Return: The number of bytes read, 0 at end of file, or a negative error
 * code on failure.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update the backing file's f_pos, since f_op->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

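/**
 * ashmem_llseek() - Repositions the file offset of an Ashmem-enabled file
 * @file:	   The associated backing file
 * @offset:	   The new offset, interpreted according to @origin
 * @origin:	   The seek origin (SEEK_SET, SEEK_CUR or SEEK_END)
 *
 * The seek is forwarded to the backing shmem file, so the size must have
 * been set and the backing file must already exist.
 *
 * Return: The new position on success, or a negative error code on failure.
 */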
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from the backing file, since f_op->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

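/*
 * calc_vm_may_flags() - Translates PROT_* protection bits into the
 * corresponding VM_MAY* flags, so that disallowed protections can be
 * masked out of a VMA's vm_flags.
 */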
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

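/**
 * ashmem_mmap() - Maps an Ashmem-enabled file into a process address space
 * @file:	   The associated backing file
 * @vma:	   The VMA being set up for the mapping
 *
 * The size must have been set (ASHMEM_SET_SIZE) before mapping, and the
 * requested protection bits must fall within the area's prot_mask. The
 * shmem backing file is created lazily on the first mmap().
 *
 * Return: 0 if successful, or a negative error code on failure.
 */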
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink_scan - our cache shrinker, called from mm/vmscan.c::shrink_slab
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or SHRINK_STOP if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		do_fallocate(range->asma->file,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

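/**
 * set_prot_mask() - Restricts the allowed protection bits of an ashmem area
 * @asma:	The ashmem area whose prot_mask is being changed
 * @prot:	The new protection mask, as PROT_* bits
 *
 * Protection bits can only be removed, never added back. If the caller's
 * personality has READ_IMPLIES_EXEC set, PROT_READ implies PROT_EXEC.
 *
 * Return: 0 if successful, or -EINVAL if @prot tries to add bits.
 */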
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

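/**
 * set_name() - Sets the name of an ashmem area from userspace
 * @asma:	The ashmem area being named
 * @name:	Userspace pointer to the new name (at most ASHMEM_NAME_LEN bytes)
 *
 * The name can only be set before the backing file exists, i.e. before the
 * first mmap().
 *
 * Return: 0 if successful, -EFAULT on a bad userspace pointer, or -EINVAL
 * if the area is already mapped.
 */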
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding ashmem_mutex across copy_from_user() might cause a data
	 * abort that needs mmap_sem. If another thread has invoked
	 * ashmem_mmap(), it will be holding that semaphore while waiting for
	 * ashmem_mutex, thereby leading to deadlock. So copy the name into a
	 * local variable, which needs no protection, without the mutex held,
	 * and only then copy the local variable into the structure member
	 * with the lock held.
	 */
	if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
		return -EFAULT;

	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
		local_name, ASHMEM_NAME_LEN);
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';
out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

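/**
 * get_name() - Copies the name of an ashmem area out to userspace
 * @asma:	The ashmem area whose name is requested
 * @name:	Userspace buffer of at least ASHMEM_NAME_LEN bytes
 *
 * If no name has been set, the default name (ASHMEM_NAME_DEF) is returned.
 *
 * Return: 0 if successful, or -EFAULT on a bad userspace pointer.
 */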
static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to userspace
	 * safely without holding any locks. So even if we end up waiting for
	 * mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland;
	 * no lock held.
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

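/*
 * ashmem_pin_unpin - validate an ASHMEM_PIN/ASHMEM_UNPIN/ASHMEM_GET_PIN_STATUS
 * request from userspace and dispatch it to the helper above.
 *
 * The struct ashmem_pin offset and len must be page-aligned and fall within
 * the (page-aligned) size of the area; a len of zero means "to the end of
 * the region".
 */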
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}

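/*
 * ashmem_ioctl - the ioctl entry point for /dev/ashmem; this is how
 * userspace names and sizes a region and pins/unpins its pages.
 *
 * Illustrative userspace usage, a sketch only (it assumes the matching
 * ASHMEM_* definitions from the uapi ashmem.h header and omits all error
 * handling; the misc device node is typically /dev/ashmem):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */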
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			nodes_setall(sc.nodes_to_scan);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support of 32-bit userspace on 64-bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

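/*
 * ashmem_init - module init: create the two slab caches, register the
 * /dev/ashmem misc device and then the shrinker.
 */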
static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;

out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}

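/*
 * ashmem_exit - module exit: tear everything down in the reverse order of
 * ashmem_init().
 */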
static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");