filemap_xip.c revision 2f1936b87783a3a56c9441b27b9ba7a747f11e8e
/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We use our own zero-filled page here to avoid interfering with other
 * users of ZERO_PAGE(), such as /dev/zero.
 */
static struct page *__xip_sparse_page;

static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page) {
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = page;
			else
				__free_page(page);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}
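
/*
 * Note on the allocation above: the unlocked NULL check means two tasks
 * can race to alloc_page(), but the spinlocked re-check guarantees that
 * only one page is ever published in __xip_sparse_page; the loser simply
 * frees its copy.  The page is allocated lazily on first use and is not
 * freed anywhere in this file.
 */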

/*
 * This is the file read routine for execute in place files; it uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * work.
 *
 * Note that the struct file * may be NULL: it is only used (when
 * non-NULL) to update the file's access time.  The readahead state
 * (_ra) is not used at all.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0;
	int error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len)
			nr = len;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse block: read back as zeroes */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the memory, so now we can copy it to user
		 * space.  A sparse block is read back as zeroes.  On a
		 * partial copy we bail out with -EFAULT rather than
		 * accounting the bytes that did make it.
		 */
		if (!zero)
			left = __copy_to_user(buf + copied, xip_mem + offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += nr;
		offset += nr;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
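
/*
 * For reference, the get_xip_mem() address_space operation used above
 * takes a page index and, optionally allocating a new block when
 * create != 0, returns a kernel virtual address and pfn for the backing
 * memory, or -ENODATA for a hole.  An illustrative sketch of a provider
 * ("myfs" and its internals are made-up names, not part of this file):
 *
 *	static int myfs_get_xip_mem(struct address_space *mapping,
 *				    pgoff_t pgoff, int create,
 *				    void **kmem, unsigned long *pfn)
 *	{
 *		// map pgoff to a block on the XIP-capable device,
 *		// fill in *kmem and *pfn on success, and return
 *		// -ENODATA for a hole when create == 0
 *		...
 *	}
 */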

/*
 * __xip_unmap is invoked from xip_file_fault and __xip_file_write.
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}
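
/*
 * The address computation above is the standard linear mapping rule
 * address = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT).  A made-up
 * worked example with 4 KiB pages: for a vma with vm_start = 0x40000000
 * and vm_pgoff = 0, file page 3 lives at
 * 0x40000000 + (3 << 12) = 0x40003000.
 */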

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute
 * in place.
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return VM_FAULT_OOM;

		page_cache_get(page);
		vmf->page = page;
		return 0;
	}
}
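
/*
 * The fault handler above has three outcomes:
 *
 *  1. get_xip_mem() finds backing memory: its pfn is inserted directly
 *     via vm_insert_mixed() and VM_FAULT_NOPAGE is returned (there is
 *     no struct page to hand back).
 *  2. Sparse block, but the mapping may be shared writable: allocate a
 *     new block, unmap the sparse page from all other vmas, then insert
 *     the new pfn as in case 1.
 *  3. Sparse block, read-only case: hand back the shared zero-filled
 *     __xip_sparse_page in vmf->page and let the generic fault code
 *     map it.
 *
 * Note that "goto found" jumps into the if-block past the declaration
 * of err; this is legal C (err is not a VLA), if a little unusual.
 */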

static struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
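
/*
 * VM_MIXEDMAP is what permits xip_file_fault() to use vm_insert_mixed()
 * above: it marks the vma as one that may mix struct-page-backed pages
 * (the sparse page) with raw pfn mappings (the XIP memory itself), and
 * vm_insert_mixed() will not operate on vmas without it.
 */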

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode	*inode = mapping->host;
	long		status = 0;
	size_t		bytes;
	ssize_t		written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block */
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			if (!status)
				/* unmap sparse page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;
			written += status;
			count -= status;
			pos += status;
			buf += status;
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}
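
/*
 * __copy_from_user_nocache() is used above because the destination is
 * the backing store itself, not page cache that will be read back soon:
 * where the architecture supports it (e.g. non-temporal stores on x86),
 * it avoids filling the CPU cache with data that won't be re-read.
 */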

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
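
/*
 * The exported entry points are meant to be wired into a filesystem's
 * file_operations for XIP-capable files.  An illustrative sketch (the
 * "myfs" name is made up; ext2's CONFIG_EXT2_FS_XIP support is the
 * in-tree user of this pattern):
 *
 *	const struct file_operations myfs_xip_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= xip_file_read,
 *		.write		= xip_file_write,
 *		.mmap		= xip_file_mmap,
 *	};
 */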

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page(), but uses
 * get_xip_mem() to obtain the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
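
/*
 * A filesystem calls xip_truncate_page() from its truncate path to zero
 * the tail of the last block, much as block_truncate_page() is used on
 * page-cache-backed filesystems.  Illustrative sketch (hypothetical
 * "myfs" helper, not part of this file):
 *
 *	static void myfs_truncate(struct inode *inode)
 *	{
 *		if (mapping_is_xip(inode->i_mapping))
 *			xip_truncate_page(inode->i_mapping, inode->i_size);
 *		...
 *	}
 */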