file.c revision fbd9b09a177a481eda256447c881f014f29034fe
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_acl.h>
#include <linux/reiserfs_xattr.h>
#include <asm/uaccess.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>

/*
** We pack the tails of files on file close, not at the time they are written.
** This implies an unnecessary copy of the tail and an unnecessary indirect item
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed.  If
** this is the last open file descriptor, the file is small enough to have
** a tail, and the tail is currently in an unformatted node, the tail is
** converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{

	struct reiserfs_transaction_handle th;
	int err;
	int jbegin_failure = 0;

	BUG_ON(!S_ISREG(inode->i_mode));

	/*
	 * Fast out for when nothing needs to be done: tail packing is not
	 * required (not the last user, packing not requested, or no tail to
	 * pack) and there are no preallocated blocks left to free.
	 */
	if ((atomic_read(&inode->i_count) > 1 ||
	     !(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
	     !tail_has_to_be_packed(inode)) &&
	    REISERFS_I(inode)->i_prealloc_count <= 0) {
		return 0;
	}

	mutex_lock(&inode->i_mutex);

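	/*
	 * If the file has ever been mmapped, skip tail packing on close:
	 * clearing i_pack_on_close here means the checks below will leave
	 * the tail in its unformatted node (packed tails and mapped pages
	 * do not mix well).
	 */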
	mutex_lock(&(REISERFS_I(inode)->i_mmap));
	if (REISERFS_I(inode)->i_flags & i_ever_mapped)
		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;

	reiserfs_write_lock(inode->i_sb);
	/* freeing preallocation only involves relogging blocks that
	 * are already in the current transaction.  preallocation gets
	 * freed at the end of each transaction, so it is impossible for
	 * us to log any additional blocks (including quota blocks)
	 */
	err = journal_begin(&th, inode->i_sb, 1);
	if (err) {
		/* uh oh, we can't allow the inode to go away while there
		 * are still preallocated blocks pending.  Try to join the
		 * aborted transaction.
		 */
		jbegin_failure = err;
		err = journal_join_abort(&th, inode->i_sb, 1);

		if (err) {
			/* hmpf, our choices here aren't good.  We can pin the
			 * inode, which will disallow unmount from ever
			 * happening; we can do nothing, which will corrupt
			 * random memory on unmount; or we can forcibly remove
			 * the file from the preallocation list, which will
			 * leak blocks on disk.  Let's pin the inode and let
			 * the admin know what is going on.
			 */
			igrab(inode);
			reiserfs_warning(inode->i_sb, "clm-9001",
					 "pinning inode %lu because the "
					 "preallocation can't be freed",
					 inode->i_ino);
			goto out;
		}
	}
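	/*
	 * Record the current transaction in the inode so that a later
	 * fsync/commit_for_inode knows which transaction to commit.
	 */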
	reiserfs_update_inode_transaction(inode);

#ifdef REISERFS_PREALLOCATE
	reiserfs_discard_prealloc(&th, inode);
#endif
	err = journal_end(&th, inode->i_sb, 1);

	/* copy back the error code from journal_begin */
	if (!err)
		err = jbegin_failure;

	if (!err && atomic_read(&inode->i_count) <= 1 &&
	    (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
	    tail_has_to_be_packed(inode)) {
		/* if the regular file is released by its last holder and it
		   has been appended (we append by unformatted node only) or
		   its direct item(s) had to be converted, then it may have
		   to be converted back from indirect to direct */
		err = reiserfs_truncate_file(inode, 0);
	}
      out:
	mutex_unlock(&(REISERFS_I(inode)->i_mmap));
	mutex_unlock(&inode->i_mutex);
	reiserfs_write_unlock(inode->i_sb);
	return err;
}

static int reiserfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode;

	inode = file->f_path.dentry->d_inode;
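	/*
	 * Note that this file has been mmapped.  reiserfs_file_release()
	 * checks i_ever_mapped under the same mutex and skips tail packing
	 * for files that were ever mapped.
	 */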
	mutex_lock(&(REISERFS_I(inode)->i_mmap));
	REISERFS_I(inode)->i_flags |= i_ever_mapped;
	mutex_unlock(&(REISERFS_I(inode)->i_mmap));

	return generic_file_mmap(file, vma);
}

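/*
 * ->truncate wrapper.  Unlike the release path above, which passes 0, this
 * asks reiserfs_truncate_file() to update the timestamps as well.
 */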
static void reiserfs_vfs_truncate_file(struct inode *inode)
{
	reiserfs_truncate_file(inode, 1);
}

/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync.  Can
 * be removed...
 */

static int reiserfs_sync_file(struct file *filp,
			      struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int err;
	int barrier_done;

	BUG_ON(!S_ISREG(inode->i_mode));
	err = sync_mapping_buffers(inode->i_mapping);
	reiserfs_write_lock(inode->i_sb);
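	/*
	 * If the forced commit did not already handle the barrier
	 * (barrier_done != 1) and barrier flushing is enabled, issue an
	 * explicit cache flush below.  A negative barrier_done is an error
	 * and is returned as-is.
	 */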
	barrier_done = reiserfs_commit_for_inode(inode);
	reiserfs_write_unlock(inode->i_sb);
	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
			BLKDEV_IFL_WAIT);
	if (barrier_done < 0)
		return barrier_done;
	return (err < 0) ? -EIO : 0;
}

/* taken from fs/buffer.c:__block_commit_write */
int reiserfs_commit_page(struct inode *inode, struct page *page,
			 unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;
	unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
	int new;
	int logit = reiserfs_file_data_log(inode);
	struct super_block *s = inode->i_sb;
	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
	struct reiserfs_transaction_handle th;
	int ret = 0;

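	/*
	 * t_trans_id == 0 marks the handle as inactive; it is only started
	 * (and ended) below when data logging is enabled for this inode.
	 */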
	th.t_trans_id = 0;
	blocksize = 1 << inode->i_blkbits;

	if (logit) {
		reiserfs_write_lock(s);
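		/*
		 * Reserve one journal block per buffer on the page plus one
		 * extra.  With 4K pages and a 1K block size, for example,
		 * bh_per_page is 4, so 5 blocks are reserved.
		 */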
		ret = journal_begin(&th, s, bh_per_page + 1);
		if (ret)
			goto drop_write_lock;
		reiserfs_update_inode_transaction(inode);
	}
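	/*
	 * Walk the circular list of buffers attached to this page.  Buffers
	 * entirely outside [from, to) are only checked for uptodateness;
	 * buffers inside the range are marked uptodate and either logged or
	 * dirtied, depending on whether data logging is enabled.
	 */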
	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		new = buffer_new(bh);
		clear_buffer_new(bh);
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			if (logit) {
				reiserfs_prepare_for_journal(s, bh, 1);
				journal_mark_dirty(&th, s, bh);
			} else if (!buffer_dirty(bh)) {
				mark_buffer_dirty(bh);
				/* do data=ordered on any page past the end
				 * of file and any buffer marked BH_New.
				 */
				if (reiserfs_data_ordered(inode->i_sb) &&
				    (new || page->index >= i_size_index)) {
					reiserfs_add_ordered_list(inode, bh);
				}
			}
		}
	}
	if (logit) {
		ret = journal_end(&th, s, bh_per_page + 1);
	      drop_write_lock:
		reiserfs_write_unlock(s);
	}
	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return ret;
}

/* Write @count bytes at position @ppos in the file indicated by @file
   from the buffer @buf.

   generic_file_write() is only appropriate for filesystems that are not
   seeking to optimize performance and want something simple that works.  It
   is not for serious use by general purpose filesystems, except the one it
   was written for (ext2/3).  This is for several reasons:

   * It has no understanding of any filesystem-specific optimizations.

   * It enters the filesystem repeatedly for each page that is written.

   * It depends on reiserfs_get_block(), which for reiserfs performs a costly
     search_by_key operation for each page it is supplied with.  By contrast,
     reiserfs_file_write() feeds as much data as possible to reiserfs at a
     time, which allows for fewer tree traversals.

   * Each indirect pointer insertion takes a lot of cpu, because it involves
     memory moves inside of blocks.

   * Asking the block allocation code for blocks one at a time is slightly
     less efficient.

   All of these reasons for not using only generic file write were understood
   back when reiserfs was first miscoded to use it, but we were in a hurry to
   make the code freeze, and so it couldn't be revised then.  This new code
   should finally make things right.

   Future Features: providing search_by_key with hints.

*/
static ssize_t reiserfs_file_write(struct file *file,	/* the file we are going to write into */
				   const char __user * buf,	/* pointer to user supplied data
								   (in userspace) */
				   size_t count,	/* amount of bytes to write */
				   loff_t * ppos	/* pointer to position in file that we start writing at. Should be updated to
							 * new current position before returning. */
				   )
{
	struct inode *inode = file->f_path.dentry->d_inode;	/* Inode of the file that we are writing to. */
	/* To simplify coding at this time, we store
	   locked pages in array for now */
	struct reiserfs_transaction_handle th;
	th.t_trans_id = 0;
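	/*
	 * The transaction handle is declared and zeroed but not used on this
	 * path: after the old-format size check below, the write is handed
	 * off to do_sync_write().
	 */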

	/* If a filesystem is converted from 3.5 to 3.6, we'll have v3.5 items
	 * lying around (most of the disk, in fact). Despite the filesystem
	 * now being a v3.6 format, the old items still can't support large
	 * file sizes. Catch this case here, as the rest of the VFS layer is
	 * oblivious to the different limitations between old and new items.
	 * reiserfs_setattr catches this for truncates. This chunk is lifted
	 * from generic_write_checks. */
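	/*
	 * MAX_NON_LFS is (2^31 - 1), so files made of old v3.5 items are
	 * capped at just under 2GB here: writes starting at or beyond that
	 * offset fail with -EFBIG, and writes crossing it are shortened.
	 */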
	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
	    *ppos + count > MAX_NON_LFS) {
		if (*ppos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (count > MAX_NON_LFS - (unsigned long)*ppos)
			count = MAX_NON_LFS - (unsigned long)*ppos;
	}

	return do_sync_write(file, buf, count, ppos);
}

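/*
 * Reads go through the generic sync/aio paths; writes funnel through
 * reiserfs_file_write(), which only enforces the old-format size limit
 * before handing off to do_sync_write().
 */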
const struct file_operations reiserfs_file_operations = {
	.read = do_sync_read,
	.write = reiserfs_file_write,
	.unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = reiserfs_compat_ioctl,
#endif
	.mmap = reiserfs_file_mmap,
	.open = dquot_file_open,
	.release = reiserfs_file_release,
	.fsync = reiserfs_sync_file,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.llseek = generic_file_llseek,
};

const struct inode_operations reiserfs_file_inode_operations = {
	.truncate = reiserfs_vfs_truncate_file,
	.setattr = reiserfs_setattr,
	.setxattr = reiserfs_setxattr,
	.getxattr = reiserfs_getxattr,
	.listxattr = reiserfs_listxattr,
	.removexattr = reiserfs_removexattr,
	.permission = reiserfs_permission,
};
