extents.c revision 0d14b098ce05c067e06eea5ed63d9b5c14656bdb
1/*
2 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3 * Written by Alex Tomas <alex@clusterfs.com>
4 *
5 * Architecture independence:
6 *   Copyright (c) 2005, Bull S.A.
7 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 */
22
23/*
24 * Extents support for EXT4
25 *
26 * TODO:
27 *   - ext4*_error() should be used in some situations
28 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
29 *   - smart tree reduction
30 */
31
32#include <linux/fs.h>
33#include <linux/time.h>
34#include <linux/jbd2.h>
35#include <linux/highuid.h>
36#include <linux/pagemap.h>
37#include <linux/quotaops.h>
38#include <linux/string.h>
39#include <linux/slab.h>
40#include <linux/falloc.h>
41#include <asm/uaccess.h>
42#include <linux/fiemap.h>
43#include "ext4_jbd2.h"
44#include "ext4_extents.h"
45#include "xattr.h"
46
47#include <trace/events/ext4.h>
48
49/*
50 * used by extent splitting.
51 */
52#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
53					due to ENOSPC */
54#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
55#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
56
57#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
58#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
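/*
 * A sketch of how these flags combine (based on how the split helpers
 * below use them): splitting an uninitialized extent while keeping both
 * halves uninitialized would pass
 *
 *	split_flag = EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2;
 *
 * while converting only the second half to initialized would drop
 * EXT4_EXT_MARK_UNINIT2 from the mask.  EXT4_EXT_MAY_ZEROOUT additionally
 * allows falling back to zeroing out the range if the split itself fails
 * with ENOSPC.
 */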
59
60static __le32 ext4_extent_block_csum(struct inode *inode,
61				     struct ext4_extent_header *eh)
62{
63	struct ext4_inode_info *ei = EXT4_I(inode);
64	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
65	__u32 csum;
66
67	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
68			   EXT4_EXTENT_TAIL_OFFSET(eh));
69	return cpu_to_le32(csum);
70}
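/*
 * The checksum computed above is a crc32c, seeded with the inode's
 * checksum seed, covering the header and the full entry area (all eh_max
 * slots); EXT4_EXTENT_TAIL_OFFSET() is the offset of the struct
 * ext4_extent_tail that stores the result, so the checksum field itself
 * is excluded.
 */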
71
72static int ext4_extent_block_csum_verify(struct inode *inode,
73					 struct ext4_extent_header *eh)
74{
75	struct ext4_extent_tail *et;
76
77	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
78		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
79		return 1;
80
81	et = find_ext4_extent_tail(eh);
82	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
83		return 0;
84	return 1;
85}
86
87static void ext4_extent_block_csum_set(struct inode *inode,
88				       struct ext4_extent_header *eh)
89{
90	struct ext4_extent_tail *et;
91
92	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
93		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
94		return;
95
96	et = find_ext4_extent_tail(eh);
97	et->et_checksum = ext4_extent_block_csum(inode, eh);
98}
99
100static int ext4_split_extent(handle_t *handle,
101				struct inode *inode,
102				struct ext4_ext_path *path,
103				struct ext4_map_blocks *map,
104				int split_flag,
105				int flags);
106
107static int ext4_split_extent_at(handle_t *handle,
108			     struct inode *inode,
109			     struct ext4_ext_path *path,
110			     ext4_lblk_t split,
111			     int split_flag,
112			     int flags);
113
114static int ext4_find_delayed_extent(struct inode *inode,
115				    struct extent_status *newes);
116
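/*
 * Make sure the handle has at least @needed credits, extending or
 * restarting the transaction if necessary.  Returns 0 if the handle
 * already has enough credits or could be extended in place, -EAGAIN if
 * the transaction had to be restarted (the caller must then re-read the
 * extent path, since the tree may have changed), or a negative error
 * code on failure.
 */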
117static int ext4_ext_truncate_extend_restart(handle_t *handle,
118					    struct inode *inode,
119					    int needed)
120{
121	int err;
122
123	if (!ext4_handle_valid(handle))
124		return 0;
125	if (handle->h_buffer_credits > needed)
126		return 0;
127	err = ext4_journal_extend(handle, needed);
128	if (err <= 0)
129		return err;
130	err = ext4_truncate_restart_trans(handle, inode, needed);
131	if (err == 0)
132		err = -EAGAIN;
133
134	return err;
135}
136
137/*
138 * could return:
139 *  - EROFS
140 *  - ENOMEM
141 */
142static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
143				struct ext4_ext_path *path)
144{
145	if (path->p_bh) {
146		/* path points to block */
147		return ext4_journal_get_write_access(handle, path->p_bh);
148	}
149	/* path points to leaf/index in inode body */
150	/* we use in-core data, no need to protect them */
151	return 0;
152}
153
154/*
155 * could return:
156 *  - EROFS
157 *  - ENOMEM
158 *  - EIO
159 */
160#define ext4_ext_dirty(handle, inode, path) \
161		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
162static int __ext4_ext_dirty(const char *where, unsigned int line,
163			    handle_t *handle, struct inode *inode,
164			    struct ext4_ext_path *path)
165{
166	int err;
167	if (path->p_bh) {
168		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
169		/* path points to block */
170		err = __ext4_handle_dirty_metadata(where, line, handle,
171						   inode, path->p_bh);
172	} else {
173		/* path points to leaf/index in inode body */
174		err = ext4_mark_inode_dirty(handle, inode);
175	}
176	return err;
177}
178
179static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
180			      struct ext4_ext_path *path,
181			      ext4_lblk_t block)
182{
183	if (path) {
184		int depth = path->p_depth;
185		struct ext4_extent *ex;
186
187		/*
188		 * Try to predict block placement assuming that we are
189		 * filling in a file which will eventually be
190		 * non-sparse --- i.e., in the case of libbfd writing
191 * an ELF object's sections out-of-order but in a way
192 * that eventually results in a contiguous object or
193		 * executable file, or some database extending a table
194		 * space file.  However, this is actually somewhat
195		 * non-ideal if we are writing a sparse file such as
196		 * qemu or KVM writing a raw image file that is going
197		 * to stay fairly sparse, since it will end up
198		 * fragmenting the file system's free space.  Maybe we
199 * should have some heuristics or some way to allow
200 * userspace to pass a hint to the file system,
201		 * especially if the latter case turns out to be
202		 * common.
203		 */
204		ex = path[depth].p_ext;
205		if (ex) {
206			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
207			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
208
209			if (block > ext_block)
210				return ext_pblk + (block - ext_block);
211			else
212				return ext_pblk - (ext_block - block);
213		}
214
215		/* it looks like index is empty;
216		 * try to find starting block from index itself */
217		if (path[depth].p_bh)
218			return path[depth].p_bh->b_blocknr;
219	}
220
221	/* OK. use inode's group */
222	return ext4_inode_to_goal_block(inode);
223}
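/*
 * Worked example (made-up numbers): if the extent found in the path maps
 * logical block 100 to physical block 5000 and the target is logical
 * block 164, the goal returned is 5000 + (164 - 100) = 5064, i.e. the
 * allocator is steered towards keeping the file physically contiguous.
 */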
224
225/*
226 * Allocation for a meta data block
227 */
228static ext4_fsblk_t
229ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
230			struct ext4_ext_path *path,
231			struct ext4_extent *ex, int *err, unsigned int flags)
232{
233	ext4_fsblk_t goal, newblock;
234
235	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
236	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
237					NULL, err);
238	return newblock;
239}
240
241static inline int ext4_ext_space_block(struct inode *inode, int check)
242{
243	int size;
244
245	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
246			/ sizeof(struct ext4_extent);
247#ifdef AGGRESSIVE_TEST
248	if (!check && size > 6)
249		size = 6;
250#endif
251	return size;
252}
253
254static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
255{
256	int size;
257
258	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
259			/ sizeof(struct ext4_extent_idx);
260#ifdef AGGRESSIVE_TEST
261	if (!check && size > 5)
262		size = 5;
263#endif
264	return size;
265}
266
267static inline int ext4_ext_space_root(struct inode *inode, int check)
268{
269	int size;
270
271	size = sizeof(EXT4_I(inode)->i_data);
272	size -= sizeof(struct ext4_extent_header);
273	size /= sizeof(struct ext4_extent);
274#ifdef AGGRESSIVE_TEST
275	if (!check && size > 3)
276		size = 3;
277#endif
278	return size;
279}
280
281static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
282{
283	int size;
284
285	size = sizeof(EXT4_I(inode)->i_data);
286	size -= sizeof(struct ext4_extent_header);
287	size /= sizeof(struct ext4_extent_idx);
288#ifdef AGGRESSIVE_TEST
289	if (!check && size > 4)
290		size = 4;
291#endif
292	return size;
293}
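/*
 * With the 12-byte on-disk header, extent and index records and a 4KiB
 * block size, the four helpers above (ignoring the AGGRESSIVE_TEST
 * clamps) work out to:
 *
 *	ext4_ext_space_block:     (4096 - 12) / 12 = 340 extents
 *	ext4_ext_space_block_idx: (4096 - 12) / 12 = 340 indexes
 *	ext4_ext_space_root:      (60 - 12) / 12   = 4 extents
 *	ext4_ext_space_root_idx:  (60 - 12) / 12   = 4 indexes
 *
 * where 60 bytes is sizeof(EXT4_I(inode)->i_data), the fifteen 32-bit
 * slots of the classic indirect-block map.
 */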
294
295/*
296 * Calculate the number of metadata blocks needed
297 * to allocate a new block at @lblock
298 * Worst case is one block per extent
299 */
300int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
301{
302	struct ext4_inode_info *ei = EXT4_I(inode);
303	int idxs;
304
305	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
306		/ sizeof(struct ext4_extent_idx));
307
308	/*
309	 * If the new delayed allocation block is contiguous with the
310	 * previous da block, it can share index blocks with the
311	 * previous block, so we only need to allocate a new index
312	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
313	 * an additional index block, and at idxs**3 blocks, yet
314	 * another index block.
315	 */
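	/*
	 * Worked example, assuming 4KiB blocks: idxs = (4096 - 12) / 12
	 * = 340, so while a delayed-allocation run stays contiguous a
	 * new leaf-index block is charged roughly every 340th block, a
	 * second-level index roughly every 340^2 blocks, and every other
	 * block in the run costs no additional metadata.
	 */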
316	if (ei->i_da_metadata_calc_len &&
317	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
318		int num = 0;
319
320		if ((ei->i_da_metadata_calc_len % idxs) == 0)
321			num++;
322		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
323			num++;
324		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
325			num++;
326			ei->i_da_metadata_calc_len = 0;
327		} else
328			ei->i_da_metadata_calc_len++;
329		ei->i_da_metadata_calc_last_lblock++;
330		return num;
331	}
332
333	/*
334	 * In the worst case we need a new set of index blocks at
335	 * every level of the inode's extent tree.
336	 */
337	ei->i_da_metadata_calc_len = 1;
338	ei->i_da_metadata_calc_last_lblock = lblock;
339	return ext_depth(inode) + 1;
340}
341
342static int
343ext4_ext_max_entries(struct inode *inode, int depth)
344{
345	int max;
346
347	if (depth == ext_depth(inode)) {
348		if (depth == 0)
349			max = ext4_ext_space_root(inode, 1);
350		else
351			max = ext4_ext_space_root_idx(inode, 1);
352	} else {
353		if (depth == 0)
354			max = ext4_ext_space_block(inode, 1);
355		else
356			max = ext4_ext_space_block_idx(inode, 1);
357	}
358
359	return max;
360}
361
362static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
363{
364	ext4_fsblk_t block = ext4_ext_pblock(ext);
365	int len = ext4_ext_get_actual_len(ext);
366
367	if (len == 0)
368		return 0;
369	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
370}
371
372static int ext4_valid_extent_idx(struct inode *inode,
373				struct ext4_extent_idx *ext_idx)
374{
375	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
376
377	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
378}
379
380static int ext4_valid_extent_entries(struct inode *inode,
381				struct ext4_extent_header *eh,
382				int depth)
383{
384	unsigned short entries;
385	if (eh->eh_entries == 0)
386		return 1;
387
388	entries = le16_to_cpu(eh->eh_entries);
389
390	if (depth == 0) {
391		/* leaf entries */
392		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
393		while (entries) {
394			if (!ext4_valid_extent(inode, ext))
395				return 0;
396			ext++;
397			entries--;
398		}
399	} else {
400		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
401		while (entries) {
402			if (!ext4_valid_extent_idx(inode, ext_idx))
403				return 0;
404			ext_idx++;
405			entries--;
406		}
407	}
408	return 1;
409}
410
411static int __ext4_ext_check(const char *function, unsigned int line,
412			    struct inode *inode, struct ext4_extent_header *eh,
413			    int depth)
414{
415	const char *error_msg;
416	int max = 0;
417
418	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
419		error_msg = "invalid magic";
420		goto corrupted;
421	}
422	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
423		error_msg = "unexpected eh_depth";
424		goto corrupted;
425	}
426	if (unlikely(eh->eh_max == 0)) {
427		error_msg = "invalid eh_max";
428		goto corrupted;
429	}
430	max = ext4_ext_max_entries(inode, depth);
431	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
432		error_msg = "too large eh_max";
433		goto corrupted;
434	}
435	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
436		error_msg = "invalid eh_entries";
437		goto corrupted;
438	}
439	if (!ext4_valid_extent_entries(inode, eh, depth)) {
440		error_msg = "invalid extent entries";
441		goto corrupted;
442	}
443	/* Verify checksum on non-root extent tree nodes */
444	if (ext_depth(inode) != depth &&
445	    !ext4_extent_block_csum_verify(inode, eh)) {
446		error_msg = "extent tree corrupted";
447		goto corrupted;
448	}
449	return 0;
450
451corrupted:
452	ext4_error_inode(inode, function, line, 0,
453			"bad header/extent: %s - magic %x, "
454			"entries %u, max %u(%u), depth %u(%u)",
455			error_msg, le16_to_cpu(eh->eh_magic),
456			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
457			max, le16_to_cpu(eh->eh_depth), depth);
458
459	return -EIO;
460}
461
462#define ext4_ext_check(inode, eh, depth)	\
463	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)
464
465int ext4_ext_check_inode(struct inode *inode)
466{
467	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
468}
469
470static int __ext4_ext_check_block(const char *function, unsigned int line,
471				  struct inode *inode,
472				  struct ext4_extent_header *eh,
473				  int depth,
474				  struct buffer_head *bh)
475{
476	int ret;
477
478	if (buffer_verified(bh))
479		return 0;
480	ret = ext4_ext_check(inode, eh, depth);
481	if (ret)
482		return ret;
483	set_buffer_verified(bh);
484	return ret;
485}
486
487#define ext4_ext_check_block(inode, eh, depth, bh)	\
488	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
489
490#ifdef EXT_DEBUG
491static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
492{
493	int k, l = path->p_depth;
494
495	ext_debug("path:");
496	for (k = 0; k <= l; k++, path++) {
497		if (path->p_idx) {
498		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
499			    ext4_idx_pblock(path->p_idx));
500		} else if (path->p_ext) {
501			ext_debug("  %d:[%d]%d:%llu ",
502				  le32_to_cpu(path->p_ext->ee_block),
503				  ext4_ext_is_uninitialized(path->p_ext),
504				  ext4_ext_get_actual_len(path->p_ext),
505				  ext4_ext_pblock(path->p_ext));
506		} else
507			ext_debug("  []");
508	}
509	ext_debug("\n");
510}
511
512static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
513{
514	int depth = ext_depth(inode);
515	struct ext4_extent_header *eh;
516	struct ext4_extent *ex;
517	int i;
518
519	if (!path)
520		return;
521
522	eh = path[depth].p_hdr;
523	ex = EXT_FIRST_EXTENT(eh);
524
525	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
526
527	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
528		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
529			  ext4_ext_is_uninitialized(ex),
530			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
531	}
532	ext_debug("\n");
533}
534
535static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
536			ext4_fsblk_t newblock, int level)
537{
538	int depth = ext_depth(inode);
539	struct ext4_extent *ex;
540
541	if (depth != level) {
542		struct ext4_extent_idx *idx;
543		idx = path[level].p_idx;
544		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
545			ext_debug("%d: move %d:%llu in new index %llu\n", level,
546					le32_to_cpu(idx->ei_block),
547					ext4_idx_pblock(idx),
548					newblock);
549			idx++;
550		}
551
552		return;
553	}
554
555	ex = path[depth].p_ext;
556	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
557		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
558				le32_to_cpu(ex->ee_block),
559				ext4_ext_pblock(ex),
560				ext4_ext_is_uninitialized(ex),
561				ext4_ext_get_actual_len(ex),
562				newblock);
563		ex++;
564	}
565}
566
567#else
568#define ext4_ext_show_path(inode, path)
569#define ext4_ext_show_leaf(inode, path)
570#define ext4_ext_show_move(inode, path, newblock, level)
571#endif
572
573void ext4_ext_drop_refs(struct ext4_ext_path *path)
574{
575	int depth = path->p_depth;
576	int i;
577
578	for (i = 0; i <= depth; i++, path++)
579		if (path->p_bh) {
580			brelse(path->p_bh);
581			path->p_bh = NULL;
582		}
583}
584
585/*
586 * ext4_ext_binsearch_idx:
587 * binary search for the closest index of the given block
588 * the header must be checked before calling this
589 */
590static void
591ext4_ext_binsearch_idx(struct inode *inode,
592			struct ext4_ext_path *path, ext4_lblk_t block)
593{
594	struct ext4_extent_header *eh = path->p_hdr;
595	struct ext4_extent_idx *r, *l, *m;
596
597
598	ext_debug("binsearch for %u(idx):  ", block);
599
600	l = EXT_FIRST_INDEX(eh) + 1;
601	r = EXT_LAST_INDEX(eh);
602	while (l <= r) {
603		m = l + (r - l) / 2;
604		if (block < le32_to_cpu(m->ei_block))
605			r = m - 1;
606		else
607			l = m + 1;
608		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
609				m, le32_to_cpu(m->ei_block),
610				r, le32_to_cpu(r->ei_block));
611	}
612
613	path->p_idx = l - 1;
614	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
615		  ext4_idx_pblock(path->p_idx));
616
617#ifdef CHECK_BINSEARCH
618	{
619		struct ext4_extent_idx *chix, *ix;
620		int k;
621
622		chix = ix = EXT_FIRST_INDEX(eh);
623		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
624		  if (k != 0 &&
625		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
626				printk(KERN_DEBUG "k=%d, ix=0x%p, "
627				       "first=0x%p\n", k,
628				       ix, EXT_FIRST_INDEX(eh));
629				printk(KERN_DEBUG "%u <= %u\n",
630				       le32_to_cpu(ix->ei_block),
631				       le32_to_cpu(ix[-1].ei_block));
632			}
633			BUG_ON(k && le32_to_cpu(ix->ei_block)
634					   <= le32_to_cpu(ix[-1].ei_block));
635			if (block < le32_to_cpu(ix->ei_block))
636				break;
637			chix = ix;
638		}
639		BUG_ON(chix != path->p_idx);
640	}
641#endif
642
643}
644
645/*
646 * ext4_ext_binsearch:
647 * binary search for closest extent of the given block
648 * the header must be checked before calling this
649 */
650static void
651ext4_ext_binsearch(struct inode *inode,
652		struct ext4_ext_path *path, ext4_lblk_t block)
653{
654	struct ext4_extent_header *eh = path->p_hdr;
655	struct ext4_extent *r, *l, *m;
656
657	if (eh->eh_entries == 0) {
658		/*
659		 * this leaf is empty:
660		 * we get such a leaf in split/add case
661		 */
662		return;
663	}
664
665	ext_debug("binsearch for %u:  ", block);
666
667	l = EXT_FIRST_EXTENT(eh) + 1;
668	r = EXT_LAST_EXTENT(eh);
669
670	while (l <= r) {
671		m = l + (r - l) / 2;
672		if (block < le32_to_cpu(m->ee_block))
673			r = m - 1;
674		else
675			l = m + 1;
676		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
677				m, le32_to_cpu(m->ee_block),
678				r, le32_to_cpu(r->ee_block));
679	}
680
681	path->p_ext = l - 1;
682	ext_debug("  -> %d:%llu:[%d]%d ",
683			le32_to_cpu(path->p_ext->ee_block),
684			ext4_ext_pblock(path->p_ext),
685			ext4_ext_is_uninitialized(path->p_ext),
686			ext4_ext_get_actual_len(path->p_ext));
687
688#ifdef CHECK_BINSEARCH
689	{
690		struct ext4_extent *chex, *ex;
691		int k;
692
693		chex = ex = EXT_FIRST_EXTENT(eh);
694		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
695			BUG_ON(k && le32_to_cpu(ex->ee_block)
696					  <= le32_to_cpu(ex[-1].ee_block));
697			if (block < le32_to_cpu(ex->ee_block))
698				break;
699			chex = ex;
700		}
701		BUG_ON(chex != path->p_ext);
702	}
703#endif
704
705}
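/*
 * Both binary searches above converge on the *last* entry whose start is
 * less than or equal to the target block (or on the first entry if the
 * target precedes all of them).  For example, in a leaf whose extents
 * start at logical blocks 0, 100 and 200, a lookup of block 150 leaves
 * path->p_ext pointing at the extent starting at 100; whether that extent
 * actually covers block 150 depends on its length, which the caller still
 * has to check.
 */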
706
707int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
708{
709	struct ext4_extent_header *eh;
710
711	eh = ext_inode_hdr(inode);
712	eh->eh_depth = 0;
713	eh->eh_entries = 0;
714	eh->eh_magic = EXT4_EXT_MAGIC;
715	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
716	ext4_mark_inode_dirty(handle, inode);
717	return 0;
718}
719
720struct ext4_ext_path *
721ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
722					struct ext4_ext_path *path)
723{
724	struct ext4_extent_header *eh;
725	struct buffer_head *bh;
726	short int depth, i, ppos = 0, alloc = 0;
727	int ret;
728
729	eh = ext_inode_hdr(inode);
730	depth = ext_depth(inode);
731
732	/* account possible depth increase */
733	if (!path) {
734		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
735				GFP_NOFS);
736		if (!path)
737			return ERR_PTR(-ENOMEM);
738		alloc = 1;
739	}
740	path[0].p_hdr = eh;
741	path[0].p_bh = NULL;
742
743	i = depth;
744	/* walk through the tree */
745	while (i) {
746		ext_debug("depth %d: num %d, max %d\n",
747			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
748
749		ext4_ext_binsearch_idx(inode, path + ppos, block);
750		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
751		path[ppos].p_depth = i;
752		path[ppos].p_ext = NULL;
753
754		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
755		if (unlikely(!bh)) {
756			ret = -ENOMEM;
757			goto err;
758		}
759		if (!bh_uptodate_or_lock(bh)) {
760			trace_ext4_ext_load_extent(inode, block,
761						path[ppos].p_block);
762			ret = bh_submit_read(bh);
763			if (ret < 0) {
764				put_bh(bh);
765				goto err;
766			}
767		}
768		eh = ext_block_hdr(bh);
769		ppos++;
770		if (unlikely(ppos > depth)) {
771			put_bh(bh);
772			EXT4_ERROR_INODE(inode,
773					 "ppos %d > depth %d", ppos, depth);
774			ret = -EIO;
775			goto err;
776		}
777		path[ppos].p_bh = bh;
778		path[ppos].p_hdr = eh;
779		i--;
780
781		ret = ext4_ext_check_block(inode, eh, i, bh);
782		if (ret < 0)
783			goto err;
784	}
785
786	path[ppos].p_depth = i;
787	path[ppos].p_ext = NULL;
788	path[ppos].p_idx = NULL;
789
790	/* find extent */
791	ext4_ext_binsearch(inode, path + ppos, block);
792	/* if not an empty leaf */
793	if (path[ppos].p_ext)
794		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
795
796	ext4_ext_show_path(inode, path);
797
798	return path;
799
800err:
801	ext4_ext_drop_refs(path);
802	if (alloc)
803		kfree(path);
804	return ERR_PTR(ret);
805}
806
807/*
808 * ext4_ext_insert_index:
809 * insert new index [@logical;@ptr] into the block at @curp;
810 * check where to insert: before @curp or after @curp
811 */
812static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
813				 struct ext4_ext_path *curp,
814				 int logical, ext4_fsblk_t ptr)
815{
816	struct ext4_extent_idx *ix;
817	int len, err;
818
819	err = ext4_ext_get_access(handle, inode, curp);
820	if (err)
821		return err;
822
823	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
824		EXT4_ERROR_INODE(inode,
825				 "logical %d == ei_block %d!",
826				 logical, le32_to_cpu(curp->p_idx->ei_block));
827		return -EIO;
828	}
829
830	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
831			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
832		EXT4_ERROR_INODE(inode,
833				 "eh_entries %d >= eh_max %d!",
834				 le16_to_cpu(curp->p_hdr->eh_entries),
835				 le16_to_cpu(curp->p_hdr->eh_max));
836		return -EIO;
837	}
838
839	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
840		/* insert after */
841		ext_debug("insert new index %d after: %llu\n", logical, ptr);
842		ix = curp->p_idx + 1;
843	} else {
844		/* insert before */
845		ext_debug("insert new index %d before: %llu\n", logical, ptr);
846		ix = curp->p_idx;
847	}
848
849	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
850	BUG_ON(len < 0);
851	if (len > 0) {
852		ext_debug("insert new index %d: "
853				"move %d indices from 0x%p to 0x%p\n",
854				logical, len, ix, ix + 1);
855		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
856	}
857
858	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
859		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
860		return -EIO;
861	}
862
863	ix->ei_block = cpu_to_le32(logical);
864	ext4_idx_store_pblock(ix, ptr);
865	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
866
867	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
868		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
869		return -EIO;
870	}
871
872	err = ext4_ext_dirty(handle, inode, curp);
873	ext4_std_error(inode->i_sb, err);
874
875	return err;
876}
877
878/*
879 * ext4_ext_split:
880 * inserts new subtree into the path, using free index entry
881 * at depth @at:
882 * - allocates all needed blocks (new leaf and all intermediate index blocks)
883 * - makes decision where to split
884 * - moves remaining extents and index entries (right to the split point)
885 *   into the newly allocated blocks
886 * - initializes subtree
887 */
888static int ext4_ext_split(handle_t *handle, struct inode *inode,
889			  unsigned int flags,
890			  struct ext4_ext_path *path,
891			  struct ext4_extent *newext, int at)
892{
893	struct buffer_head *bh = NULL;
894	int depth = ext_depth(inode);
895	struct ext4_extent_header *neh;
896	struct ext4_extent_idx *fidx;
897	int i = at, k, m, a;
898	ext4_fsblk_t newblock, oldblock;
899	__le32 border;
900	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
901	int err = 0;
902
903	/* make decision: where to split? */
904	/* FIXME: now decision is simplest: at current extent */
905
906	/* if current leaf will be split, then we should use
907	 * border from split point */
908	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
909		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
910		return -EIO;
911	}
912	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
913		border = path[depth].p_ext[1].ee_block;
914		ext_debug("leaf will be split."
915				" next leaf starts at %d\n",
916				  le32_to_cpu(border));
917	} else {
918		border = newext->ee_block;
919		ext_debug("leaf will be added."
920				" next leaf starts at %d\n",
921				le32_to_cpu(border));
922	}
923
924	/*
925	 * If an error occurs, we break off processing
926	 * and mark the filesystem read-only.  The index won't
927	 * be inserted and the tree will be left in a consistent
928	 * state.  The next mount will repair the buffers too.
929	 */
930
931	/*
932	 * Get array to track all allocated blocks.
933	 * We need this to handle errors and free blocks
934	 * upon them.
935	 */
936	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
937	if (!ablocks)
938		return -ENOMEM;
939
940	/* allocate all needed blocks */
941	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
942	for (a = 0; a < depth - at; a++) {
943		newblock = ext4_ext_new_meta_block(handle, inode, path,
944						   newext, &err, flags);
945		if (newblock == 0)
946			goto cleanup;
947		ablocks[a] = newblock;
948	}
949
950	/* initialize new leaf */
951	newblock = ablocks[--a];
952	if (unlikely(newblock == 0)) {
953		EXT4_ERROR_INODE(inode, "newblock == 0!");
954		err = -EIO;
955		goto cleanup;
956	}
957	bh = sb_getblk(inode->i_sb, newblock);
958	if (unlikely(!bh)) {
959		err = -ENOMEM;
960		goto cleanup;
961	}
962	lock_buffer(bh);
963
964	err = ext4_journal_get_create_access(handle, bh);
965	if (err)
966		goto cleanup;
967
968	neh = ext_block_hdr(bh);
969	neh->eh_entries = 0;
970	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
971	neh->eh_magic = EXT4_EXT_MAGIC;
972	neh->eh_depth = 0;
973
974	/* move remainder of path[depth] to the new leaf */
975	if (unlikely(path[depth].p_hdr->eh_entries !=
976		     path[depth].p_hdr->eh_max)) {
977		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
978				 path[depth].p_hdr->eh_entries,
979				 path[depth].p_hdr->eh_max);
980		err = -EIO;
981		goto cleanup;
982	}
983	/* start copy from next extent */
984	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
985	ext4_ext_show_move(inode, path, newblock, depth);
986	if (m) {
987		struct ext4_extent *ex;
988		ex = EXT_FIRST_EXTENT(neh);
989		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
990		le16_add_cpu(&neh->eh_entries, m);
991	}
992
993	ext4_extent_block_csum_set(inode, neh);
994	set_buffer_uptodate(bh);
995	unlock_buffer(bh);
996
997	err = ext4_handle_dirty_metadata(handle, inode, bh);
998	if (err)
999		goto cleanup;
1000	brelse(bh);
1001	bh = NULL;
1002
1003	/* correct old leaf */
1004	if (m) {
1005		err = ext4_ext_get_access(handle, inode, path + depth);
1006		if (err)
1007			goto cleanup;
1008		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1009		err = ext4_ext_dirty(handle, inode, path + depth);
1010		if (err)
1011			goto cleanup;
1012
1013	}
1014
1015	/* create intermediate indexes */
1016	k = depth - at - 1;
1017	if (unlikely(k < 0)) {
1018		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1019		err = -EIO;
1020		goto cleanup;
1021	}
1022	if (k)
1023		ext_debug("create %d intermediate indices\n", k);
1024	/* insert new index into current index block */
1025	/* current depth stored in i var */
1026	i = depth - 1;
1027	while (k--) {
1028		oldblock = newblock;
1029		newblock = ablocks[--a];
1030		bh = sb_getblk(inode->i_sb, newblock);
1031		if (unlikely(!bh)) {
1032			err = -ENOMEM;
1033			goto cleanup;
1034		}
1035		lock_buffer(bh);
1036
1037		err = ext4_journal_get_create_access(handle, bh);
1038		if (err)
1039			goto cleanup;
1040
1041		neh = ext_block_hdr(bh);
1042		neh->eh_entries = cpu_to_le16(1);
1043		neh->eh_magic = EXT4_EXT_MAGIC;
1044		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1045		neh->eh_depth = cpu_to_le16(depth - i);
1046		fidx = EXT_FIRST_INDEX(neh);
1047		fidx->ei_block = border;
1048		ext4_idx_store_pblock(fidx, oldblock);
1049
1050		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
1051				i, newblock, le32_to_cpu(border), oldblock);
1052
1053		/* move remainder of path[i] to the new index block */
1054		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1055					EXT_LAST_INDEX(path[i].p_hdr))) {
1056			EXT4_ERROR_INODE(inode,
1057					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1058					 le32_to_cpu(path[i].p_ext->ee_block));
1059			err = -EIO;
1060			goto cleanup;
1061		}
1062		/* start copy indexes */
1063		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1064		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
1065				EXT_MAX_INDEX(path[i].p_hdr));
1066		ext4_ext_show_move(inode, path, newblock, i);
1067		if (m) {
1068			memmove(++fidx, path[i].p_idx,
1069				sizeof(struct ext4_extent_idx) * m);
1070			le16_add_cpu(&neh->eh_entries, m);
1071		}
1072		ext4_extent_block_csum_set(inode, neh);
1073		set_buffer_uptodate(bh);
1074		unlock_buffer(bh);
1075
1076		err = ext4_handle_dirty_metadata(handle, inode, bh);
1077		if (err)
1078			goto cleanup;
1079		brelse(bh);
1080		bh = NULL;
1081
1082		/* correct old index */
1083		if (m) {
1084			err = ext4_ext_get_access(handle, inode, path + i);
1085			if (err)
1086				goto cleanup;
1087			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1088			err = ext4_ext_dirty(handle, inode, path + i);
1089			if (err)
1090				goto cleanup;
1091		}
1092
1093		i--;
1094	}
1095
1096	/* insert new index */
1097	err = ext4_ext_insert_index(handle, inode, path + at,
1098				    le32_to_cpu(border), newblock);
1099
1100cleanup:
1101	if (bh) {
1102		if (buffer_locked(bh))
1103			unlock_buffer(bh);
1104		brelse(bh);
1105	}
1106
1107	if (err) {
1108		/* free all allocated blocks in error case */
1109		for (i = 0; i < depth; i++) {
1110			if (!ablocks[i])
1111				continue;
1112			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1113					 EXT4_FREE_BLOCKS_METADATA);
1114		}
1115	}
1116	kfree(ablocks);
1117
1118	return err;
1119}
1120
1121/*
1122 * ext4_ext_grow_indepth:
1123 * implements tree growing procedure:
1124 * - allocates new block
1125 * - moves top-level data (index block or leaf) into the new block
1126 * - initializes new top-level, creating index that points to the
1127 *   just created block
1128 */
1129static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1130				 unsigned int flags,
1131				 struct ext4_extent *newext)
1132{
1133	struct ext4_extent_header *neh;
1134	struct buffer_head *bh;
1135	ext4_fsblk_t newblock;
1136	int err = 0;
1137
1138	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
1139		newext, &err, flags);
1140	if (newblock == 0)
1141		return err;
1142
1143	bh = sb_getblk(inode->i_sb, newblock);
1144	if (unlikely(!bh))
1145		return -ENOMEM;
1146	lock_buffer(bh);
1147
1148	err = ext4_journal_get_create_access(handle, bh);
1149	if (err) {
1150		unlock_buffer(bh);
1151		goto out;
1152	}
1153
1154	/* move top-level index/leaf into new block */
1155	memmove(bh->b_data, EXT4_I(inode)->i_data,
1156		sizeof(EXT4_I(inode)->i_data));
1157
1158	/* set size of new block */
1159	neh = ext_block_hdr(bh);
1160	/* old root could have indexes or leaves
1161	 * so calculate eh_max the right way */
1162	if (ext_depth(inode))
1163		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1164	else
1165		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1166	neh->eh_magic = EXT4_EXT_MAGIC;
1167	ext4_extent_block_csum_set(inode, neh);
1168	set_buffer_uptodate(bh);
1169	unlock_buffer(bh);
1170
1171	err = ext4_handle_dirty_metadata(handle, inode, bh);
1172	if (err)
1173		goto out;
1174
1175	/* Update top-level index: num,max,pointer */
1176	neh = ext_inode_hdr(inode);
1177	neh->eh_entries = cpu_to_le16(1);
1178	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
1179	if (neh->eh_depth == 0) {
1180		/* Root extent block becomes index block */
1181		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1182		EXT_FIRST_INDEX(neh)->ei_block =
1183			EXT_FIRST_EXTENT(neh)->ee_block;
1184	}
1185	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
1186		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
1187		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1188		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1189
1190	le16_add_cpu(&neh->eh_depth, 1);
1191	ext4_mark_inode_dirty(handle, inode);
1192out:
1193	brelse(bh);
1194
1195	return err;
1196}
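/*
 * Schematically (a sketch, with made-up block numbers), growing from
 * depth 0 to depth 1 turns
 *
 *	i_data:  [hdr | ex0 ex1 ex2 ex3]
 * into
 *	i_data:  [hdr | idx -> block N]
 *	block N: [hdr | ex0 ex1 ex2 ex3 ...free slots...]
 *
 * Only the root is rewritten: the existing top-level entries move
 * verbatim into the newly allocated block, which is why the copy above
 * is a single memmove() of i_data.
 */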
1197
1198/*
1199 * ext4_ext_create_new_leaf:
1200 * finds an empty index slot and adds a new leaf.
1201 * if no free index is found, then it requests growing the tree in depth.
1202 */
1203static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1204				    unsigned int flags,
1205				    struct ext4_ext_path *path,
1206				    struct ext4_extent *newext)
1207{
1208	struct ext4_ext_path *curp;
1209	int depth, i, err = 0;
1210
1211repeat:
1212	i = depth = ext_depth(inode);
1213
1214	/* walk up the tree and look for a free index entry */
1215	curp = path + depth;
1216	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1217		i--;
1218		curp--;
1219	}
1220
1221	/* we use already allocated block for index block,
1222	 * so subsequent data blocks should be contiguous */
1223	if (EXT_HAS_FREE_INDEX(curp)) {
1224		/* if we found index with free entry, then use that
1225		 * entry: create all needed subtree and add new leaf */
1226		err = ext4_ext_split(handle, inode, flags, path, newext, i);
1227		if (err)
1228			goto out;
1229
1230		/* refill path */
1231		ext4_ext_drop_refs(path);
1232		path = ext4_ext_find_extent(inode,
1233				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1234				    path);
1235		if (IS_ERR(path))
1236			err = PTR_ERR(path);
1237	} else {
1238		/* tree is full, time to grow in depth */
1239		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
1240		if (err)
1241			goto out;
1242
1243		/* refill path */
1244		ext4_ext_drop_refs(path);
1245		path = ext4_ext_find_extent(inode,
1246				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1247				    path);
1248		if (IS_ERR(path)) {
1249			err = PTR_ERR(path);
1250			goto out;
1251		}
1252
1253		/*
1254		 * only first (depth 0 -> 1) produces free space;
1255		 * in all other cases we have to split the grown tree
1256		 */
1257		depth = ext_depth(inode);
1258		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1259			/* now we need to split */
1260			goto repeat;
1261		}
1262	}
1263
1264out:
1265	return err;
1266}
1267
1268/*
1269 * search the closest allocated block to the left for *logical
1270 * and returns it at @logical + its physical address at @phys
1271 * if *logical is the smallest allocated block, the function
1272 * returns 0 at @phys
1273 * return value contains 0 (success) or error code
1274 */
1275static int ext4_ext_search_left(struct inode *inode,
1276				struct ext4_ext_path *path,
1277				ext4_lblk_t *logical, ext4_fsblk_t *phys)
1278{
1279	struct ext4_extent_idx *ix;
1280	struct ext4_extent *ex;
1281	int depth, ee_len;
1282
1283	if (unlikely(path == NULL)) {
1284		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1285		return -EIO;
1286	}
1287	depth = path->p_depth;
1288	*phys = 0;
1289
1290	if (depth == 0 && path->p_ext == NULL)
1291		return 0;
1292
1293	/* usually extent in the path covers blocks smaller
1294 * than *logical, but it can be that the extent is the
1295	 * first one in the file */
1296
1297	ex = path[depth].p_ext;
1298	ee_len = ext4_ext_get_actual_len(ex);
1299	if (*logical < le32_to_cpu(ex->ee_block)) {
1300		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1301			EXT4_ERROR_INODE(inode,
1302					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1303					 *logical, le32_to_cpu(ex->ee_block));
1304			return -EIO;
1305		}
1306		while (--depth >= 0) {
1307			ix = path[depth].p_idx;
1308			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1309				EXT4_ERROR_INODE(inode,
1310				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1311				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1312				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1313		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1314				  depth);
1315				return -EIO;
1316			}
1317		}
1318		return 0;
1319	}
1320
1321	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1322		EXT4_ERROR_INODE(inode,
1323				 "logical %d < ee_block %d + ee_len %d!",
1324				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1325		return -EIO;
1326	}
1327
1328	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1329	*phys = ext4_ext_pblock(ex) + ee_len - 1;
1330	return 0;
1331}
1332
1333/*
1334 * search the closest allocated block to the right for *logical
1335 * and returns it at @logical + its physical address at @phys
1336 * if *logical is the largest allocated block, the function
1337 * returns 0 at @phys
1338 * return value contains 0 (success) or error code
1339 */
1340static int ext4_ext_search_right(struct inode *inode,
1341				 struct ext4_ext_path *path,
1342				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
1343				 struct ext4_extent **ret_ex)
1344{
1345	struct buffer_head *bh = NULL;
1346	struct ext4_extent_header *eh;
1347	struct ext4_extent_idx *ix;
1348	struct ext4_extent *ex;
1349	ext4_fsblk_t block;
1350	int depth;	/* Note, NOT eh_depth; depth from top of tree */
1351	int ee_len;
1352
1353	if (unlikely(path == NULL)) {
1354		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1355		return -EIO;
1356	}
1357	depth = path->p_depth;
1358	*phys = 0;
1359
1360	if (depth == 0 && path->p_ext == NULL)
1361		return 0;
1362
1363	/* usually extent in the path covers blocks smaller
1364 * than *logical, but it can be that the extent is the
1365	 * first one in the file */
1366
1367	ex = path[depth].p_ext;
1368	ee_len = ext4_ext_get_actual_len(ex);
1369	if (*logical < le32_to_cpu(ex->ee_block)) {
1370		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1371			EXT4_ERROR_INODE(inode,
1372					 "first_extent(path[%d].p_hdr) != ex",
1373					 depth);
1374			return -EIO;
1375		}
1376		while (--depth >= 0) {
1377			ix = path[depth].p_idx;
1378			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1379				EXT4_ERROR_INODE(inode,
1380						 "ix != EXT_FIRST_INDEX *logical %d!",
1381						 *logical);
1382				return -EIO;
1383			}
1384		}
1385		goto found_extent;
1386	}
1387
1388	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1389		EXT4_ERROR_INODE(inode,
1390				 "logical %d < ee_block %d + ee_len %d!",
1391				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1392		return -EIO;
1393	}
1394
1395	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1396		/* next allocated block in this leaf */
1397		ex++;
1398		goto found_extent;
1399	}
1400
1401	/* go up and search for index to the right */
1402	while (--depth >= 0) {
1403		ix = path[depth].p_idx;
1404		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1405			goto got_index;
1406	}
1407
1408	/* we've gone up to the root and found no index to the right */
1409	return 0;
1410
1411got_index:
1412	/* we've found index to the right, let's
1413	 * follow it and find the closest allocated
1414	 * block to the right */
1415	ix++;
1416	block = ext4_idx_pblock(ix);
1417	while (++depth < path->p_depth) {
1418		bh = sb_bread(inode->i_sb, block);
1419		if (bh == NULL)
1420			return -EIO;
1421		eh = ext_block_hdr(bh);
1422		/* subtract from p_depth to get proper eh_depth */
1423		if (ext4_ext_check_block(inode, eh,
1424					 path->p_depth - depth, bh)) {
1425			put_bh(bh);
1426			return -EIO;
1427		}
1428		ix = EXT_FIRST_INDEX(eh);
1429		block = ext4_idx_pblock(ix);
1430		put_bh(bh);
1431	}
1432
1433	bh = sb_bread(inode->i_sb, block);
1434	if (bh == NULL)
1435		return -EIO;
1436	eh = ext_block_hdr(bh);
1437	if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
1438		put_bh(bh);
1439		return -EIO;
1440	}
1441	ex = EXT_FIRST_EXTENT(eh);
1442found_extent:
1443	*logical = le32_to_cpu(ex->ee_block);
1444	*phys = ext4_ext_pblock(ex);
1445	*ret_ex = ex;
1446	if (bh)
1447		put_bh(bh);
1448	return 0;
1449}
1450
1451/*
1452 * ext4_ext_next_allocated_block:
1453 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1454 * NOTE: it considers block number from index entry as
1455 * allocated block. Thus, index entries have to be consistent
1456 * with leaves.
1457 */
1458static ext4_lblk_t
1459ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1460{
1461	int depth;
1462
1463	BUG_ON(path == NULL);
1464	depth = path->p_depth;
1465
1466	if (depth == 0 && path->p_ext == NULL)
1467		return EXT_MAX_BLOCKS;
1468
1469	while (depth >= 0) {
1470		if (depth == path->p_depth) {
1471			/* leaf */
1472			if (path[depth].p_ext &&
1473				path[depth].p_ext !=
1474					EXT_LAST_EXTENT(path[depth].p_hdr))
1475			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
1476		} else {
1477			/* index */
1478			if (path[depth].p_idx !=
1479					EXT_LAST_INDEX(path[depth].p_hdr))
1480			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
1481		}
1482		depth--;
1483	}
1484
1485	return EXT_MAX_BLOCKS;
1486}
1487
1488/*
1489 * ext4_ext_next_leaf_block:
1490 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1491 */
1492static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1493{
1494	int depth;
1495
1496	BUG_ON(path == NULL);
1497	depth = path->p_depth;
1498
1499	/* a zero-depth tree has no leaf blocks at all */
1500	if (depth == 0)
1501		return EXT_MAX_BLOCKS;
1502
1503	/* go to index block */
1504	depth--;
1505
1506	while (depth >= 0) {
1507		if (path[depth].p_idx !=
1508				EXT_LAST_INDEX(path[depth].p_hdr))
1509			return (ext4_lblk_t)
1510				le32_to_cpu(path[depth].p_idx[1].ei_block);
1511		depth--;
1512	}
1513
1514	return EXT_MAX_BLOCKS;
1515}
1516
1517/*
1518 * ext4_ext_correct_indexes:
1519 * if leaf gets modified and modified extent is first in the leaf,
1520 * then we have to correct all indexes above.
1521 * TODO: do we need to correct tree in all cases?
1522 */
1523static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1524				struct ext4_ext_path *path)
1525{
1526	struct ext4_extent_header *eh;
1527	int depth = ext_depth(inode);
1528	struct ext4_extent *ex;
1529	__le32 border;
1530	int k, err = 0;
1531
1532	eh = path[depth].p_hdr;
1533	ex = path[depth].p_ext;
1534
1535	if (unlikely(ex == NULL || eh == NULL)) {
1536		EXT4_ERROR_INODE(inode,
1537				 "ex %p == NULL or eh %p == NULL", ex, eh);
1538		return -EIO;
1539	}
1540
1541	if (depth == 0) {
1542		/* there is no tree at all */
1543		return 0;
1544	}
1545
1546	if (ex != EXT_FIRST_EXTENT(eh)) {
1547		/* we correct tree if first leaf got modified only */
1548		return 0;
1549	}
1550
1551	/*
1552	 * TODO: we need correction if border is smaller than current one
1553	 */
1554	k = depth - 1;
1555	border = path[depth].p_ext->ee_block;
1556	err = ext4_ext_get_access(handle, inode, path + k);
1557	if (err)
1558		return err;
1559	path[k].p_idx->ei_block = border;
1560	err = ext4_ext_dirty(handle, inode, path + k);
1561	if (err)
1562		return err;
1563
1564	while (k--) {
1565		/* change all left-side indexes */
1566		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1567			break;
1568		err = ext4_ext_get_access(handle, inode, path + k);
1569		if (err)
1570			break;
1571		path[k].p_idx->ei_block = border;
1572		err = ext4_ext_dirty(handle, inode, path + k);
1573		if (err)
1574			break;
1575	}
1576
1577	return err;
1578}
1579
1580int
1581ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1582				struct ext4_extent *ex2)
1583{
1584	unsigned short ext1_ee_len, ext2_ee_len, max_len;
1585
1586	/*
1587	 * Make sure that both extents are initialized. We don't merge
1588	 * uninitialized extents so that we can be sure that end_io code has
1589	 * the extent that was written properly split out and conversion to
1590	 * initialized is trivial.
1591	 */
1592	if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
1593		return 0;
1594
1595	if (ext4_ext_is_uninitialized(ex1))
1596		max_len = EXT_UNINIT_MAX_LEN;
1597	else
1598		max_len = EXT_INIT_MAX_LEN;
1599
1600	ext1_ee_len = ext4_ext_get_actual_len(ex1);
1601	ext2_ee_len = ext4_ext_get_actual_len(ex2);
1602
1603	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1604			le32_to_cpu(ex2->ee_block))
1605		return 0;
1606
1607	/*
1608	 * To allow future support for preallocated extents to be added
1609 * as an RO_COMPAT feature, refuse to merge two extents if
1610	 * this can result in the top bit of ee_len being set.
1611	 */
1612	if (ext1_ee_len + ext2_ee_len > max_len)
1613		return 0;
1614#ifdef AGGRESSIVE_TEST
1615	if (ext1_ee_len >= 4)
1616		return 0;
1617#endif
1618
1619	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1620		return 1;
1621	return 0;
1622}
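/*
 * Example with made-up numbers: an initialized extent mapping logical
 * blocks 100..107 to physical blocks 5000..5007 and another mapping
 * 108..111 to 5008..5011 pass every check above (logically and
 * physically adjacent, combined length 12 <= EXT_INIT_MAX_LEN), so they
 * can be merged into a single extent of length 12.
 */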
1623
1624/*
1625 * This function tries to merge the "ex" extent to the next extent in the tree.
1626 * It always tries to merge towards the right. If you want to merge towards
1627 * the left, pass "ex - 1" as the argument instead of "ex".
1628 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1629 * 1 if they got merged.
1630 */
1631static int ext4_ext_try_to_merge_right(struct inode *inode,
1632				 struct ext4_ext_path *path,
1633				 struct ext4_extent *ex)
1634{
1635	struct ext4_extent_header *eh;
1636	unsigned int depth, len;
1637	int merge_done = 0;
1638	int uninitialized = 0;
1639
1640	depth = ext_depth(inode);
1641	BUG_ON(path[depth].p_hdr == NULL);
1642	eh = path[depth].p_hdr;
1643
1644	while (ex < EXT_LAST_EXTENT(eh)) {
1645		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1646			break;
1647		/* merge with next extent! */
1648		if (ext4_ext_is_uninitialized(ex))
1649			uninitialized = 1;
1650		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1651				+ ext4_ext_get_actual_len(ex + 1));
1652		if (uninitialized)
1653			ext4_ext_mark_uninitialized(ex);
1654
1655		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1656			len = (EXT_LAST_EXTENT(eh) - ex - 1)
1657				* sizeof(struct ext4_extent);
1658			memmove(ex + 1, ex + 2, len);
1659		}
1660		le16_add_cpu(&eh->eh_entries, -1);
1661		merge_done = 1;
1662		WARN_ON(eh->eh_entries == 0);
1663		if (!eh->eh_entries)
1664			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1665	}
1666
1667	return merge_done;
1668}
1669
1670/*
1671 * This function does a very simple check to see if we can collapse
1672 * an extent tree with a single extent tree leaf block into the inode.
1673 */
1674static void ext4_ext_try_to_merge_up(handle_t *handle,
1675				     struct inode *inode,
1676				     struct ext4_ext_path *path)
1677{
1678	size_t s;
1679	unsigned max_root = ext4_ext_space_root(inode, 0);
1680	ext4_fsblk_t blk;
1681
1682	if ((path[0].p_depth != 1) ||
1683	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1684	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1685		return;
1686
1687	/*
1688	 * We need to modify the block allocation bitmap and the block
1689	 * group descriptor to release the extent tree block.  If we
1690	 * can't get the journal credits, give up.
1691	 */
1692	if (ext4_journal_extend(handle, 2))
1693		return;
1694
1695	/*
1696	 * Copy the extent data up to the inode
1697	 */
1698	blk = ext4_idx_pblock(path[0].p_idx);
1699	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1700		sizeof(struct ext4_extent_idx);
1701	s += sizeof(struct ext4_extent_header);
1702
1703	memcpy(path[0].p_hdr, path[1].p_hdr, s);
1704	path[0].p_depth = 0;
1705	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1706		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1707	path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1708
1709	brelse(path[1].p_bh);
1710	ext4_free_blocks(handle, inode, NULL, blk, 1,
1711			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1712}
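/*
 * Put differently: once a depth-1 tree has shrunk to a single leaf whose
 * entries also fit into the 48 bytes of i_data left after the header,
 * the leaf is copied over the root, the depth drops back to 0, and the
 * now-unused index block is returned to the allocator.
 */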
1713
1714/*
1715 * This function tries to merge the @ex extent with its neighbours in the
1716 * tree: the left neighbour is tried first, then the right one.
1717 */
1718static void ext4_ext_try_to_merge(handle_t *handle,
1719				  struct inode *inode,
1720				  struct ext4_ext_path *path,
1721				  struct ext4_extent *ex) {
1722	struct ext4_extent_header *eh;
1723	unsigned int depth;
1724	int merge_done = 0;
1725
1726	depth = ext_depth(inode);
1727	BUG_ON(path[depth].p_hdr == NULL);
1728	eh = path[depth].p_hdr;
1729
1730	if (ex > EXT_FIRST_EXTENT(eh))
1731		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1732
1733	if (!merge_done)
1734		(void) ext4_ext_try_to_merge_right(inode, path, ex);
1735
1736	ext4_ext_try_to_merge_up(handle, inode, path);
1737}
1738
1739/*
1740 * check if a portion of the "newext" extent overlaps with an
1741 * existing extent.
1742 *
1743 * If there is an overlap discovered, it updates the length of the newext
1744 * such that there will be no overlap, and then returns 1.
1745 * If there is no overlap found, it returns 0.
1746 */
1747static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1748					   struct inode *inode,
1749					   struct ext4_extent *newext,
1750					   struct ext4_ext_path *path)
1751{
1752	ext4_lblk_t b1, b2;
1753	unsigned int depth, len1;
1754	unsigned int ret = 0;
1755
1756	b1 = le32_to_cpu(newext->ee_block);
1757	len1 = ext4_ext_get_actual_len(newext);
1758	depth = ext_depth(inode);
1759	if (!path[depth].p_ext)
1760		goto out;
1761	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1762	b2 &= ~(sbi->s_cluster_ratio - 1);
1763
1764	/*
1765	 * get the next allocated block if the extent in the path
1766	 * is before the requested block(s)
1767	 */
1768	if (b2 < b1) {
1769		b2 = ext4_ext_next_allocated_block(path);
1770		if (b2 == EXT_MAX_BLOCKS)
1771			goto out;
1772		b2 &= ~(sbi->s_cluster_ratio - 1);
1773	}
1774
1775	/* check for wrap through zero on extent logical start block*/
1776	if (b1 + len1 < b1) {
1777		len1 = EXT_MAX_BLOCKS - b1;
1778		newext->ee_len = cpu_to_le16(len1);
1779		ret = 1;
1780	}
1781
1782	/* check for overlap */
1783	if (b1 + len1 > b2) {
1784		newext->ee_len = cpu_to_le16(b2 - b1);
1785		ret = 1;
1786	}
1787out:
1788	return ret;
1789}
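/*
 * Example (made-up numbers, one block per cluster): if newext covers
 * logical blocks 100..119 but an existing extent already starts at block
 * 110, ee_len is trimmed to 10 so that newext covers only 100..109, and
 * 1 is returned to tell the caller that the request was shortened.
 */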
1790
1791/*
1792 * ext4_ext_insert_extent:
1793 * tries to merge the requested extent into an existing extent or
1794 * inserts the requested extent as a new one into the tree,
1795 * creating a new leaf in the no-space case.
1796 */
1797int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1798				struct ext4_ext_path *path,
1799				struct ext4_extent *newext, int flag)
1800{
1801	struct ext4_extent_header *eh;
1802	struct ext4_extent *ex, *fex;
1803	struct ext4_extent *nearex; /* nearest extent */
1804	struct ext4_ext_path *npath = NULL;
1805	int depth, len, err;
1806	ext4_lblk_t next;
1807	unsigned uninitialized = 0;
1808	int flags = 0;
1809
1810	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1811		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1812		return -EIO;
1813	}
1814	depth = ext_depth(inode);
1815	ex = path[depth].p_ext;
1816	eh = path[depth].p_hdr;
1817	if (unlikely(path[depth].p_hdr == NULL)) {
1818		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1819		return -EIO;
1820	}
1821
1822	/* try to insert block into found extent and return */
1823	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)) {
1824
1825		/*
1826		 * Try to see whether we should rather test the extent to the
1827		 * right of ex, or to the left of ex. This is because
1828		 * ext4_ext_find_extent() can return either extent on the
1829		 * left, or on the right from the searched position. This
1830		 * will make merging more effective.
1831		 */
1832		if (ex < EXT_LAST_EXTENT(eh) &&
1833		    (le32_to_cpu(ex->ee_block) +
1834		    ext4_ext_get_actual_len(ex) <
1835		    le32_to_cpu(newext->ee_block))) {
1836			ex += 1;
1837			goto prepend;
1838		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
1839			   (le32_to_cpu(newext->ee_block) +
1840			   ext4_ext_get_actual_len(newext) <
1841			   le32_to_cpu(ex->ee_block)))
1842			ex -= 1;
1843
1844		/* Try to append newex to the ex */
1845		if (ext4_can_extents_be_merged(inode, ex, newext)) {
1846			ext_debug("append [%d]%d block to %u:[%d]%d"
1847				  "(from %llu)\n",
1848				  ext4_ext_is_uninitialized(newext),
1849				  ext4_ext_get_actual_len(newext),
1850				  le32_to_cpu(ex->ee_block),
1851				  ext4_ext_is_uninitialized(ex),
1852				  ext4_ext_get_actual_len(ex),
1853				  ext4_ext_pblock(ex));
1854			err = ext4_ext_get_access(handle, inode,
1855						  path + depth);
1856			if (err)
1857				return err;
1858
1859			/*
1860			 * ext4_can_extents_be_merged should have checked
1861			 * that either both extents are uninitialized, or
1862			 * both aren't. Thus we need to check only one of
1863			 * them here.
1864			 */
1865			if (ext4_ext_is_uninitialized(ex))
1866				uninitialized = 1;
1867			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1868					+ ext4_ext_get_actual_len(newext));
1869			if (uninitialized)
1870				ext4_ext_mark_uninitialized(ex);
1871			eh = path[depth].p_hdr;
1872			nearex = ex;
1873			goto merge;
1874		}
1875
1876prepend:
1877		/* Try to prepend newex to the ex */
1878		if (ext4_can_extents_be_merged(inode, newext, ex)) {
1879			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
1880				  "(from %llu)\n",
1881				  le32_to_cpu(newext->ee_block),
1882				  ext4_ext_is_uninitialized(newext),
1883				  ext4_ext_get_actual_len(newext),
1884				  le32_to_cpu(ex->ee_block),
1885				  ext4_ext_is_uninitialized(ex),
1886				  ext4_ext_get_actual_len(ex),
1887				  ext4_ext_pblock(ex));
1888			err = ext4_ext_get_access(handle, inode,
1889						  path + depth);
1890			if (err)
1891				return err;
1892
1893			/*
1894			 * ext4_can_extents_be_merged should have checked
1895			 * that either both extents are uninitialized, or
1896			 * both aren't. Thus we need to check only one of
1897			 * them here.
1898			 */
1899			if (ext4_ext_is_uninitialized(ex))
1900				uninitialized = 1;
1901			ex->ee_block = newext->ee_block;
1902			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
1903			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1904					+ ext4_ext_get_actual_len(newext));
1905			if (uninitialized)
1906				ext4_ext_mark_uninitialized(ex);
1907			eh = path[depth].p_hdr;
1908			nearex = ex;
1909			goto merge;
1910		}
1911	}
1912
1913	depth = ext_depth(inode);
1914	eh = path[depth].p_hdr;
1915	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1916		goto has_space;
1917
1918	/* probably next leaf has space for us? */
1919	fex = EXT_LAST_EXTENT(eh);
1920	next = EXT_MAX_BLOCKS;
1921	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
1922		next = ext4_ext_next_leaf_block(path);
1923	if (next != EXT_MAX_BLOCKS) {
1924		ext_debug("next leaf block - %u\n", next);
1925		BUG_ON(npath != NULL);
1926		npath = ext4_ext_find_extent(inode, next, NULL);
1927		if (IS_ERR(npath))
1928			return PTR_ERR(npath);
1929		BUG_ON(npath->p_depth != path->p_depth);
1930		eh = npath[depth].p_hdr;
1931		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1932			ext_debug("next leaf isn't full(%d)\n",
1933				  le16_to_cpu(eh->eh_entries));
1934			path = npath;
1935			goto has_space;
1936		}
1937		ext_debug("next leaf has no free space(%d,%d)\n",
1938			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1939	}
1940
1941	/*
1942	 * There is no free space in the found leaf.
1943	 * We're gonna add a new leaf in the tree.
1944	 */
1945	if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL)
1946		flags = EXT4_MB_USE_RESERVED;
1947	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1948	if (err)
1949		goto cleanup;
1950	depth = ext_depth(inode);
1951	eh = path[depth].p_hdr;
1952
1953has_space:
1954	nearex = path[depth].p_ext;
1955
1956	err = ext4_ext_get_access(handle, inode, path + depth);
1957	if (err)
1958		goto cleanup;
1959
1960	if (!nearex) {
1961		/* there is no extent in this leaf, create first one */
1962		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
1963				le32_to_cpu(newext->ee_block),
1964				ext4_ext_pblock(newext),
1965				ext4_ext_is_uninitialized(newext),
1966				ext4_ext_get_actual_len(newext));
1967		nearex = EXT_FIRST_EXTENT(eh);
1968	} else {
1969		if (le32_to_cpu(newext->ee_block)
1970			   > le32_to_cpu(nearex->ee_block)) {
1971			/* Insert after */
1972			ext_debug("insert %u:%llu:[%d]%d after: "
1973					"nearest %p\n",
1974					le32_to_cpu(newext->ee_block),
1975					ext4_ext_pblock(newext),
1976					ext4_ext_is_uninitialized(newext),
1977					ext4_ext_get_actual_len(newext),
1978					nearex);
1979			nearex++;
1980		} else {
1981			/* Insert before */
1982			BUG_ON(newext->ee_block == nearex->ee_block);
1983			ext_debug("insert %u:%llu:[%d]%d before: "
1984					"nearest %p\n",
1985					le32_to_cpu(newext->ee_block),
1986					ext4_ext_pblock(newext),
1987					ext4_ext_is_uninitialized(newext),
1988					ext4_ext_get_actual_len(newext),
1989					nearex);
1990		}
1991		len = EXT_LAST_EXTENT(eh) - nearex + 1;
1992		if (len > 0) {
1993			ext_debug("insert %u:%llu:[%d]%d: "
1994					"move %d extents from 0x%p to 0x%p\n",
1995					le32_to_cpu(newext->ee_block),
1996					ext4_ext_pblock(newext),
1997					ext4_ext_is_uninitialized(newext),
1998					ext4_ext_get_actual_len(newext),
1999					len, nearex, nearex + 1);
2000			memmove(nearex + 1, nearex,
2001				len * sizeof(struct ext4_extent));
2002		}
2003	}
2004
2005	le16_add_cpu(&eh->eh_entries, 1);
2006	path[depth].p_ext = nearex;
2007	nearex->ee_block = newext->ee_block;
2008	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2009	nearex->ee_len = newext->ee_len;
2010
2011merge:
2012	/* try to merge extents */
2013	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
2014		ext4_ext_try_to_merge(handle, inode, path, nearex);
2015
2016
2017	/* time to correct all indexes above */
2018	err = ext4_ext_correct_indexes(handle, inode, path);
2019	if (err)
2020		goto cleanup;
2021
2022	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2023
2024cleanup:
2025	if (npath) {
2026		ext4_ext_drop_refs(npath);
2027		kfree(npath);
2028	}
2029	return err;
2030}
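/*
 * Worked example for the merge fast paths above (illustrative numbers,
 * not from the original source): if the leaf holds ex = {ee_block 100,
 * len 8, pblk P} and the caller inserts newext = {ee_block 108, len 4,
 * pblk P + 8}, ext4_can_extents_be_merged() succeeds and the append
 * path simply grows ex to {ee_block 100, len 12} -- no memmove() and
 * no extra leaf entry are needed.
 */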
2031
2032static int ext4_fill_fiemap_extents(struct inode *inode,
2033				    ext4_lblk_t block, ext4_lblk_t num,
2034				    struct fiemap_extent_info *fieinfo)
2035{
2036	struct ext4_ext_path *path = NULL;
2037	struct ext4_extent *ex;
2038	struct extent_status es;
2039	ext4_lblk_t next, next_del, start = 0, end = 0;
2040	ext4_lblk_t last = block + num;
2041	int exists, depth = 0, err = 0;
2042	unsigned int flags = 0;
2043	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2044
2045	while (block < last && block != EXT_MAX_BLOCKS) {
2046		num = last - block;
2047		/* find extent for this block */
2048		down_read(&EXT4_I(inode)->i_data_sem);
2049
2050		if (path && ext_depth(inode) != depth) {
2051			/* depth was changed. we have to realloc path */
2052			kfree(path);
2053			path = NULL;
2054		}
2055
2056		path = ext4_ext_find_extent(inode, block, path);
2057		if (IS_ERR(path)) {
2058			up_read(&EXT4_I(inode)->i_data_sem);
2059			err = PTR_ERR(path);
2060			path = NULL;
2061			break;
2062		}
2063
2064		depth = ext_depth(inode);
2065		if (unlikely(path[depth].p_hdr == NULL)) {
2066			up_read(&EXT4_I(inode)->i_data_sem);
2067			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2068			err = -EIO;
2069			break;
2070		}
2071		ex = path[depth].p_ext;
2072		next = ext4_ext_next_allocated_block(path);
2073		ext4_ext_drop_refs(path);
2074
2075		flags = 0;
2076		exists = 0;
2077		if (!ex) {
2078			/* there is no extent yet, so try to allocate
2079			 * all requested space */
2080			start = block;
2081			end = block + num;
2082		} else if (le32_to_cpu(ex->ee_block) > block) {
2083			/* need to allocate space before found extent */
2084			start = block;
2085			end = le32_to_cpu(ex->ee_block);
2086			if (block + num < end)
2087				end = block + num;
2088		} else if (block >= le32_to_cpu(ex->ee_block)
2089					+ ext4_ext_get_actual_len(ex)) {
2090			/* need to allocate space after found extent */
2091			start = block;
2092			end = block + num;
2093			if (end >= next)
2094				end = next;
2095		} else if (block >= le32_to_cpu(ex->ee_block)) {
2096			/*
2097			 * some part of requested space is covered
2098			 * by found extent
2099			 */
2100			start = block;
2101			end = le32_to_cpu(ex->ee_block)
2102				+ ext4_ext_get_actual_len(ex);
2103			if (block + num < end)
2104				end = block + num;
2105			exists = 1;
2106		} else {
2107			BUG();
2108		}
2109		BUG_ON(end <= start);
2110
2111		if (!exists) {
2112			es.es_lblk = start;
2113			es.es_len = end - start;
2114			es.es_pblk = 0;
2115		} else {
2116			es.es_lblk = le32_to_cpu(ex->ee_block);
2117			es.es_len = ext4_ext_get_actual_len(ex);
2118			es.es_pblk = ext4_ext_pblock(ex);
2119			if (ext4_ext_is_uninitialized(ex))
2120				flags |= FIEMAP_EXTENT_UNWRITTEN;
2121		}
2122
2123		/*
2124		 * Find delayed extent and update es accordingly. We call
2125		 * it even in !exists case to find out whether es is the
2126		 * last existing extent or not.
2127		 */
2128		next_del = ext4_find_delayed_extent(inode, &es);
2129		if (!exists && next_del) {
2130			exists = 1;
2131			flags |= FIEMAP_EXTENT_DELALLOC;
2132		}
2133		up_read(&EXT4_I(inode)->i_data_sem);
2134
2135		if (unlikely(es.es_len == 0)) {
2136			EXT4_ERROR_INODE(inode, "es.es_len == 0");
2137			err = -EIO;
2138			break;
2139		}
2140
2141		/*
2142		 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2143		 * We need to check next == EXT_MAX_BLOCKS because an extent
2144		 * can carry both unwritten and delayed status at once: when
2145		 * a delayed-allocated range is later allocated by fallocate,
2146		 * the extent status tree tracks both states in a single
2147		 * extent.
2148		 *
2149		 * So we could return an extent that is both unwritten and
2150		 * delayed, whose block is equal to 'next'.
2151		 */
2152		if (next == next_del && next == EXT_MAX_BLOCKS) {
2153			flags |= FIEMAP_EXTENT_LAST;
2154			if (unlikely(next_del != EXT_MAX_BLOCKS ||
2155				     next != EXT_MAX_BLOCKS)) {
2156				EXT4_ERROR_INODE(inode,
2157						 "next extent == %u, next "
2158						 "delalloc extent = %u",
2159						 next, next_del);
2160				err = -EIO;
2161				break;
2162			}
2163		}
2164
2165		if (exists) {
2166			err = fiemap_fill_next_extent(fieinfo,
2167				(__u64)es.es_lblk << blksize_bits,
2168				(__u64)es.es_pblk << blksize_bits,
2169				(__u64)es.es_len << blksize_bits,
2170				flags);
2171			if (err < 0)
2172				break;
2173			if (err == 1) {
2174				err = 0;
2175				break;
2176			}
2177		}
2178
2179		block = es.es_lblk + es.es_len;
2180	}
2181
2182	if (path) {
2183		ext4_ext_drop_refs(path);
2184		kfree(path);
2185	}
2186
2187	return err;
2188}
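/*
 * Note on the unit conversion above (assuming a 4 KiB block size,
 * i.e. blksize_bits == 12): an extent with es_lblk == 100 and
 * es_len == 25 is handed to fiemap_fill_next_extent() as the byte
 * range starting at 100 << 12 == 409600 with length 25 << 12 == 102400.
 */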
2189
2190/*
2191 * ext4_ext_put_gap_in_cache:
2192 * calculate boundaries of the gap that the requested block fits into
2193 * and cache this gap
2194 */
2195static void
2196ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2197				ext4_lblk_t block)
2198{
2199	int depth = ext_depth(inode);
2200	unsigned long len;
2201	ext4_lblk_t lblock;
2202	struct ext4_extent *ex;
2203
2204	ex = path[depth].p_ext;
2205	if (ex == NULL) {
2206		/*
2207		 * there is no extent yet, so gap is [0;-] and we
2208		 * don't cache it
2209		 */
2210		ext_debug("cache gap(whole file):");
2211	} else if (block < le32_to_cpu(ex->ee_block)) {
2212		lblock = block;
2213		len = le32_to_cpu(ex->ee_block) - block;
2214		ext_debug("cache gap(before): %u [%u:%u]",
2215				block,
2216				le32_to_cpu(ex->ee_block),
2217				 ext4_ext_get_actual_len(ex));
2218		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2219			ext4_es_insert_extent(inode, lblock, len, ~0,
2220					      EXTENT_STATUS_HOLE);
2221	} else if (block >= le32_to_cpu(ex->ee_block)
2222			+ ext4_ext_get_actual_len(ex)) {
2223		ext4_lblk_t next;
2224		lblock = le32_to_cpu(ex->ee_block)
2225			+ ext4_ext_get_actual_len(ex);
2226
2227		next = ext4_ext_next_allocated_block(path);
2228		ext_debug("cache gap(after): [%u:%u] %u",
2229				le32_to_cpu(ex->ee_block),
2230				ext4_ext_get_actual_len(ex),
2231				block);
2232		BUG_ON(next == lblock);
2233		len = next - lblock;
2234		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2235			ext4_es_insert_extent(inode, lblock, len, ~0,
2236					      EXTENT_STATUS_HOLE);
2237	} else {
2238		lblock = len = 0;
2239		BUG();
2240	}
2241
2242	ext_debug(" -> %u:%lu\n", lblock, len);
2243}
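/*
 * Worked example (illustrative numbers): with a single extent at
 * logical block 100 of length 8, looking up block 50 takes the
 * "cache gap(before)" branch above and, provided no delalloc block
 * lies in the range, inserts the hole lblock == 50, len == 50
 * (blocks 50..99) into the extent status tree.
 */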
2244
2245/*
2246 * ext4_ext_rm_idx:
2247 * removes index from the index block.
2248 */
2249static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2250			struct ext4_ext_path *path, int depth)
2251{
2252	int err;
2253	ext4_fsblk_t leaf;
2254
2255	/* free index block */
2256	depth--;
2257	path = path + depth;
2258	leaf = ext4_idx_pblock(path->p_idx);
2259	if (unlikely(path->p_hdr->eh_entries == 0)) {
2260		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2261		return -EIO;
2262	}
2263	err = ext4_ext_get_access(handle, inode, path);
2264	if (err)
2265		return err;
2266
2267	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2268		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2269		len *= sizeof(struct ext4_extent_idx);
2270		memmove(path->p_idx, path->p_idx + 1, len);
2271	}
2272
2273	le16_add_cpu(&path->p_hdr->eh_entries, -1);
2274	err = ext4_ext_dirty(handle, inode, path);
2275	if (err)
2276		return err;
2277	ext_debug("index is empty, remove it, free block %llu\n", leaf);
2278	trace_ext4_ext_rm_idx(inode, leaf);
2279
2280	ext4_free_blocks(handle, inode, NULL, leaf, 1,
2281			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2282
2283	while (--depth >= 0) {
2284		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2285			break;
2286		path--;
2287		err = ext4_ext_get_access(handle, inode, path);
2288		if (err)
2289			break;
2290		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2291		err = ext4_ext_dirty(handle, inode, path);
2292		if (err)
2293			break;
2294	}
2295	return err;
2296}
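/*
 * The loop above propagates the removal upward: as long as the index
 * we descended through is the first entry of its node, the parent's
 * key is refreshed with the new first logical block from the level
 * below; the walk stops at the first level where that is not the case.
 */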
2297
2298/*
2299 * ext4_ext_calc_credits_for_single_extent:
2300 * This routine returns the maximum number of credits needed to insert
2301 * an extent into the extent tree.
2302 * When passing the actual path, the caller should calculate the
2303 * credits under i_data_sem.
2304 */
2305int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2306						struct ext4_ext_path *path)
2307{
2308	if (path) {
2309		int depth = ext_depth(inode);
2310		int ret = 0;
2311
2312		/* probably there is space in leaf? */
2313		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2314				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
2315
2316			/*
2317			 *  There is some space in the leaf, so no
2318			 *  need to account for a leaf block credit.
2319			 *
2320			 *  bitmaps and block group descriptor blocks
2321			 *  and other metadata blocks still need to be
2322			 *  accounted.
2323			 */
2324			/* 1 bitmap, 1 block group descriptor */
2325			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2326			return ret;
2327		}
2328	}
2329
2330	return ext4_chunk_trans_blocks(inode, nrblocks);
2331}
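/*
 * Worked example (illustrative): if the leaf pointed to by @path still
 * has a free slot, the fast path above charges only 2 credits (bitmap
 * plus group descriptor) in addition to EXT4_META_TRANS_BLOCKS(sb).
 * With no path, or with a full leaf, the estimate falls back to
 * ext4_chunk_trans_blocks(), which sizes the transaction for the worst
 * case of modifying nrblocks.
 */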
2332
2333/*
2334 * How many index/leaf blocks need to change/allocate to modify nrblocks?
2335 *
2336 * If nrblocks fit in a single extent (chunk flag is 1), then in the
2337 * worst case each tree level's index/leaf needs to be changed; if the
2338 * tree splits due to inserting a new extent, the old tree's
2339 * index/leaf blocks need to be updated too.
2340 *
2341 * If the nrblocks are discontiguous, they could cause
2342 * the whole tree split more than once, but this is really rare.
2343 */
2344int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2345{
2346	int index;
2347	int depth;
2348
2349	/* If we are converting the inline data, only one is needed here. */
2350	if (ext4_has_inline_data(inode))
2351		return 1;
2352
2353	depth = ext_depth(inode);
2354
2355	if (chunk)
2356		index = depth * 2;
2357	else
2358		index = depth * 3;
2359
2360	return index;
2361}
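/*
 * Example (illustrative): for a tree of depth 2, a contiguous chunk
 * (chunk != 0) is charged 2 * 2 == 4 index/leaf blocks while a
 * discontiguous request is charged 2 * 3 == 6, reflecting the extra
 * splits it may cause; an inline-data inode needs just 1 regardless
 * of depth.
 */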
2362
2363static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2364			      struct ext4_extent *ex,
2365			      ext4_fsblk_t *partial_cluster,
2366			      ext4_lblk_t from, ext4_lblk_t to)
2367{
2368	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2369	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2370	ext4_fsblk_t pblk;
2371	int flags = 0;
2372
2373	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2374		flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2375	else if (ext4_should_journal_data(inode))
2376		flags |= EXT4_FREE_BLOCKS_FORGET;
2377
2378	/*
2379	 * For bigalloc file systems, we never free a partial cluster
2380	 * at the beginning of the extent.  Instead, we make a note
2381	 * that we tried freeing the cluster, and check to see if we
2382	 * need to free it on a subsequent call to ext4_remove_blocks,
2383	 * or at the end of the ext4_truncate() operation.
2384	 */
2385	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2386
2387	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2388	/*
2389	 * If we have a partial cluster, and it's different from the
2390	 * cluster of the last block, we need to explicitly free the
2391	 * partial cluster here.
2392	 */
2393	pblk = ext4_ext_pblock(ex) + ee_len - 1;
2394	if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2395		ext4_free_blocks(handle, inode, NULL,
2396				 EXT4_C2B(sbi, *partial_cluster),
2397				 sbi->s_cluster_ratio, flags);
2398		*partial_cluster = 0;
2399	}
2400
2401#ifdef EXTENTS_STATS
2402	{
2403		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2404		spin_lock(&sbi->s_ext_stats_lock);
2405		sbi->s_ext_blocks += ee_len;
2406		sbi->s_ext_extents++;
2407		if (ee_len < sbi->s_ext_min)
2408			sbi->s_ext_min = ee_len;
2409		if (ee_len > sbi->s_ext_max)
2410			sbi->s_ext_max = ee_len;
2411		if (ext_depth(inode) > sbi->s_depth_max)
2412			sbi->s_depth_max = ext_depth(inode);
2413		spin_unlock(&sbi->s_ext_stats_lock);
2414	}
2415#endif
2416	if (from >= le32_to_cpu(ex->ee_block)
2417	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2418		/* tail removal */
2419		ext4_lblk_t num;
2420
2421		num = le32_to_cpu(ex->ee_block) + ee_len - from;
2422		pblk = ext4_ext_pblock(ex) + ee_len - num;
2423		ext_debug("free last %u blocks starting %llu\n", num, pblk);
2424		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2425		/*
2426		 * If the block range to be freed didn't start at the
2427		 * beginning of a cluster, and we removed the entire
2428		 * extent, save the partial cluster here, since we
2429		 * might need to delete if we determine that the
2430		 * truncate operation has removed all of the blocks in
2431		 * the cluster.
2432		 */
2433		if (pblk & (sbi->s_cluster_ratio - 1) &&
2434		    (ee_len == num))
2435			*partial_cluster = EXT4_B2C(sbi, pblk);
2436		else
2437			*partial_cluster = 0;
2438	} else if (from == le32_to_cpu(ex->ee_block)
2439		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2440		/* head removal */
2441		ext4_lblk_t num;
2442		ext4_fsblk_t start;
2443
2444		num = to - from;
2445		start = ext4_ext_pblock(ex);
2446
2447		ext_debug("free first %u blocks starting %llu\n", num, start);
2448		ext4_free_blocks(handle, inode, NULL, start, num, flags);
2449
2450	} else {
2451		printk(KERN_INFO "strange request: removal(2) "
2452				"%u-%u from %u:%u\n",
2453				from, to, le32_to_cpu(ex->ee_block), ee_len);
2454	}
2455	return 0;
2456}
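/*
 * Worked bigalloc example (illustrative, cluster ratio 4): if an
 * entire extent is removed (num == ee_len) and its first freed block
 * pblk == 1022 is not cluster-aligned (1022 & 3 != 0), the tail
 * removal branch above records *partial_cluster = EXT4_B2C(sbi, 1022)
 * == 255; that cluster is freed later only if no remaining extent
 * turns out to share it.
 */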
2457
2458
2459/*
2460 * ext4_ext_rm_leaf() removes the extents associated with the
2461 * blocks appearing between "start" and "end", and splits the extents
2462 * if "start" and "end" appear in the same extent.
2463 *
2464 * @handle: The journal handle
2465 * @inode:  The file's inode
2466 * @path:   The path to the leaf
2467 * @start:  The first block to remove
2468 * @end:    The last block to remove
2469 */
2470static int
2471ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2472		 struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2473		 ext4_lblk_t start, ext4_lblk_t end)
2474{
2475	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2476	int err = 0, correct_index = 0;
2477	int depth = ext_depth(inode), credits;
2478	struct ext4_extent_header *eh;
2479	ext4_lblk_t a, b;
2480	unsigned num;
2481	ext4_lblk_t ex_ee_block;
2482	unsigned short ex_ee_len;
2483	unsigned uninitialized = 0;
2484	struct ext4_extent *ex;
2485
2486	/* the header must be checked already in ext4_ext_remove_space() */
2487	ext_debug("truncate since %u in leaf to %u\n", start, end);
2488	if (!path[depth].p_hdr)
2489		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2490	eh = path[depth].p_hdr;
2491	if (unlikely(path[depth].p_hdr == NULL)) {
2492		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2493		return -EIO;
2494	}
2495	/* find where to start removing */
2496	ex = EXT_LAST_EXTENT(eh);
2497
2498	ex_ee_block = le32_to_cpu(ex->ee_block);
2499	ex_ee_len = ext4_ext_get_actual_len(ex);
2500
2501	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2502
2503	while (ex >= EXT_FIRST_EXTENT(eh) &&
2504			ex_ee_block + ex_ee_len > start) {
2505
2506		if (ext4_ext_is_uninitialized(ex))
2507			uninitialized = 1;
2508		else
2509			uninitialized = 0;
2510
2511		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2512			 uninitialized, ex_ee_len);
2513		path[depth].p_ext = ex;
2514
2515		a = ex_ee_block > start ? ex_ee_block : start;
2516		b = ex_ee_block+ex_ee_len - 1 < end ?
2517			ex_ee_block+ex_ee_len - 1 : end;
2518
2519		ext_debug("  border %u:%u\n", a, b);
2520
2521		/* If this extent is beyond the end of the hole, skip it */
2522		if (end < ex_ee_block) {
2523			ex--;
2524			ex_ee_block = le32_to_cpu(ex->ee_block);
2525			ex_ee_len = ext4_ext_get_actual_len(ex);
2526			continue;
2527		} else if (b != ex_ee_block + ex_ee_len - 1) {
2528			EXT4_ERROR_INODE(inode,
2529					 "can not handle truncate %u:%u "
2530					 "on extent %u:%u",
2531					 start, end, ex_ee_block,
2532					 ex_ee_block + ex_ee_len - 1);
2533			err = -EIO;
2534			goto out;
2535		} else if (a != ex_ee_block) {
2536			/* remove tail of the extent */
2537			num = a - ex_ee_block;
2538		} else {
2539			/* remove whole extent: excellent! */
2540			num = 0;
2541		}
2542		/*
2543		 * 3 for leaf, sb, and inode plus 2 (bmap and group
2544		 * descriptor) for each block group; assume two block
2545		 * groups plus ex_ee_len/blocks_per_block_group for
2546		 * the worst case
2547		 */
2548		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2549		if (ex == EXT_FIRST_EXTENT(eh)) {
2550			correct_index = 1;
2551			credits += (ext_depth(inode)) + 1;
2552		}
2553		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2554
2555		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2556		if (err)
2557			goto out;
2558
2559		err = ext4_ext_get_access(handle, inode, path + depth);
2560		if (err)
2561			goto out;
2562
2563		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2564					 a, b);
2565		if (err)
2566			goto out;
2567
2568		if (num == 0)
2569			/* this extent is removed; mark slot entirely unused */
2570			ext4_ext_store_pblock(ex, 0);
2571
2572		ex->ee_len = cpu_to_le16(num);
2573		/*
2574		 * Do not mark uninitialized if all the blocks in the
2575		 * extent have been removed.
2576		 */
2577		if (uninitialized && num)
2578			ext4_ext_mark_uninitialized(ex);
2579		/*
2580		 * If the extent was completely released,
2581		 * we need to remove it from the leaf
2582		 */
2583		if (num == 0) {
2584			if (end != EXT_MAX_BLOCKS - 1) {
2585				/*
2586				 * For hole punching, we need to scoot all the
2587				 * extents up when an extent is removed so that
2588				 * we dont have blank extents in the middle
2589				 * we don't have blank extents in the middle
2590				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2591					sizeof(struct ext4_extent));
2592
2593				/* Now get rid of the one at the end */
2594				memset(EXT_LAST_EXTENT(eh), 0,
2595					sizeof(struct ext4_extent));
2596			}
2597			le16_add_cpu(&eh->eh_entries, -1);
2598		} else
2599			*partial_cluster = 0;
2600
2601		err = ext4_ext_dirty(handle, inode, path + depth);
2602		if (err)
2603			goto out;
2604
2605		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2606				ext4_ext_pblock(ex));
2607		ex--;
2608		ex_ee_block = le32_to_cpu(ex->ee_block);
2609		ex_ee_len = ext4_ext_get_actual_len(ex);
2610	}
2611
2612	if (correct_index && eh->eh_entries)
2613		err = ext4_ext_correct_indexes(handle, inode, path);
2614
2615	/*
2616	 * If there is still an entry in the leaf node, check to see if
2617	 * it references the partial cluster.  This is the only place
2618	 * where it could; if it doesn't, we can free the cluster.
2619	 */
2620	if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2621	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2622	     *partial_cluster)) {
2623		int flags = EXT4_FREE_BLOCKS_FORGET;
2624
2625		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2626			flags |= EXT4_FREE_BLOCKS_METADATA;
2627
2628		ext4_free_blocks(handle, inode, NULL,
2629				 EXT4_C2B(sbi, *partial_cluster),
2630				 sbi->s_cluster_ratio, flags);
2631		*partial_cluster = 0;
2632	}
2633
2634	/* if this leaf is free, then we should
2635	 * remove it from index block above */
2636	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2637		err = ext4_ext_rm_idx(handle, inode, path, depth);
2638
2639out:
2640	return err;
2641}
2642
2643/*
2644 * ext4_ext_more_to_rm:
2645 * returns 1 if current index has to be freed (even partial)
2646 */
2647static int
2648ext4_ext_more_to_rm(struct ext4_ext_path *path)
2649{
2650	BUG_ON(path->p_idx == NULL);
2651
2652	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2653		return 0;
2654
2655	/*
2656	 * if truncate on deeper level happened, it wasn't partial,
2657	 * so we have to consider current index for truncation
2658	 */
2659	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2660		return 0;
2661	return 1;
2662}
2663
2664int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2665			  ext4_lblk_t end)
2666{
2667	struct super_block *sb = inode->i_sb;
2668	int depth = ext_depth(inode);
2669	struct ext4_ext_path *path = NULL;
2670	ext4_fsblk_t partial_cluster = 0;
2671	handle_t *handle;
2672	int i = 0, err = 0;
2673
2674	ext_debug("truncate since %u to %u\n", start, end);
2675
2676	/* probably first extent we're gonna free will be last in block */
2677	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2678	if (IS_ERR(handle))
2679		return PTR_ERR(handle);
2680
2681again:
2682	trace_ext4_ext_remove_space(inode, start, depth);
2683
2684	/*
2685	 * Check if we are removing extents inside the extent tree. If that
2686	 * is the case, we are going to punch a hole inside the extent tree
2687	 * so we have to check whether we need to split the extent covering
2688	 * the last block to remove so we can easily remove the part of it
2689	 * in ext4_ext_rm_leaf().
2690	 */
2691	if (end < EXT_MAX_BLOCKS - 1) {
2692		struct ext4_extent *ex;
2693		ext4_lblk_t ee_block;
2694
2695		/* find extent for this block */
2696		path = ext4_ext_find_extent(inode, end, NULL);
2697		if (IS_ERR(path)) {
2698			ext4_journal_stop(handle);
2699			return PTR_ERR(path);
2700		}
2701		depth = ext_depth(inode);
2702		/* The leaf can be missing only if the inode has no blocks at all */
2703		ex = path[depth].p_ext;
2704		if (!ex) {
2705			if (depth) {
2706				EXT4_ERROR_INODE(inode,
2707						 "path[%d].p_hdr == NULL",
2708						 depth);
2709				err = -EIO;
2710			}
2711			goto out;
2712		}
2713
2714		ee_block = le32_to_cpu(ex->ee_block);
2715
2716		/*
2717		 * See if the last block is inside the extent, if so split
2718		 * the extent at 'end' block so we can easily remove the
2719		 * tail of the first part of the split extent in
2720		 * ext4_ext_rm_leaf().
2721		 */
2722		if (end >= ee_block &&
2723		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2724			int split_flag = 0;
2725
2726			if (ext4_ext_is_uninitialized(ex))
2727				split_flag = EXT4_EXT_MARK_UNINIT1 |
2728					     EXT4_EXT_MARK_UNINIT2;
2729
2730			/*
2731			 * Split the extent in two so that 'end' is the last
2732			 * block in the first new extent. Also we should not
2733			 * fail removing space due to ENOSPC so try to use
2734			 * reserved block if that happens.
2735			 */
2736			err = ext4_split_extent_at(handle, inode, path,
2737					end + 1, split_flag,
2738					EXT4_GET_BLOCKS_PRE_IO |
2739					EXT4_GET_BLOCKS_METADATA_NOFAIL);
2740
2741			if (err < 0)
2742				goto out;
2743		}
2744	}
2745	/*
2746	 * We start scanning from the right side, freeing all the blocks
2747	 * after i_size and walking into the tree depth-wise.
2748	 */
2749	depth = ext_depth(inode);
2750	if (path) {
2751		int k = i = depth;
2752		while (--k > 0)
2753			path[k].p_block =
2754				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2755	} else {
2756		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2757			       GFP_NOFS);
2758		if (path == NULL) {
2759			ext4_journal_stop(handle);
2760			return -ENOMEM;
2761		}
2762		path[0].p_depth = depth;
2763		path[0].p_hdr = ext_inode_hdr(inode);
2764		i = 0;
2765
2766		if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2767			err = -EIO;
2768			goto out;
2769		}
2770	}
2771	err = 0;
2772
2773	while (i >= 0 && err == 0) {
2774		if (i == depth) {
2775			/* this is leaf block */
2776			err = ext4_ext_rm_leaf(handle, inode, path,
2777					       &partial_cluster, start,
2778					       end);
2779			/* root level has p_bh == NULL, brelse() eats this */
2780			brelse(path[i].p_bh);
2781			path[i].p_bh = NULL;
2782			i--;
2783			continue;
2784		}
2785
2786		/* this is index block */
2787		if (!path[i].p_hdr) {
2788			ext_debug("initialize header\n");
2789			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2790		}
2791
2792		if (!path[i].p_idx) {
2793			/* this level hasn't been touched yet */
2794			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2795			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2796			ext_debug("init index ptr: hdr 0x%p, num %d\n",
2797				  path[i].p_hdr,
2798				  le16_to_cpu(path[i].p_hdr->eh_entries));
2799		} else {
2800			/* we were already here, look at the next index */
2801			path[i].p_idx--;
2802		}
2803
2804		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2805				i, EXT_FIRST_INDEX(path[i].p_hdr),
2806				path[i].p_idx);
2807		if (ext4_ext_more_to_rm(path + i)) {
2808			struct buffer_head *bh;
2809			/* go to the next level */
2810			ext_debug("move to level %d (block %llu)\n",
2811				  i + 1, ext4_idx_pblock(path[i].p_idx));
2812			memset(path + i + 1, 0, sizeof(*path));
2813			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2814			if (!bh) {
2815				/* should we reset i_size? */
2816				err = -EIO;
2817				break;
2818			}
2819			if (WARN_ON(i + 1 > depth)) {
2820				err = -EIO;
2821				break;
2822			}
2823			if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2824							depth - i - 1, bh)) {
2825				err = -EIO;
2826				break;
2827			}
2828			path[i + 1].p_bh = bh;
2829
2830			/* save actual number of indexes since this
2831			 * number is changed at the next iteration */
2832			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2833			i++;
2834		} else {
2835			/* we finished processing this index, go up */
2836			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2837				/* index is empty, remove it;
2838				 * handle must already be prepared by
2839				 * ext4_ext_rm_leaf() */
2840				err = ext4_ext_rm_idx(handle, inode, path, i);
2841			}
2842			/* root level has p_bh == NULL, brelse() eats this */
2843			brelse(path[i].p_bh);
2844			path[i].p_bh = NULL;
2845			i--;
2846			ext_debug("return to level %d\n", i);
2847		}
2848	}
2849
2850	trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2851			path->p_hdr->eh_entries);
2852
2853	/* If we still have something in the partial cluster and we have removed
2854	 * even the first extent, then we should free the blocks in the partial
2855	 * cluster as well. */
2856	if (partial_cluster && path->p_hdr->eh_entries == 0) {
2857		int flags = EXT4_FREE_BLOCKS_FORGET;
2858
2859		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2860			flags |= EXT4_FREE_BLOCKS_METADATA;
2861
2862		ext4_free_blocks(handle, inode, NULL,
2863				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
2864				 EXT4_SB(sb)->s_cluster_ratio, flags);
2865		partial_cluster = 0;
2866	}
2867
2868	/* TODO: flexible tree reduction should be here */
2869	if (path->p_hdr->eh_entries == 0) {
2870		/*
2871		 * truncate to zero freed the whole tree,
2872		 * so we need to correct eh_depth
2873		 */
2874		err = ext4_ext_get_access(handle, inode, path);
2875		if (err == 0) {
2876			ext_inode_hdr(inode)->eh_depth = 0;
2877			ext_inode_hdr(inode)->eh_max =
2878				cpu_to_le16(ext4_ext_space_root(inode, 0));
2879			err = ext4_ext_dirty(handle, inode, path);
2880		}
2881	}
2882out:
2883	ext4_ext_drop_refs(path);
2884	kfree(path);
2885	if (err == -EAGAIN) {
2886		path = NULL;
2887		goto again;
2888	}
2889	ext4_journal_stop(handle);
2890
2891	return err;
2892}
2893
2894/*
2895 * called at mount time
2896 */
2897void ext4_ext_init(struct super_block *sb)
2898{
2899	/*
2900	 * possible initialization would be here
2901	 */
2902
2903	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2904#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2905		printk(KERN_INFO "EXT4-fs: file extents enabled"
2906#ifdef AGGRESSIVE_TEST
2907		       ", aggressive tests"
2908#endif
2909#ifdef CHECK_BINSEARCH
2910		       ", check binsearch"
2911#endif
2912#ifdef EXTENTS_STATS
2913		       ", stats"
2914#endif
2915		       "\n");
2916#endif
2917#ifdef EXTENTS_STATS
2918		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2919		EXT4_SB(sb)->s_ext_min = 1 << 30;
2920		EXT4_SB(sb)->s_ext_max = 0;
2921#endif
2922	}
2923}
2924
2925/*
2926 * called at umount time
2927 */
2928void ext4_ext_release(struct super_block *sb)
2929{
2930	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2931		return;
2932
2933#ifdef EXTENTS_STATS
2934	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2935		struct ext4_sb_info *sbi = EXT4_SB(sb);
2936		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2937			sbi->s_ext_blocks, sbi->s_ext_extents,
2938			sbi->s_ext_blocks / sbi->s_ext_extents);
2939		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2940			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2941	}
2942#endif
2943}
2944
2945/* FIXME!! we need to try to merge to left or right after zero-out  */
2946static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2947{
2948	ext4_fsblk_t ee_pblock;
2949	unsigned int ee_len;
2950	int ret;
2951
2952	ee_len    = ext4_ext_get_actual_len(ex);
2953	ee_pblock = ext4_ext_pblock(ex);
2954
2955	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2956	if (ret > 0)
2957		ret = 0;
2958
2959	return ret;
2960}
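/*
 * Usage sketch (the pattern used by the callers further down): zero
 * out the blocks backing an extent, then flip it to initialized:
 *
 *	err = ext4_ext_zeroout(inode, ex);
 *	if (err)
 *		goto out;
 *	ext4_ext_mark_initialized(ex);
 *	ext4_ext_try_to_merge(handle, inode, path, ex);
 *	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
 */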
2961
2962/*
2963 * ext4_split_extent_at() splits an extent at given block.
2964 *
2965 * @handle: the journal handle
2966 * @inode: the file inode
2967 * @path: the path to the extent
2968 * @split: the logical block where the extent is split.
2969 * @split_flag: indicates if the extent may be zeroed out if the split
2970 *		 fails, and the states (init or uninit) of the new extents.
2971 * @flags: flags used to insert the new extent into the extent tree.
2972 *
2973 *
2974 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
2975 * states of which are determined by @split_flag.
2976 *
2977 * There are two cases:
2978 *  a> the extent is split into two extents.
2979 *  b> no split is needed, and the extent is simply marked.
2980 *
2981 * return 0 on success.
2982 */
2983static int ext4_split_extent_at(handle_t *handle,
2984			     struct inode *inode,
2985			     struct ext4_ext_path *path,
2986			     ext4_lblk_t split,
2987			     int split_flag,
2988			     int flags)
2989{
2990	ext4_fsblk_t newblock;
2991	ext4_lblk_t ee_block;
2992	struct ext4_extent *ex, newex, orig_ex, zero_ex;
2993	struct ext4_extent *ex2 = NULL;
2994	unsigned int ee_len, depth;
2995	int err = 0;
2996
2997	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
2998	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
2999
3000	ext_debug("ext4_split_extent_at: inode %lu, logical "
3001		"block %llu\n", inode->i_ino, (unsigned long long)split);
3002
3003	ext4_ext_show_leaf(inode, path);
3004
3005	depth = ext_depth(inode);
3006	ex = path[depth].p_ext;
3007	ee_block = le32_to_cpu(ex->ee_block);
3008	ee_len = ext4_ext_get_actual_len(ex);
3009	newblock = split - ee_block + ext4_ext_pblock(ex);
3010
3011	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3012	BUG_ON(!ext4_ext_is_uninitialized(ex) &&
3013	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
3014			     EXT4_EXT_MARK_UNINIT1 |
3015			     EXT4_EXT_MARK_UNINIT2));
3016
3017	err = ext4_ext_get_access(handle, inode, path + depth);
3018	if (err)
3019		goto out;
3020
3021	if (split == ee_block) {
3022		/*
3023		 * case b: block @split is the block that the extent begins with
3024		 * then we just change the state of the extent, and splitting
3025		 * is not needed.
3026		 */
3027		if (split_flag & EXT4_EXT_MARK_UNINIT2)
3028			ext4_ext_mark_uninitialized(ex);
3029		else
3030			ext4_ext_mark_initialized(ex);
3031
3032		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3033			ext4_ext_try_to_merge(handle, inode, path, ex);
3034
3035		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3036		goto out;
3037	}
3038
3039	/* case a */
3040	memcpy(&orig_ex, ex, sizeof(orig_ex));
3041	ex->ee_len = cpu_to_le16(split - ee_block);
3042	if (split_flag & EXT4_EXT_MARK_UNINIT1)
3043		ext4_ext_mark_uninitialized(ex);
3044
3045	/*
3046	 * the path may lead to a new leaf, not to the original leaf any
3047	 * more, after ext4_ext_insert_extent() returns.
3048	 */
3049	err = ext4_ext_dirty(handle, inode, path + depth);
3050	if (err)
3051		goto fix_extent_len;
3052
3053	ex2 = &newex;
3054	ex2->ee_block = cpu_to_le32(split);
3055	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
3056	ext4_ext_store_pblock(ex2, newblock);
3057	if (split_flag & EXT4_EXT_MARK_UNINIT2)
3058		ext4_ext_mark_uninitialized(ex2);
3059
3060	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3061	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3062		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3063			if (split_flag & EXT4_EXT_DATA_VALID1) {
3064				err = ext4_ext_zeroout(inode, ex2);
3065				zero_ex.ee_block = ex2->ee_block;
3066				zero_ex.ee_len = cpu_to_le16(
3067						ext4_ext_get_actual_len(ex2));
3068				ext4_ext_store_pblock(&zero_ex,
3069						      ext4_ext_pblock(ex2));
3070			} else {
3071				err = ext4_ext_zeroout(inode, ex);
3072				zero_ex.ee_block = ex->ee_block;
3073				zero_ex.ee_len = cpu_to_le16(
3074						ext4_ext_get_actual_len(ex));
3075				ext4_ext_store_pblock(&zero_ex,
3076						      ext4_ext_pblock(ex));
3077			}
3078		} else {
3079			err = ext4_ext_zeroout(inode, &orig_ex);
3080			zero_ex.ee_block = orig_ex.ee_block;
3081			zero_ex.ee_len = cpu_to_le16(
3082						ext4_ext_get_actual_len(&orig_ex));
3083			ext4_ext_store_pblock(&zero_ex,
3084					      ext4_ext_pblock(&orig_ex));
3085		}
3086
3087		if (err)
3088			goto fix_extent_len;
3089		/* update the extent length and mark as initialized */
3090		ex->ee_len = cpu_to_le16(ee_len);
3091		ext4_ext_try_to_merge(handle, inode, path, ex);
3092		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3093		if (err)
3094			goto fix_extent_len;
3095
3096		/* update extent status tree */
3097		err = ext4_es_zeroout(inode, &zero_ex);
3098
3099		goto out;
3100	} else if (err)
3101		goto fix_extent_len;
3102
3103out:
3104	ext4_ext_show_leaf(inode, path);
3105	return err;
3106
3107fix_extent_len:
3108	ex->ee_len = orig_ex.ee_len;
3109	ext4_ext_dirty(handle, inode, path + depth);
3110	return err;
3111}
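/*
 * Worked example (illustrative numbers): splitting the uninitialized
 * extent {ee_block 100, len 20, pblk 5000} at block 110 yields
 * ex = {100, len 10, pblk 5000} and ex2 = {110, len 10, pblk 5010},
 * newblock being split - ee_block + ext4_ext_pblock(ex).  If inserting
 * ex2 then fails with ENOSPC and EXT4_EXT_MAY_ZEROOUT is set, the
 * affected range is zeroed out instead and ex is restored to its full
 * length of 20, which also clears the uninitialized bit in ee_len.
 */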
3112
3113/*
3114 * ext4_split_extent() splits an extent and marks the extent covered
3115 * by @map as @split_flag indicates.
3116 *
3117 * It may result in splitting the extent into multiple extents (up to three).
3118 * There are three possibilities:
3119 *   a> There is no split required
3120 *   b> Splits into two extents: the split happens at either end of the extent
3121 *   c> Splits into three extents: someone is splitting in the middle of the extent
3122 *
3123 */
3124static int ext4_split_extent(handle_t *handle,
3125			      struct inode *inode,
3126			      struct ext4_ext_path *path,
3127			      struct ext4_map_blocks *map,
3128			      int split_flag,
3129			      int flags)
3130{
3131	ext4_lblk_t ee_block;
3132	struct ext4_extent *ex;
3133	unsigned int ee_len, depth;
3134	int err = 0;
3135	int uninitialized;
3136	int split_flag1, flags1;
3137	int allocated = map->m_len;
3138
3139	depth = ext_depth(inode);
3140	ex = path[depth].p_ext;
3141	ee_block = le32_to_cpu(ex->ee_block);
3142	ee_len = ext4_ext_get_actual_len(ex);
3143	uninitialized = ext4_ext_is_uninitialized(ex);
3144
3145	if (map->m_lblk + map->m_len < ee_block + ee_len) {
3146		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3147		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3148		if (uninitialized)
3149			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3150				       EXT4_EXT_MARK_UNINIT2;
3151		if (split_flag & EXT4_EXT_DATA_VALID2)
3152			split_flag1 |= EXT4_EXT_DATA_VALID1;
3153		err = ext4_split_extent_at(handle, inode, path,
3154				map->m_lblk + map->m_len, split_flag1, flags1);
3155		if (err)
3156			goto out;
3157	} else {
3158		allocated = ee_len - (map->m_lblk - ee_block);
3159	}
3160	/*
3161	 * The path must be updated because the previous ext4_split_extent_at()
3162	 * may have split the original leaf or zeroed out the extent.
3163	 */
3164	ext4_ext_drop_refs(path);
3165	path = ext4_ext_find_extent(inode, map->m_lblk, path);
3166	if (IS_ERR(path))
3167		return PTR_ERR(path);
3168	depth = ext_depth(inode);
3169	ex = path[depth].p_ext;
3170	uninitialized = ext4_ext_is_uninitialized(ex);
3171	split_flag1 = 0;
3172
3173	if (map->m_lblk >= ee_block) {
3174		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3175		if (uninitialized) {
3176			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3177			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3178						     EXT4_EXT_MARK_UNINIT2);
3179		}
3180		err = ext4_split_extent_at(handle, inode, path,
3181				map->m_lblk, split_flag1, flags);
3182		if (err)
3183			goto out;
3184	}
3185
3186	ext4_ext_show_leaf(inode, path);
3187out:
3188	return err ? err : allocated;
3189}
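/*
 * Worked example (illustrative numbers): for an uninitialized extent
 * {ee_block 100, len 50} and map->m_lblk == 110, map->m_len == 10,
 * the first ext4_split_extent_at() call above cuts at block 120 and
 * the second at block 110, producing [100,110) [110,120) [120,150);
 * only the middle piece is then handled further by the caller.
 */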
3190
3191/*
3192 * This function is called by ext4_ext_map_blocks() if someone tries to write
3193 * to an uninitialized extent. It may result in splitting the uninitialized
3194 * extent into multiple extents (up to three - one initialized and two
3195 * uninitialized).
3196 * There are three possibilities:
3197 *   a> There is no split required: Entire extent should be initialized
3198 *   b> Splits into two extents: the write happens at either end of the extent
3199 *   c> Splits into three extents: someone is writing in the middle of the extent
3200 *
3201 * Pre-conditions:
3202 *  - The extent pointed to by 'path' is uninitialized.
3203 *  - The extent pointed to by 'path' contains a superset
3204 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3205 *
3206 * Post-conditions on success:
3207 *  - the returned value is the number of blocks beyond map->m_lblk
3208 *    that are allocated and initialized.
3209 *    It is guaranteed to be >= map->m_len.
3210 */
3211static int ext4_ext_convert_to_initialized(handle_t *handle,
3212					   struct inode *inode,
3213					   struct ext4_map_blocks *map,
3214					   struct ext4_ext_path *path,
3215					   int flags)
3216{
3217	struct ext4_sb_info *sbi;
3218	struct ext4_extent_header *eh;
3219	struct ext4_map_blocks split_map;
3220	struct ext4_extent zero_ex;
3221	struct ext4_extent *ex, *abut_ex;
3222	ext4_lblk_t ee_block, eof_block;
3223	unsigned int ee_len, depth, map_len = map->m_len;
3224	int allocated = 0, max_zeroout = 0;
3225	int err = 0;
3226	int split_flag = 0;
3227
3228	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3229		"block %llu, max_blocks %u\n", inode->i_ino,
3230		(unsigned long long)map->m_lblk, map_len);
3231
3232	sbi = EXT4_SB(inode->i_sb);
3233	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3234		inode->i_sb->s_blocksize_bits;
3235	if (eof_block < map->m_lblk + map_len)
3236		eof_block = map->m_lblk + map_len;
3237
3238	depth = ext_depth(inode);
3239	eh = path[depth].p_hdr;
3240	ex = path[depth].p_ext;
3241	ee_block = le32_to_cpu(ex->ee_block);
3242	ee_len = ext4_ext_get_actual_len(ex);
3243	zero_ex.ee_len = 0;
3244
3245	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3246
3247	/* Pre-conditions */
3248	BUG_ON(!ext4_ext_is_uninitialized(ex));
3249	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3250
3251	/*
3252	 * Attempt to transfer newly initialized blocks from the currently
3253	 * uninitialized extent to its neighbor. This is much cheaper
3254	 * than an insertion followed by a merge as those involve costly
3255	 * memmove() calls. Transferring to the left is the common case in
3256	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3257	 * followed by append writes.
3258	 *
3259	 * Limitations of the current logic:
3260	 *  - L1: we do not deal with writes covering the whole extent.
3261	 *    This would require removing the extent if the transfer
3262	 *    is possible.
3263	 *  - L2: we only attempt to merge with an extent stored in the
3264	 *    same extent tree node.
3265	 */
3266	if ((map->m_lblk == ee_block) &&
3267		/* See if we can merge left */
3268		(map_len < ee_len) &&		/*L1*/
3269		(ex > EXT_FIRST_EXTENT(eh))) {	/*L2*/
3270		ext4_lblk_t prev_lblk;
3271		ext4_fsblk_t prev_pblk, ee_pblk;
3272		unsigned int prev_len;
3273
3274		abut_ex = ex - 1;
3275		prev_lblk = le32_to_cpu(abut_ex->ee_block);
3276		prev_len = ext4_ext_get_actual_len(abut_ex);
3277		prev_pblk = ext4_ext_pblock(abut_ex);
3278		ee_pblk = ext4_ext_pblock(ex);
3279
3280		/*
3281		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3282		 * upon those conditions:
3283		 * - C1: abut_ex is initialized,
3284		 * - C2: abut_ex is logically abutting ex,
3285		 * - C3: abut_ex is physically abutting ex,
3286		 * - C4: abut_ex can receive the additional blocks without
3287		 *   overflowing the (initialized) length limit.
3288		 */
3289		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
3290			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
3291			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
3292			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
3293			err = ext4_ext_get_access(handle, inode, path + depth);
3294			if (err)
3295				goto out;
3296
3297			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3298				map, ex, abut_ex);
3299
3300			/* Shift the start of ex by 'map_len' blocks */
3301			ex->ee_block = cpu_to_le32(ee_block + map_len);
3302			ext4_ext_store_pblock(ex, ee_pblk + map_len);
3303			ex->ee_len = cpu_to_le16(ee_len - map_len);
3304			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3305
3306			/* Extend abut_ex by 'map_len' blocks */
3307			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3308
3309			/* Result: number of initialized blocks past m_lblk */
3310			allocated = map_len;
3311		}
3312	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3313		   (map_len < ee_len) &&	/*L1*/
3314		   ex < EXT_LAST_EXTENT(eh)) {	/*L2*/
3315		/* See if we can merge right */
3316		ext4_lblk_t next_lblk;
3317		ext4_fsblk_t next_pblk, ee_pblk;
3318		unsigned int next_len;
3319
3320		abut_ex = ex + 1;
3321		next_lblk = le32_to_cpu(abut_ex->ee_block);
3322		next_len = ext4_ext_get_actual_len(abut_ex);
3323		next_pblk = ext4_ext_pblock(abut_ex);
3324		ee_pblk = ext4_ext_pblock(ex);
3325
3326		/*
3327		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3328		 * upon those conditions:
3329		 * - C1: abut_ex is initialized,
3330		 * - C2: abut_ex is logically abutting ex,
3331		 * - C3: abut_ex is physically abutting ex,
3332		 * - C4: abut_ex can receive the additional blocks without
3333		 *   overflowing the (initialized) length limit.
3334		 */
3335		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
3336		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
3337		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
3338		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
3339			err = ext4_ext_get_access(handle, inode, path + depth);
3340			if (err)
3341				goto out;
3342
3343			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3344				map, ex, abut_ex);
3345
3346			/* Shift the start of abut_ex by 'map_len' blocks */
3347			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3348			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3349			ex->ee_len = cpu_to_le16(ee_len - map_len);
3350			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3351
3352			/* Extend abut_ex by 'map_len' blocks */
3353			abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3354
3355			/* Result: number of initialized blocks past m_lblk */
3356			allocated = map_len;
3357		}
3358	}
3359	if (allocated) {
3360		/* Mark the block containing both extents as dirty */
3361		ext4_ext_dirty(handle, inode, path + depth);
3362
3363		/* Update path to point to the right extent */
3364		path[depth].p_ext = abut_ex;
3365		goto out;
3366	} else
3367		allocated = ee_len - (map->m_lblk - ee_block);
3368
3369	WARN_ON(map->m_lblk < ee_block);
3370	/*
3371	 * It is safe to convert extent to initialized via explicit
3372	 * zeroout only if the extent is fully inside i_size or new_size.
3373	 */
3374	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3375
3376	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3377		max_zeroout = sbi->s_extent_max_zeroout_kb >>
3378			(inode->i_sb->s_blocksize_bits - 10);
3379
3380	/* If extent is less than s_max_zeroout_kb, zeroout directly */
3381	if (max_zeroout && (ee_len <= max_zeroout)) {
3382		err = ext4_ext_zeroout(inode, ex);
3383		if (err)
3384			goto out;
3385		zero_ex.ee_block = ex->ee_block;
3386		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
3387		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
3388
3389		err = ext4_ext_get_access(handle, inode, path + depth);
3390		if (err)
3391			goto out;
3392		ext4_ext_mark_initialized(ex);
3393		ext4_ext_try_to_merge(handle, inode, path, ex);
3394		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3395		goto out;
3396	}
3397
3398	/*
3399	 * four cases:
3400	 * 1. split the extent into three extents.
3401	 * 2. split the extent into two extents, zeroout the first half.
3402	 * 3. split the extent into two extents, zeroout the second half.
3403 * 4. split the extent into two extents without zeroout.
3404	 */
3405	split_map.m_lblk = map->m_lblk;
3406	split_map.m_len = map->m_len;
3407
3408	if (max_zeroout && (allocated > map->m_len)) {
3409		if (allocated <= max_zeroout) {
3410			/* case 3 */
3411			zero_ex.ee_block =
3412					 cpu_to_le32(map->m_lblk);
3413			zero_ex.ee_len = cpu_to_le16(allocated);
3414			ext4_ext_store_pblock(&zero_ex,
3415				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3416			err = ext4_ext_zeroout(inode, &zero_ex);
3417			if (err)
3418				goto out;
3419			split_map.m_lblk = map->m_lblk;
3420			split_map.m_len = allocated;
3421		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3422			/* case 2 */
3423			if (map->m_lblk != ee_block) {
3424				zero_ex.ee_block = ex->ee_block;
3425				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3426							ee_block);
3427				ext4_ext_store_pblock(&zero_ex,
3428						      ext4_ext_pblock(ex));
3429				err = ext4_ext_zeroout(inode, &zero_ex);
3430				if (err)
3431					goto out;
3432			}
3433
3434			split_map.m_lblk = ee_block;
3435			split_map.m_len = map->m_lblk - ee_block + map->m_len;
3436			allocated = map->m_len;
3437		}
3438	}
3439
3440	allocated = ext4_split_extent(handle, inode, path,
3441				      &split_map, split_flag, flags);
3442	if (allocated < 0)
3443		err = allocated;
3444
3445out:
3446	/* If we have gotten a failure, don't zero out status tree */
3447	if (!err)
3448		err = ext4_es_zeroout(inode, &zero_ex);
3449	return err ? err : allocated;
3450}
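/*
 * Zeroout-threshold example (illustrative, assuming 4 KiB blocks and
 * s_extent_max_zeroout_kb == 32): max_zeroout == 32 >> (12 - 10) == 8
 * blocks, so an uninitialized extent of up to 8 blocks is zeroed out
 * and marked initialized in place rather than being split.
 */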
3451
3452/*
3453 * This function is called by ext4_ext_map_blocks() from
3454 * ext4_get_blocks_dio_write() when DIO is used to write
3455 * to an uninitialized extent.
3456 *
3457 * Writing to an uninitialized extent may result in splitting the uninitialized
3458 * extent into multiple initialized/uninitialized extents (up to three).
3459 * There are three possibilities:
3460 *   a> There is no split required: the entire extent should be uninitialized
3461 *   b> Splits into two extents: the write happens at either end of the extent
3462 *   c> Splits into three extents: someone is writing in the middle of the extent
3463 *
3464 * One or more index blocks may be needed if the extent tree grows after
3465 * the uninitialized extent is split. To prevent ENOSPC from occurring at
3466 * IO completion, we need to split the uninitialized extent before DIO
3467 * submits the IO. The uninitialized extent will be split into at most
3468 * three uninitialized extents. After IO completes, the part that was
3469 * written will be converted to initialized by the end_io callback
3470 * via ext4_convert_unwritten_extents().
3471 *
3472 * Returns the size of the uninitialized extent to be written on success.
3473 */
3474static int ext4_split_unwritten_extents(handle_t *handle,
3475					struct inode *inode,
3476					struct ext4_map_blocks *map,
3477					struct ext4_ext_path *path,
3478					int flags)
3479{
3480	ext4_lblk_t eof_block;
3481	ext4_lblk_t ee_block;
3482	struct ext4_extent *ex;
3483	unsigned int ee_len;
3484	int split_flag = 0, depth;
3485
3486	ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
3487		"block %llu, max_blocks %u\n", inode->i_ino,
3488		(unsigned long long)map->m_lblk, map->m_len);
3489
3490	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3491		inode->i_sb->s_blocksize_bits;
3492	if (eof_block < map->m_lblk + map->m_len)
3493		eof_block = map->m_lblk + map->m_len;
3494	/*
3495	 * It is safe to convert extent to initialized via explicit
3496	 * zeroout only if the extent is fully inside i_size or new_size.
3497	 */
3498	depth = ext_depth(inode);
3499	ex = path[depth].p_ext;
3500	ee_block = le32_to_cpu(ex->ee_block);
3501	ee_len = ext4_ext_get_actual_len(ex);
3502
3503	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3504	split_flag |= EXT4_EXT_MARK_UNINIT2;
3505	if (flags & EXT4_GET_BLOCKS_CONVERT)
3506		split_flag |= EXT4_EXT_DATA_VALID2;
3507	flags |= EXT4_GET_BLOCKS_PRE_IO;
3508	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3509}
3510
3511static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3512						struct inode *inode,
3513						struct ext4_map_blocks *map,
3514						struct ext4_ext_path *path)
3515{
3516	struct ext4_extent *ex;
3517	ext4_lblk_t ee_block;
3518	unsigned int ee_len;
3519	int depth;
3520	int err = 0;
3521
3522	depth = ext_depth(inode);
3523	ex = path[depth].p_ext;
3524	ee_block = le32_to_cpu(ex->ee_block);
3525	ee_len = ext4_ext_get_actual_len(ex);
3526
3527	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3528		"block %llu, max_blocks %u\n", inode->i_ino,
3529		  (unsigned long long)ee_block, ee_len);
3530
3531	/* If the extent is larger than requested, it is a clear sign that we
3532	 * still have some extent state machine issues left. So an extent split
3533	 * is still required.
3534	 * TODO: Once all related issues are fixed, this situation should be
3535	 * illegal.
3536	 */
3537	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3538#ifdef EXT4_DEBUG
3539		ext4_warning("Inode (%ld) finished: extent logical block %llu,"
3540			     " len %u; IO logical block %llu, len %u\n",
3541			     inode->i_ino, (unsigned long long)ee_block, ee_len,
3542			     (unsigned long long)map->m_lblk, map->m_len);
3543#endif
3544		err = ext4_split_unwritten_extents(handle, inode, map, path,
3545						   EXT4_GET_BLOCKS_CONVERT);
3546		if (err < 0)
3547			goto out;
3548		ext4_ext_drop_refs(path);
3549		path = ext4_ext_find_extent(inode, map->m_lblk, path);
3550		if (IS_ERR(path)) {
3551			err = PTR_ERR(path);
3552			goto out;
3553		}
3554		depth = ext_depth(inode);
3555		ex = path[depth].p_ext;
3556	}
3557
3558	err = ext4_ext_get_access(handle, inode, path + depth);
3559	if (err)
3560		goto out;
3561	/* first mark the extent as initialized */
3562	ext4_ext_mark_initialized(ex);
3563
3564	/* note: ext4_ext_correct_indexes() isn't needed here because
3565	 * borders are not changed
3566	 */
3567	ext4_ext_try_to_merge(handle, inode, path, ex);
3568
3569	/* Mark modified extent as dirty */
3570	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3571out:
3572	ext4_ext_show_leaf(inode, path);
3573	return err;
3574}
3575
3576static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3577			sector_t block, int count)
3578{
3579	int i;
3580	for (i = 0; i < count; i++)
3581		unmap_underlying_metadata(bdev, block + i);
3582}
3583
3584/*
3585 * Handle EOFBLOCKS_FL flag, clearing it if necessary
3586 */
3587static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3588			      ext4_lblk_t lblk,
3589			      struct ext4_ext_path *path,
3590			      unsigned int len)
3591{
3592	int i, depth;
3593	struct ext4_extent_header *eh;
3594	struct ext4_extent *last_ex;
3595
3596	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3597		return 0;
3598
3599	depth = ext_depth(inode);
3600	eh = path[depth].p_hdr;
3601
3602	/*
3603	 * We're going to remove EOFBLOCKS_FL entirely in future so we
3604	 * do not care for this case anymore. Simply remove the flag
3605	 * if there are no extents.
3606	 */
3607	if (unlikely(!eh->eh_entries))
3608		goto out;
3609	last_ex = EXT_LAST_EXTENT(eh);
3610	/*
3611	 * We should clear the EOFBLOCKS_FL flag if we are writing the
3612	 * last block in the last extent in the file.  We test this by
3613	 * first checking to see if the caller to
3614	 * ext4_ext_get_blocks() was interested in the last block (or
3615	 * a block beyond the last block) in the current extent.  If
3616	 * this turns out to be false, we can bail out from this
3617	 * function immediately.
3618	 */
3619	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3620	    ext4_ext_get_actual_len(last_ex))
3621		return 0;
3622	/*
3623	 * If the caller does appear to be planning to write at or
3624	 * beyond the end of the current extent, we then test to see
3625	 * if the current extent is the last extent in the file, by
3626	 * checking to make sure it was reached via the rightmost node
3627	 * at each level of the tree.
3628	 */
3629	for (i = depth-1; i >= 0; i--)
3630		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3631			return 0;
3632out:
3633	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3634	return ext4_mark_inode_dirty(handle, inode);
3635}
3636
3637/**
3638 * ext4_find_delalloc_range: find delayed allocated block in the given range.
3639 *
3640 * Return 1 if there is a delalloc block in the range, otherwise 0.
3641 */
3642int ext4_find_delalloc_range(struct inode *inode,
3643			     ext4_lblk_t lblk_start,
3644			     ext4_lblk_t lblk_end)
3645{
3646	struct extent_status es;
3647
3648	ext4_es_find_delayed_extent(inode, lblk_start, &es);
3649	if (es.es_len == 0)
3650		return 0; /* there is no delay extent in this tree */
3651	else if (es.es_lblk <= lblk_start &&
3652		 lblk_start < es.es_lblk + es.es_len)
3653		return 1;
3654	else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
3655		return 1;
3656	else
3657		return 0;
3658}
3659
3660int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3661{
3662	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3663	ext4_lblk_t lblk_start, lblk_end;
3664	lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3665	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3666
3667	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
3668}
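/*
 * Cluster-rounding example (illustrative, cluster ratio 16): for
 * lblk == 37, lblk_start == (37 & ~15) == 32 and lblk_end == 47, so
 * the whole cluster covering blocks 32..47 is scanned for delayed
 * blocks.
 */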
3669
3670/**
3671 * Determines how many complete clusters (out of those specified by the 'map')
3672 * are under delalloc and were reserved quota for.
3673 * This function is called when we are writing out the blocks that were
3674 * originally written with their allocation delayed, but then the space was
3675 * allocated using fallocate() before the delayed allocation could be resolved.
3676 * The cases to look for are:
3677 * ('=' indicates delayed allocated blocks
3678 *  '-' indicates non-delayed allocated blocks)
3679 * (a) partial clusters towards beginning and/or end outside of allocated range
3680 *     are not delalloc'ed.
3681 *	Ex:
3682 *	|----c---=|====c====|====c====|===-c----|
3683 *	         |++++++ allocated ++++++|
3684 *	==> 4 complete clusters in above example
3685 *
3686 * (b) partial cluster (outside of allocated range) towards either end is
3687 *     marked for delayed allocation. In this case, we will exclude that
3688 *     cluster.
3689 *	Ex:
3690 *	|----====c========|========c========|
3691 *	     |++++++ allocated ++++++|
3692 *	==> 1 complete cluster in above example
3693 *
3694 *	Ex:
3695 *	|================c================|
3696 *            |++++++ allocated ++++++|
3697 *	==> 0 complete clusters in above example
3698 *
3699 * ext4_da_update_reserve_space() will be called only if we
3700 * determine here that there were some "entire" clusters that span
3701 * this 'allocated' range.
3702 * In the non-bigalloc case, this function will just end up returning num_blks
3703 * without ever calling ext4_find_delalloc_range.
3704 */
3705static unsigned int
3706get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3707			   unsigned int num_blks)
3708{
3709	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3710	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3711	ext4_lblk_t lblk_from, lblk_to, c_offset;
3712	unsigned int allocated_clusters = 0;
3713
3714	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3715	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3716
3717	/* max possible clusters for this allocation */
3718	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3719
3720	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3721
3722	/* Check towards left side */
3723	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3724	if (c_offset) {
3725		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3726		lblk_to = lblk_from + c_offset - 1;
3727
3728		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3729			allocated_clusters--;
3730	}
3731
3732	/* Now check towards right. */
3733	c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3734	if (allocated_clusters && c_offset) {
3735		lblk_from = lblk_start + num_blks;
3736		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3737
3738		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3739			allocated_clusters--;
3740	}
3741
3742	return allocated_clusters;
3743}
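
/*
 * Worked example (hypothetical numbers): with s_cluster_ratio = 4,
 * lblk_start = 6 and num_blks = 4, the allocation covers blocks [6, 9],
 * i.e. clusters 1 and 2, so allocated_clusters starts at 2.  The left
 * partial range is blocks [4, 5] (c_offset = 6 & 3 = 2) and the right
 * partial range is blocks [10, 11] ((6 + 4) & 3 = 2, leaving two
 * trailing blocks in cluster 2).  Each partial range that turns out to
 * be delalloc'ed removes one cluster from the count, which is exactly
 * case (b) in the comment above.
 */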
3744
3745static int
3746ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3747			struct ext4_map_blocks *map,
3748			struct ext4_ext_path *path, int flags,
3749			unsigned int allocated, ext4_fsblk_t newblock)
3750{
3751	int ret = 0;
3752	int err = 0;
3753	ext4_io_end_t *io = ext4_inode_aio(inode);
3754
3755	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3756		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
3757		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3758		  flags, allocated);
3759	ext4_ext_show_leaf(inode, path);
3760
3761	/*
3762	 * When writing into uninitialized space, we should not fail to
3763	 * allocate metadata blocks for the new extent block if needed.
3764	 */
3765	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3766
3767	trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
3768						    allocated, newblock);
3769
3770	/* get_block() is called before submitting the IO; split the extent */
3771	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3772		ret = ext4_split_unwritten_extents(handle, inode, map,
3773						   path, flags);
3774		if (ret <= 0)
3775			goto out;
3776		/*
3777		 * Flag the inode (non-AIO case) or the end_io struct (AIO
3778		 * case) so that this IO is converted to written when it
3779		 * completes.
3780		 */
3781		if (io)
3782			ext4_set_io_unwritten_flag(inode, io);
3783		else
3784			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3785		map->m_flags |= EXT4_MAP_UNWRITTEN;
3786		if (ext4_should_dioread_nolock(inode))
3787			map->m_flags |= EXT4_MAP_UNINIT;
3788		goto out;
3789	}
3790	/* IO end_io completed: convert the filled extent to written */
3791	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3792		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
3793							path);
3794		if (ret >= 0) {
3795			ext4_update_inode_fsync_trans(handle, inode, 1);
3796			err = check_eofblocks_fl(handle, inode, map->m_lblk,
3797						 path, map->m_len);
3798		} else
3799			err = ret;
3800		map->m_flags |= EXT4_MAP_MAPPED;
3801		if (allocated > map->m_len)
3802			allocated = map->m_len;
3803		map->m_len = allocated;
3804		goto out2;
3805	}
3806	/* buffered IO case */
3807	/*
3808	 * A repeated fallocate creation request:
3809	 * we already have an unwritten extent.
3810	 */
3811	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3812		map->m_flags |= EXT4_MAP_UNWRITTEN;
3813		goto map_out;
3814	}
3815
3816	/* buffered READ or buffered write_begin() lookup */
3817	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3818		/*
3819		 * We have blocks reserved already.  We
3820		 * return allocated blocks so that delalloc
3821		 * won't do block reservation for us.  But
3822		 * the buffer head will be unmapped so that
3823		 * a read from the block returns 0s.
3824		 */
3825		map->m_flags |= EXT4_MAP_UNWRITTEN;
3826		goto out1;
3827	}
3828
3829	/* buffered write, writepage time, convert */
3830	ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
3831	if (ret >= 0)
3832		ext4_update_inode_fsync_trans(handle, inode, 1);
3833out:
3834	if (ret <= 0) {
3835		err = ret;
3836		goto out2;
3837	} else
3838		allocated = ret;
3839	map->m_flags |= EXT4_MAP_NEW;
3840	/*
3841	 * If we allocated more blocks than requested,
3842	 * we need to make sure we unmap the extra blocks
3843	 * allocated. The blocks actually needed will get
3844	 * unmapped later, when we find the buffer_head marked
3845	 * new.
3846	 */
3847	if (allocated > map->m_len) {
3848		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3849					newblock + map->m_len,
3850					allocated - map->m_len);
3851		allocated = map->m_len;
3852	}
3853	map->m_len = allocated;
3854
3855	/*
3856	 * If we have done fallocate at an offset that is already
3857	 * delayed allocated, we would have block and quota
3858	 * reservations done in the delayed write path.
3859	 * But fallocate would have already updated the quota and block
3860	 * counts for this offset, so cancel these reservations.
3861	 */
3862	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3863		unsigned int reserved_clusters;
3864		reserved_clusters = get_reserved_cluster_alloc(inode,
3865				map->m_lblk, map->m_len);
3866		if (reserved_clusters)
3867			ext4_da_update_reserve_space(inode,
3868						     reserved_clusters,
3869						     0);
3870	}
3871
3872map_out:
3873	map->m_flags |= EXT4_MAP_MAPPED;
3874	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3875		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3876					 map->m_len);
3877		if (err < 0)
3878			goto out2;
3879	}
3880out1:
3881	if (allocated > map->m_len)
3882		allocated = map->m_len;
3883	ext4_ext_show_leaf(inode, path);
3884	map->m_pblk = newblock;
3885	map->m_len = allocated;
3886out2:
3887	if (path) {
3888		ext4_ext_drop_refs(path);
3889		kfree(path);
3890	}
3891	return err ? err : allocated;
3892}
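
/*
 * Summary of the dispatch above (descriptive only):
 *
 *	EXT4_GET_BLOCKS_PRE_IO      split the extent, keep it unwritten;
 *	                            conversion happens at end_io time
 *	EXT4_GET_BLOCKS_CONVERT     end_io has completed; convert the
 *	                            extent to written
 *	EXT4_GET_BLOCKS_UNINIT_EXT  repeated fallocate; the unwritten
 *	                            extent already exists
 *	!EXT4_GET_BLOCKS_CREATE     lookup only; report the mapping as
 *	                            unwritten
 *	otherwise                   buffered writeout; convert the extent
 *	                            to initialized
 */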
3893
3894/*
3895 * get_implied_cluster_alloc - check to see if the requested
3896 * allocation (in the map structure) overlaps with a cluster already
3897 * allocated in an extent.
3898 *	@sb	The filesystem superblock structure
3899 *	@map	The requested lblk->pblk mapping
3900 *	@ex	The extent structure which might contain an implied
3901 *			cluster allocation
3902 *
3903 * This function is called by ext4_ext_map_blocks() after we failed to
3904 * find blocks that were already in the inode's extent tree.  Hence,
3905 * we know that the beginning of the requested region cannot overlap
3906 * the extent from the inode's extent tree.  There are three cases we
3907 * want to catch.  The first is this case:
3908 *
3909 *		 |--- cluster # N--|
3910 *    |--- extent ---|	|---- requested region ---|
3911 *			|==========|
3912 *
3913 * The second case that we need to test for is this one:
3914 *
3915 *   |--------- cluster # N ----------------|
3916 *	   |--- requested region --|   |------- extent ----|
3917 *	   |=======================|
3918 *
3919 * The third case is when the requested region lies between two extents
3920 * within the same cluster:
3921 *          |------------- cluster # N-------------|
3922 * |----- ex -----|                  |---- ex_right ----|
3923 *                  |------ requested region ------|
3924 *                  |================|
3925 *
3926 * In each of the above cases, we need to set the map->m_pblk and
3927 * map->m_len so they correspond to the extent labelled as
3928 * "|====|" from cluster #N, since it is already in use for data in
3929 * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
3930 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3931 * as a new "allocated" block region.  Otherwise, we will return 0 and
3932 * ext4_ext_map_blocks() will then allocate one or more new clusters
3933 * by calling ext4_mb_new_blocks().
3934 */
3935static int get_implied_cluster_alloc(struct super_block *sb,
3936				     struct ext4_map_blocks *map,
3937				     struct ext4_extent *ex,
3938				     struct ext4_ext_path *path)
3939{
3940	struct ext4_sb_info *sbi = EXT4_SB(sb);
3941	ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3942	ext4_lblk_t ex_cluster_start, ex_cluster_end;
3943	ext4_lblk_t rr_cluster_start;
3944	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3945	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3946	unsigned short ee_len = ext4_ext_get_actual_len(ex);
3947
3948	/* The extent passed in that we are trying to match */
3949	ex_cluster_start = EXT4_B2C(sbi, ee_block);
3950	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3951
3952	/* The requested region passed into ext4_map_blocks() */
3953	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3954
3955	if ((rr_cluster_start == ex_cluster_end) ||
3956	    (rr_cluster_start == ex_cluster_start)) {
3957		if (rr_cluster_start == ex_cluster_end)
3958			ee_start += ee_len - 1;
3959		map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3960			c_offset;
3961		map->m_len = min(map->m_len,
3962				 (unsigned) sbi->s_cluster_ratio - c_offset);
3963		/*
3964		 * Check for and handle this case:
3965		 *
3966		 *   |--------- cluster # N-------------|
3967		 *		       |------- extent ----|
3968		 *	   |--- requested region ---|
3969		 *	   |===========|
3970		 */
3971
3972		if (map->m_lblk < ee_block)
3973			map->m_len = min(map->m_len, ee_block - map->m_lblk);
3974
3975		/*
3976		 * Check for the case where there is already another allocated
3977		 * block to the right of 'ex' but before the end of the cluster.
3978		 *
3979		 *          |------------- cluster # N-------------|
3980		 * |----- ex -----|                  |---- ex_right ----|
3981		 *                  |------ requested region ------|
3982		 *                  |================|
3983		 */
3984		if (map->m_lblk > ee_block) {
3985			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3986			map->m_len = min(map->m_len, next - map->m_lblk);
3987		}
3988
3989		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3990		return 1;
3991	}
3992
3993	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3994	return 0;
3995}
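
/*
 * Worked example for the first case above (hypothetical numbers, with
 * s_cluster_ratio = 4): extent ex maps logical blocks [8, 9] to
 * physical blocks [100, 101], and the request is m_lblk = 10,
 * m_len = 6.  Cluster #2 covers logical [8, 11] and physical
 * [100, 103].  Then c_offset = 10 & 3 = 2 and rr_cluster_start ==
 * ex_cluster_end, so we return m_pblk = (101 & ~3) + 2 = 102 and
 * m_len = min(6, 4 - 2) = 2: logical blocks [10, 11] reuse physical
 * blocks [102, 103] from the cluster that is already allocated.
 */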
3996
3997
3998/*
3999 * Block allocation/map/preallocation routine for extent-based files
4000 *
4001 *
4002 * Needs to be called with
4003 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
4004 * (i.e., create is zero); otherwise with down_write(&EXT4_I(inode)->i_data_sem).
4005 *
4006 * return > 0, number of blocks already mapped/allocated
4007 *          if create == 0 and these are pre-allocated blocks
4008 *          	buffer head is unmapped
4009 *          otherwise blocks are mapped
4010 *
4011 * return = 0, if plain look up failed (blocks have not been allocated)
4012 *          buffer head is unmapped
4013 *
4014 * return < 0, error case.
4015 */
4016int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4017			struct ext4_map_blocks *map, int flags)
4018{
4019	struct ext4_ext_path *path = NULL;
4020	struct ext4_extent newex, *ex, *ex2;
4021	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4022	ext4_fsblk_t newblock = 0;
4023	int free_on_err = 0, err = 0, depth;
4024	unsigned int allocated = 0, offset = 0;
4025	unsigned int allocated_clusters = 0;
4026	struct ext4_allocation_request ar;
4027	ext4_io_end_t *io = ext4_inode_aio(inode);
4028	ext4_lblk_t cluster_offset;
4029	int set_unwritten = 0;
4030
4031	ext_debug("blocks %u/%u requested for inode %lu\n",
4032		  map->m_lblk, map->m_len, inode->i_ino);
4033	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4034
4035	/* find extent for this block */
4036	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
4037	if (IS_ERR(path)) {
4038		err = PTR_ERR(path);
4039		path = NULL;
4040		goto out2;
4041	}
4042
4043	depth = ext_depth(inode);
4044
4045	/*
4046	 * a consistent leaf must not be empty;
4047	 * this situation is possible, though, _during_ tree modification;
4048	 * this is why the assertion can't be put in ext4_ext_find_extent()
4049	 */
4050	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4051		EXT4_ERROR_INODE(inode, "bad extent address "
4052				 "lblock: %lu, depth: %d pblock %lld",
4053				 (unsigned long) map->m_lblk, depth,
4054				 path[depth].p_block);
4055		err = -EIO;
4056		goto out2;
4057	}
4058
4059	ex = path[depth].p_ext;
4060	if (ex) {
4061		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4062		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4063		unsigned short ee_len;
4064
4065		/*
4066		 * Uninitialized extents are treated as holes, except that
4067		 * we split out initialized portions during a write.
4068		 */
4069		ee_len = ext4_ext_get_actual_len(ex);
4070
4071		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4072
4073		/* if found extent covers block, simply return it */
4074		if (in_range(map->m_lblk, ee_block, ee_len)) {
4075			newblock = map->m_lblk - ee_block + ee_start;
4076			/* number of remaining blocks in the extent */
4077			allocated = ee_len - (map->m_lblk - ee_block);
4078			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4079				  ee_block, ee_len, newblock);
4080
4081			if (!ext4_ext_is_uninitialized(ex))
4082				goto out;
4083
4084			allocated = ext4_ext_handle_uninitialized_extents(
4085				handle, inode, map, path, flags,
4086				allocated, newblock);
4087			goto out3;
4088		}
4089	}
4090
4091	if ((sbi->s_cluster_ratio > 1) &&
4092	    ext4_find_delalloc_cluster(inode, map->m_lblk))
4093		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4094
4095	/*
4096	 * the requested block isn't allocated yet;
4097	 * we can't create blocks if the create flag is zero
4098	 */
4099	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4100		/*
4101		 * put the just-found gap into the cache to speed up
4102		 * subsequent requests
4103		 */
4104		if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
4105			ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4106		goto out2;
4107	}
4108
4109	/*
4110	 * Okay, we need to do block allocation.
4111	 */
4112	map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4113	newex.ee_block = cpu_to_le32(map->m_lblk);
4114	cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
4115
4116	/*
4117	 * If we are doing bigalloc, check to see if the extent returned
4118	 * by ext4_ext_find_extent() implies a cluster we can use.
4119	 */
4120	if (cluster_offset && ex &&
4121	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4122		ar.len = allocated = map->m_len;
4123		newblock = map->m_pblk;
4124		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4125		goto got_allocated_blocks;
4126	}
4127
4128	/* find neighbour allocated blocks */
4129	ar.lleft = map->m_lblk;
4130	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4131	if (err)
4132		goto out2;
4133	ar.lright = map->m_lblk;
4134	ex2 = NULL;
4135	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4136	if (err)
4137		goto out2;
4138
4139	/* Check if the extent after searching to the right implies a
4140	 * cluster we can use. */
4141	if ((sbi->s_cluster_ratio > 1) && ex2 &&
4142	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4143		ar.len = allocated = map->m_len;
4144		newblock = map->m_pblk;
4145		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4146		goto got_allocated_blocks;
4147	}
4148
4149	/*
4150	 * See if request is beyond maximum number of blocks we can have in
4151	 * a single extent. For an initialized extent this limit is
4152	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4153	 * EXT_UNINIT_MAX_LEN.
4154	 */
4155	if (map->m_len > EXT_INIT_MAX_LEN &&
4156	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4157		map->m_len = EXT_INIT_MAX_LEN;
4158	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4159		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4160		map->m_len = EXT_UNINIT_MAX_LEN;
4161
4162	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4163	newex.ee_len = cpu_to_le16(map->m_len);
4164	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4165	if (err)
4166		allocated = ext4_ext_get_actual_len(&newex);
4167	else
4168		allocated = map->m_len;
4169
4170	/* allocate new block */
4171	ar.inode = inode;
4172	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4173	ar.logical = map->m_lblk;
4174	/*
4175	 * We calculate the offset from the beginning of the cluster
4176	 * for the logical block number, since when we allocate a
4177	 * physical cluster, the physical block should start at the
4178	 * same offset from the beginning of the cluster.  This is
4179	 * needed so that future calls to get_implied_cluster_alloc()
4180	 * work correctly.
4181	 */
4182	offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4183	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4184	ar.goal -= offset;
4185	ar.logical -= offset;
4186	if (S_ISREG(inode->i_mode))
4187		ar.flags = EXT4_MB_HINT_DATA;
4188	else
4189		/* disable in-core preallocation for non-regular files */
4190		ar.flags = 0;
4191	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4192		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4193	newblock = ext4_mb_new_blocks(handle, &ar, &err);
4194	if (!newblock)
4195		goto out2;
4196	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4197		  ar.goal, newblock, allocated);
4198	free_on_err = 1;
4199	allocated_clusters = ar.len;
4200	ar.len = EXT4_C2B(sbi, ar.len) - offset;
4201	if (ar.len > allocated)
4202		ar.len = allocated;
4203
4204got_allocated_blocks:
4205	/* try to insert new extent into found leaf and return */
4206	ext4_ext_store_pblock(&newex, newblock + offset);
4207	newex.ee_len = cpu_to_le16(ar.len);
4208	/* Mark uninitialized */
4209	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4210		ext4_ext_mark_uninitialized(&newex);
4211		map->m_flags |= EXT4_MAP_UNWRITTEN;
4212		/*
4213		 * An io_end structure is created for every IO write to an
4214		 * uninitialized extent. To avoid unnecessary conversion,
4215		 * here we flag only the IO that really needs the conversion.
4216		 * For the non-async direct IO case, flag the inode state
4217		 * so that we perform the conversion when IO is done.
4218		 */
4219		if ((flags & EXT4_GET_BLOCKS_PRE_IO))
4220			set_unwritten = 1;
4221		if (ext4_should_dioread_nolock(inode))
4222			map->m_flags |= EXT4_MAP_UNINIT;
4223	}
4224
4225	err = 0;
4226	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4227		err = check_eofblocks_fl(handle, inode, map->m_lblk,
4228					 path, ar.len);
4229	if (!err)
4230		err = ext4_ext_insert_extent(handle, inode, path,
4231					     &newex, flags);
4232
4233	if (!err && set_unwritten) {
4234		if (io)
4235			ext4_set_io_unwritten_flag(inode, io);
4236		else
4237			ext4_set_inode_state(inode,
4238					     EXT4_STATE_DIO_UNWRITTEN);
4239	}
4240
4241	if (err && free_on_err) {
4242		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4243			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4244		/* free data blocks we just allocated */
4245		/* not a good idea to call discard here directly,
4246		 * but otherwise we'd need to call it on every free() */
4247		ext4_discard_preallocations(inode);
4248		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4249				 ext4_ext_get_actual_len(&newex), fb_flags);
4250		goto out2;
4251	}
4252
4253	/* previous routine could use block we allocated */
4254	newblock = ext4_ext_pblock(&newex);
4255	allocated = ext4_ext_get_actual_len(&newex);
4256	if (allocated > map->m_len)
4257		allocated = map->m_len;
4258	map->m_flags |= EXT4_MAP_NEW;
4259
4260	/*
4261	 * Update reserved blocks/metadata blocks after successful
4262	 * block allocation which had been deferred till now.
4263	 */
4264	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4265		unsigned int reserved_clusters;
4266		/*
4267		 * Check how many clusters we had reserved for this allocated range
4268		 */
4269		reserved_clusters = get_reserved_cluster_alloc(inode,
4270						map->m_lblk, allocated);
4271		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4272			if (reserved_clusters) {
4273				/*
4274				 * We have clusters reserved for this range.
4275				 * But since we are not doing actual allocation
4276				 * and are simply using blocks from previously
4277				 * allocated cluster, we should release the
4278				 * reservation and not claim quota.
4279				 */
4280				ext4_da_update_reserve_space(inode,
4281						reserved_clusters, 0);
4282			}
4283		} else {
4284			BUG_ON(allocated_clusters < reserved_clusters);
4285			if (reserved_clusters < allocated_clusters) {
4286				struct ext4_inode_info *ei = EXT4_I(inode);
4287				int reservation = allocated_clusters -
4288						  reserved_clusters;
4289				/*
4290				 * It seems we claimed a few clusters outside of
4291				 * the range of this allocation. We should give
4292				 * them back to the reservation pool. This can
4293				 * happen in the following case:
4294				 *
4295				 * * Suppose s_cluster_ratio is 4 (i.e., each
4296				 *   cluster has 4 blocks). Thus, the clusters
4297				 *   are [0-3],[4-7],[8-11]...
4298				 * * First comes delayed allocation write for
4299				 *   logical blocks 10 & 11. Since there were no
4300				 *   previous delayed allocated blocks in the
4301				 *   range [8-11], we would reserve 1 cluster
4302				 *   for this write.
4303				 * * Next comes write for logical blocks 3 to 8.
4304				 *   In this case, we will reserve 2 clusters
4305				 *   (for [0-3] and [4-7], but not for [8-11], as
4306				 *   that range already has delayed allocated blocks).
4307				 *   Thus total reserved clusters now becomes 3.
4308				 * * Now, during the delayed allocation writeout
4309				 *   time, we will first write blocks [3-8] and
4310				 *   allocate 3 clusters for writing these
4311				 *   blocks. Also, we would claim all these
4312				 *   three clusters above.
4313				 * * Now when we come here to writeout the
4314				 *   blocks [10-11], we would expect to claim
4315				 *   the reservation of 1 cluster we had made
4316				 *   (and we would claim it since there are no
4317				 *   more delayed allocated blocks in the range
4318				 *   [8-11]). But our reserved cluster count had
4319				 *   already gone to 0.
4320				 *
4321				 *   Thus, at step 4 above, when we determine
4322				 *   that there are still some unwritten delayed
4323				 *   allocated blocks outside of our current
4324				 *   block range, we should increment the
4325				 *   reserved clusters count so that when the
4326				 *   remaining blocks finally get written, we
4327				 *   could claim them.
4328				 */
4329				dquot_reserve_block(inode,
4330						EXT4_C2B(sbi, reservation));
4331				spin_lock(&ei->i_block_reservation_lock);
4332				ei->i_reserved_data_blocks += reservation;
4333				spin_unlock(&ei->i_block_reservation_lock);
4334			}
4335			/*
4336			 * We will claim quota for all newly allocated blocks.
4337			 * We're updating the reserved space *after* the
4338			 * correction above so we do not accidentally free
4339			 * all the metadata reservation because we might
4340			 * actually need it later on.
4341			 */
4342			ext4_da_update_reserve_space(inode, allocated_clusters,
4343							1);
4344		}
4345	}
4346
4347	/*
4348	 * Cache the extent and update transaction to commit on fdatasync only
4349	 * when it is _not_ an uninitialized extent.
4350	 */
4351	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
4352		ext4_update_inode_fsync_trans(handle, inode, 1);
4353	else
4354		ext4_update_inode_fsync_trans(handle, inode, 0);
4355out:
4356	if (allocated > map->m_len)
4357		allocated = map->m_len;
4358	ext4_ext_show_leaf(inode, path);
4359	map->m_flags |= EXT4_MAP_MAPPED;
4360	map->m_pblk = newblock;
4361	map->m_len = allocated;
4362out2:
4363	if (path) {
4364		ext4_ext_drop_refs(path);
4365		kfree(path);
4366	}
4367
4368out3:
4369	trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
4370
4371	return err ? err : allocated;
4372}
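
/*
 * Minimal caller sketch for the return convention above (illustration
 * only; example_ext_lookup() is hypothetical, and real callers go
 * through ext4_map_blocks(), which takes i_data_sem itself):
 */
static int __maybe_unused example_ext_lookup(struct inode *inode,
					     ext4_lblk_t lblk)
{
	struct ext4_map_blocks map;
	int ret;

	map.m_lblk = lblk;
	map.m_len = 1;
	map.m_flags = 0;

	down_read(&EXT4_I(inode)->i_data_sem);
	/* create == 0 (no EXT4_GET_BLOCKS_CREATE): pure lookup */
	ret = ext4_ext_map_blocks(NULL, inode, &map, 0);
	up_read(&EXT4_I(inode)->i_data_sem);

	if (ret < 0)
		return ret;	/* error */
	if (ret == 0)
		return 0;	/* hole: nothing mapped */
	/* ret > 0: map.m_pblk and map.m_len now describe the mapping */
	return ret;
}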
4373
4374void ext4_ext_truncate(handle_t *handle, struct inode *inode)
4375{
4376	struct super_block *sb = inode->i_sb;
4377	ext4_lblk_t last_block;
4378	int err = 0;
4379
4380	/*
4381	 * TODO: optimization is possible here.
4382	 * Probably we need not scan at all,
4383	 * because page truncation is enough.
4384	 */
4385
4386	/* we have to know where to truncate from in crash case */
4387	EXT4_I(inode)->i_disksize = inode->i_size;
4388	ext4_mark_inode_dirty(handle, inode);
4389
4390	last_block = (inode->i_size + sb->s_blocksize - 1)
4391			>> EXT4_BLOCK_SIZE_BITS(sb);
4392	err = ext4_es_remove_extent(inode, last_block,
4393				    EXT_MAX_BLOCKS - last_block);
4394	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4395}
4396
4397static void ext4_falloc_update_inode(struct inode *inode,
4398				int mode, loff_t new_size, int update_ctime)
4399{
4400	struct timespec now;
4401
4402	if (update_ctime) {
4403		now = current_fs_time(inode->i_sb);
4404		if (!timespec_equal(&inode->i_ctime, &now))
4405			inode->i_ctime = now;
4406	}
4407	/*
4408	 * Update only when preallocation was requested beyond
4409	 * the file size.
4410	 */
4411	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4412		if (new_size > i_size_read(inode))
4413			i_size_write(inode, new_size);
4414		if (new_size > EXT4_I(inode)->i_disksize)
4415			ext4_update_i_disksize(inode, new_size);
4416	} else {
4417		/*
4418		 * Mark that we allocate beyond EOF so the subsequent truncate
4419		 * can proceed even if the new size is the same as i_size.
4420		 */
4421		if (new_size > i_size_read(inode))
4422			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4423	}
4424
4425}
4426
4427/*
4428 * Preallocate space for a file. This implements ext4's fallocate file
4429 * operation, which gets called from the sys_fallocate system call.
4430 * For block-mapped files, posix_fallocate should fall back to the method
4431 * of writing zeroes to the required new blocks (the same behavior which is
4432 * expected for file systems which do not support fallocate() system call).
4433 */
4434long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4435{
4436	struct inode *inode = file_inode(file);
4437	handle_t *handle;
4438	loff_t new_size;
4439	unsigned int max_blocks;
4440	int ret = 0;
4441	int ret2 = 0;
4442	int retries = 0;
4443	int flags;
4444	struct ext4_map_blocks map;
4445	unsigned int credits, blkbits = inode->i_blkbits;
4446
4447	/* Return error if mode is not supported */
4448	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4449		return -EOPNOTSUPP;
4450
4451	if (mode & FALLOC_FL_PUNCH_HOLE)
4452		return ext4_punch_hole(file, offset, len);
4453
4454	ret = ext4_convert_inline_data(inode);
4455	if (ret)
4456		return ret;
4457
4458	/*
4459	 * currently supporting (pre)allocate mode for extent-based
4460	 * files _only_
4461	 */
4462	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4463		return -EOPNOTSUPP;
4464
4465	trace_ext4_fallocate_enter(inode, offset, len, mode);
4466	map.m_lblk = offset >> blkbits;
4467	/*
4468	 * We can't just convert len to max_blocks because len need not be
4469	 * block-aligned; e.g. blocksize = 4096, offset = 3072, len = 2048
4470	 */
4471	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4472		- map.m_lblk;
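	/*
	 * Worked example: with blkbits = 12 (4k blocks), offset = 3072
	 * and len = 2048, the byte range [3072, 5120) touches blocks 0
	 * and 1; m_lblk = 0 and max_blocks = (8192 >> 12) - 0 = 2,
	 * whereas a naive len >> blkbits would give 0.
	 */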
4473	/*
4474	 * credits to insert 1 extent into extent tree
4475	 */
4476	credits = ext4_chunk_trans_blocks(inode, max_blocks);
4477	mutex_lock(&inode->i_mutex);
4478	ret = inode_newsize_ok(inode, (len + offset));
4479	if (ret) {
4480		mutex_unlock(&inode->i_mutex);
4481		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4482		return ret;
4483	}
4484	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4485	if (mode & FALLOC_FL_KEEP_SIZE)
4486		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4487	/*
4488	 * Don't normalize the request if it can fit in one extent so
4489	 * that it doesn't get unnecessarily split into multiple
4490	 * extents.
4491	 */
4492	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4493		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4494
4495retry:
4496	while (ret >= 0 && ret < max_blocks) {
4497		map.m_lblk = map.m_lblk + ret;
4498		map.m_len = max_blocks = max_blocks - ret;
4499		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4500					    credits);
4501		if (IS_ERR(handle)) {
4502			ret = PTR_ERR(handle);
4503			break;
4504		}
4505		ret = ext4_map_blocks(handle, inode, &map, flags);
4506		if (ret <= 0) {
4507#ifdef EXT4FS_DEBUG
4508			ext4_warning(inode->i_sb,
4509				     "inode #%lu: block %u: len %u: "
4510				     "ext4_ext_map_blocks returned %d",
4511				     inode->i_ino, map.m_lblk,
4512				     map.m_len, ret);
4513#endif
4514			ext4_mark_inode_dirty(handle, inode);
4515			ret2 = ext4_journal_stop(handle);
4516			break;
4517		}
4518		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4519						blkbits) >> blkbits))
4520			new_size = offset + len;
4521		else
4522			new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4523
4524		ext4_falloc_update_inode(inode, mode, new_size,
4525					 (map.m_flags & EXT4_MAP_NEW));
4526		ext4_mark_inode_dirty(handle, inode);
4527		if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4528			ext4_handle_sync(handle);
4529		ret2 = ext4_journal_stop(handle);
4530		if (ret2)
4531			break;
4532	}
4533	if (ret == -ENOSPC &&
4534			ext4_should_retry_alloc(inode->i_sb, &retries)) {
4535		ret = 0;
4536		goto retry;
4537	}
4538	mutex_unlock(&inode->i_mutex);
4539	trace_ext4_fallocate_exit(inode, offset, max_blocks,
4540				ret > 0 ? ret2 : ret);
4541	return ret > 0 ? ret2 : ret;
4542}
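
/*
 * Userspace view (illustration only; kept under "#if 0" so it is not
 * compiled as part of this file): the function above is reached via
 * the fallocate(2) system call, e.g. to preallocate 1 MiB past EOF
 * without changing the file size:
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>	/* open(), fallocate(), FALLOC_FL_KEEP_SIZE */
#include <stdio.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) != 0)
		perror("fallocate");
	return 0;
}
#endif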
4543
4544/*
4545 * This function converts a range of blocks to written extents.
4546 * The caller will pass the start offset and the size;
4547 * all unwritten extents within this range will be converted to
4548 * written extents.
4549 *
4550 * This function is called from the direct IO end_io callback
4551 * function to convert the fallocated extents after IO is completed.
4552 * Returns 0 on success.
4553 */
4554int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4555				    ssize_t len)
4556{
4557	handle_t *handle;
4558	unsigned int max_blocks;
4559	int ret = 0;
4560	int ret2 = 0;
4561	struct ext4_map_blocks map;
4562	unsigned int credits, blkbits = inode->i_blkbits;
4563
4564	map.m_lblk = offset >> blkbits;
4565	/*
4566	 * We can't just convert len to max_blocks because len need not be
4567	 * block-aligned; e.g. blocksize = 4096, offset = 3072, len = 2048
4568	 */
4569	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4570		      map.m_lblk);
4571	/*
4572	 * credits to insert 1 extent into extent tree
4573	 */
4574	credits = ext4_chunk_trans_blocks(inode, max_blocks);
4575	while (ret >= 0 && ret < max_blocks) {
4576		map.m_lblk += ret;
4577		map.m_len = (max_blocks -= ret);
4578		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
4579		if (IS_ERR(handle)) {
4580			ret = PTR_ERR(handle);
4581			break;
4582		}
4583		ret = ext4_map_blocks(handle, inode, &map,
4584				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4585		if (ret <= 0)
4586			ext4_warning(inode->i_sb,
4587				     "inode #%lu: block %u: len %u: "
4588				     "ext4_ext_map_blocks returned %d",
4589				     inode->i_ino, map.m_lblk,
4590				     map.m_len, ret);
4591		ext4_mark_inode_dirty(handle, inode);
4592		ret2 = ext4_journal_stop(handle);
4593		if (ret <= 0 || ret2)
4594			break;
4595	}
4596	return ret > 0 ? ret2 : ret;
4597}
4598
4599/*
4600 * If newes is not an existing extent (newes->es_pblk equals zero), find
4601 * the delayed extent at the start of newes, update newes accordingly, and
4602 * return the start of the next delayed extent.
4603 *
4604 * If newes is an existing extent (newes->es_pblk is not zero), return
4605 * the start of the next delayed extent, or EXT_MAX_BLOCKS if no delayed
4606 * extent is found. Leave newes unmodified.
4607 */
4608static int ext4_find_delayed_extent(struct inode *inode,
4609				    struct extent_status *newes)
4610{
4611	struct extent_status es;
4612	ext4_lblk_t block, next_del;
4613
4614	ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
4615
4616	if (newes->es_pblk == 0) {
4617		/*
4618		 * No extent in the extent tree contains block @newes->es_lblk,
4619		 * so the block may lie in 1) a hole or 2) a delayed extent.
4620		 */
4621		if (es.es_len == 0)
4622			/* A hole found. */
4623			return 0;
4624
4625		if (es.es_lblk > newes->es_lblk) {
4626			/* A hole found. */
4627			newes->es_len = min(es.es_lblk - newes->es_lblk,
4628					    newes->es_len);
4629			return 0;
4630		}
4631
4632		newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
4633	}
4634
4635	block = newes->es_lblk + newes->es_len;
4636	ext4_es_find_delayed_extent(inode, block, &es);
4637	if (es.es_len == 0)
4638		next_del = EXT_MAX_BLOCKS;
4639	else
4640		next_del = es.es_lblk;
4641
4642	return next_del;
4643}
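
/*
 * Example of the two modes (hypothetical numbers): for a query with
 * es_lblk = 100, es_len = 20 and es_pblk == 0, if the nearest delayed
 * extent starts at block 110 then blocks [100, 109] form a hole, so
 * newes->es_len is trimmed to 10 and 0 is returned.  If newes already
 * describes a written extent (es_pblk != 0), only the start of the
 * next delayed extent after it is reported.
 */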
4644/* the fiemap flags we can handle are specified here */
4645#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4646
4647static int ext4_xattr_fiemap(struct inode *inode,
4648				struct fiemap_extent_info *fieinfo)
4649{
4650	__u64 physical = 0;
4651	__u64 length;
4652	__u32 flags = FIEMAP_EXTENT_LAST;
4653	int blockbits = inode->i_sb->s_blocksize_bits;
4654	int error = 0;
4655
4656	/* in-inode? */
4657	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4658		struct ext4_iloc iloc;
4659		int offset;	/* offset of xattr in inode */
4660
4661		error = ext4_get_inode_loc(inode, &iloc);
4662		if (error)
4663			return error;
4664		physical = iloc.bh->b_blocknr << blockbits;
4665		offset = EXT4_GOOD_OLD_INODE_SIZE +
4666				EXT4_I(inode)->i_extra_isize;
4667		physical += offset;
4668		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4669		flags |= FIEMAP_EXTENT_DATA_INLINE;
4670		brelse(iloc.bh);
4671	} else { /* external block */
4672		physical = EXT4_I(inode)->i_file_acl << blockbits;
4673		length = inode->i_sb->s_blocksize;
4674	}
4675
4676	if (physical)
4677		error = fiemap_fill_next_extent(fieinfo, 0, physical,
4678						length, flags);
4679	return (error < 0 ? error : 0);
4680}
4681
4682int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4683		__u64 start, __u64 len)
4684{
4685	ext4_lblk_t start_blk;
4686	int error = 0;
4687
4688	if (ext4_has_inline_data(inode)) {
4689		int has_inline = 1;
4690
4691		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
4692
4693		if (has_inline)
4694			return error;
4695	}
4696
4697	/* fall back to the generic implementation if not in extents format */
4698	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4699		return generic_block_fiemap(inode, fieinfo, start, len,
4700			ext4_get_block);
4701
4702	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4703		return -EBADR;
4704
4705	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4706		error = ext4_xattr_fiemap(inode, fieinfo);
4707	} else {
4708		ext4_lblk_t len_blks;
4709		__u64 last_blk;
4710
4711		start_blk = start >> inode->i_sb->s_blocksize_bits;
4712		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4713		if (last_blk >= EXT_MAX_BLOCKS)
4714			last_blk = EXT_MAX_BLOCKS-1;
4715		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4716
4717		/*
4718		 * Walk the extent tree gathering extent information
4719		 * and pushing extents back to the user.
4720		 */
4721		error = ext4_fill_fiemap_extents(inode, start_blk,
4722						 len_blks, fieinfo);
4723	}
4724
4725	return error;
4726}
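
/*
 * Userspace view (illustration only; kept under "#if 0" so it is not
 * compiled as part of this file): the function above services the
 * FS_IOC_FIEMAP ioctl.  A minimal caller asking for the first eight
 * extents of a file:
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_FIEMAP */
#include <linux/fiemap.h>	/* struct fiemap, FIEMAP_FLAG_SYNC */

int main(int argc, char **argv)
{
	struct fiemap *fm = calloc(1, sizeof(*fm) +
				   8 * sizeof(struct fiemap_extent));
	int fd = argc > 1 ? open(argv[1], O_RDONLY) : -1;

	if (!fm || fd < 0)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = 8;	/* room for 8 extents after fm */
	if (ioctl(fd, FS_IOC_FIEMAP, fm) != 0)
		perror("fiemap");
	else
		printf("%u extents mapped\n", fm->fm_mapped_extents);
	free(fm);
	return 0;
}
#endif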
4727