extents.c revision c9877b205f6ce7943bb95281342f4001cc1c00ec
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"


/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
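
/*
 * Worked example of the 48-bit split above (illustration only): for
 * pb = 0x123456789A,
 *   ee_start_lo = pb & 0xffffffff            = 0x3456789A
 *   ee_start_hi = ((pb >> 31) >> 1) & 0xffff = 0x12
 * and ext_pblock() reassembles the same value:
 *   0x3456789A | (((ext4_fsblk_t) 0x12 << 31) << 1) = 0x123456789A.
 * The split shift (<< 31 then << 1, rather than << 32) is presumably
 * defensive: shifting a 32-bit quantity by 32 is undefined in C, so
 * this form stays well-defined even on a 32-bit block number type.
 */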

static int ext4_ext_journal_restart(handle_t *handle, int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	return ext4_journal_restart(handle, needed);
}
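
/*
 * Note on the flow above: ext4_journal_extend() returns 0 when the
 * handle was extended in place, a negative errno on failure, and a
 * positive value when the running transaction has no room left - only
 * in that last case do we fall through to ext4_journal_restart(),
 * which commits the current transaction and starts a fresh handle
 * with `needed` credits.
 */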

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like the index is empty;
		 * try to find the starting block from the index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}

/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}

static int ext4_ext_space_block(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 6)
		size = 6;
#endif
	return size;
}

static int ext4_ext_space_block_idx(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 5)
		size = 5;
#endif
	return size;
}

static int ext4_ext_space_root(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 3)
		size = 3;
#endif
	return size;
}

static int ext4_ext_space_root_idx(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 4)
		size = 4;
#endif
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
{
	int lcap, icap, rcap, leafs, idxs, num;
	int newextents = blocks;

	rcap = ext4_ext_space_root_idx(inode);
	lcap = ext4_ext_space_block(inode);
	icap = ext4_ext_space_block_idx(inode);

	/* number of new leaf blocks needed */
	num = leafs = (newextents + lcap - 1) / lcap;

	/*
	 * Worst case, we need separate index block(s)
	 * to link all new leaf blocks
	 */
	idxs = (leafs + icap - 1) / icap;
	do {
		num += idxs;
		idxs = (idxs + icap - 1) / icap;
	} while (idxs > rcap);

	return num;
}
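
/*
 * Worked example (illustration, assuming a 4KB block size and the
 * 12-byte on-disk header/extent/index structs): lcap = icap =
 * (4096 - 12) / 12 = 340 and rcap = (60 - 12) / 12 = 4.  For
 * blocks = 1000 new extents: leafs = (1000 + 339) / 340 = 3 leaf
 * blocks, plus idxs = 1 index block to link them; 1 <= rcap, so the
 * loop stops and the worst case is 3 + 1 = 4 metadata blocks.
 */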

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode);
		else
			max = ext4_ext_space_root_idx(inode);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode);
		else
			max = ext4_ext_space_block_idx(inode);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext_pblock(ext), valid_block;
	int len = ext4_ext_get_actual_len(ext);
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	valid_block = le32_to_cpu(es->s_first_data_block) +
		EXT4_SB(inode->i_sb)->s_gdb_count;
	if (unlikely(block <= valid_block ||
		     ((block + len) > ext4_blocks_count(es))))
		return 0;
	else
		return 1;
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = idx_pblock(ext_idx), valid_block;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	valid_block = le32_to_cpu(es->s_first_data_block) +
		EXT4_SB(inode->i_sb)->s_gdb_count;
	if (unlikely(block <= valid_block ||
		     (block >= ext4_blocks_count(es))))
		return 0;
	else
		return 1;
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header/extent in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
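
/*
 * Invariant of the search above: the loop maintains "everything left
 * of l is <= block, everything right of r is > block", so on exit l
 * points at the first index entry with ei_block > block and
 * p_idx = l - 1 is the rightmost entry with ei_block <= block.
 * Starting l at EXT_FIRST_INDEX(eh) + 1 keeps l - 1 valid even when
 * the target precedes every entry: the first entry wins by default.
 */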

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
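
/*
 * Shape of the path[] array built above: path[0] is the root (header
 * lives in the inode body, so p_bh == NULL) and path[k] describes the
 * node at level k, down to path[depth], the leaf.  p_idx is set for
 * the index levels, p_ext only at the leaf.  The array is allocated
 * with room for depth + 2 entries so a later split can grow the tree
 * one level deeper without reallocating.
 */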

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if the current leaf is going to be split, then we should use
	 * the border from the split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only.  The index won't be inserted and the
	 * tree will stay consistent.  The next mount will repair the
	 * buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free the blocks
	 * on failure.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1, 1);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
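
/*
 * Sketch of the transformation above (illustration):
 *
 *   before:  inode i_data = [hdr | entries ...]          depth N
 *   after:   inode i_data = [hdr | idx -> newblock]      depth N + 1
 *            newblock     = [hdr | entries ...]          (old top level)
 *
 * The old root's contents move wholesale into the freshly allocated
 * block and the root is rewritten as a single index entry pointing at
 * it, so growing the tree only ever rewrites the root in place.
 */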

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests growing the tree in depth.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use the already allocated block for the index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found an index with a free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only the first grow (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		return 0;
	}

	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
	put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}
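
/*
 * Note on the walk above: starting at the leaf and moving toward the
 * root, the first level that still has an entry to the right of the
 * current position supplies the answer.  An index entry's ei_block is
 * taken as the next allocated block without reading the child node,
 * which is exactly why the comment above requires index entries to
 * stay consistent with the leaves.
 */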

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* a zero-depth tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct the tree only if the first extent
		 * in the leaf got modified */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
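
/*
 * Example of the merge conditions (illustration): ex1 = [logical 100,
 * len 8, physical 5000] and ex2 = [logical 108, len 4, physical 5008]
 * may merge into [100, len 12, 5000]: logically contiguous
 * (100 + 8 == 108), physically contiguous (5000 + 8 == 5008), same
 * initialized state, and 12 does not exceed max_len.  Failing any one
 * of these tests returns 0.
 */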

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
			   "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
	}

	return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
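
/*
 * Example (illustration): newext = [b1 = 50, len1 = 20] while the tree
 * already holds an extent starting at b2 = 60.  Since 50 + 20 > 60,
 * the function trims newext to ee_len = 60 - 50 = 10 and returns 1;
 * the caller can then insert the shortened extent and deal with the
 * remaining blocks separately.
 */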

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;

	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append %d block to %d:%d (from %llu)\n",
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}

int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			ext4_lblk_t num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}

static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}

static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
	int ret = EXT4_EXT_CACHE_NO;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		goto errout;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = cex->ec_type;
	}
errout:
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1, 1);
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * into the extent tree.
 * When the actual path is passed, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in the leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There is some space in the leaf, so there is
			 *  no need to account for a leaf block credit;
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted for.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}
1999
2000/*
2001 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
2002 *
2003 * If nrblocks fit in a single extent (the chunk flag is 1), then in the
2004 * worst case each tree level's index/leaf needs to be changed, and if
2005 * the tree splits due to the insertion of a new extent, the old
2006 * index/leaf blocks need to be updated too.
2007 *
2008 * If the nrblocks are discontiguous, they could cause
2009 * the whole tree to split more than once, but this is really rare.
2010 */
2011int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2012{
2013	int index;
2014	int depth = ext_depth(inode);
2015
2016	if (chunk)
2017		index = depth * 2;
2018	else
2019		index = depth * 3;
2020
2021	return index;
2022}
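
/*
 * Illustration only: the worst-case arithmetic above, restated outside
 * the kernel. For a tree of depth 2, a single contiguous chunk touches
 * at most 2 * 2 = 4 index/leaf blocks, while discontiguous blocks are
 * charged 2 * 3 = 6. The toy_* name is hypothetical.
 */
#if 0
#include <stdio.h>

static int toy_index_trans_blocks(int depth, int chunk)
{
	return chunk ? depth * 2 : depth * 3;
}

int main(void)
{
	printf("chunk: %d, discontiguous: %d\n",
	       toy_index_trans_blocks(2, 1),	/* 4 */
	       toy_index_trans_blocks(2, 0));	/* 6 */
	return 0;
}
#endif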
2023
2024static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2025				struct ext4_extent *ex,
2026				ext4_lblk_t from, ext4_lblk_t to)
2027{
2028	struct buffer_head *bh;
2029	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2030	int i, metadata = 0;
2031
2032	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2033		metadata = 1;
2034#ifdef EXTENTS_STATS
2035	{
2036		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2037		spin_lock(&sbi->s_ext_stats_lock);
2038		sbi->s_ext_blocks += ee_len;
2039		sbi->s_ext_extents++;
2040		if (ee_len < sbi->s_ext_min)
2041			sbi->s_ext_min = ee_len;
2042		if (ee_len > sbi->s_ext_max)
2043			sbi->s_ext_max = ee_len;
2044		if (ext_depth(inode) > sbi->s_depth_max)
2045			sbi->s_depth_max = ext_depth(inode);
2046		spin_unlock(&sbi->s_ext_stats_lock);
2047	}
2048#endif
2049	if (from >= le32_to_cpu(ex->ee_block)
2050	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2051		/* tail removal */
2052		ext4_lblk_t num;
2053		ext4_fsblk_t start;
2054
2055		num = le32_to_cpu(ex->ee_block) + ee_len - from;
2056		start = ext_pblock(ex) + ee_len - num;
2057		ext_debug("free last %u blocks starting %llu\n", num, start);
2058		for (i = 0; i < num; i++) {
2059			bh = sb_find_get_block(inode->i_sb, start + i);
2060			ext4_forget(handle, 0, inode, bh, start + i);
2061		}
2062		ext4_free_blocks(handle, inode, start, num, metadata);
2063	} else if (from == le32_to_cpu(ex->ee_block)
2064		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2065		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
2066			from, to, le32_to_cpu(ex->ee_block), ee_len);
2067	} else {
2068		printk(KERN_INFO "strange request: removal(2) "
2069				"%u-%u from %u:%u\n",
2070				from, to, le32_to_cpu(ex->ee_block), ee_len);
2071	}
2072	return 0;
2073}
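
/*
 * Illustration only: the tail-removal arithmetic from the function above.
 * Removing [from, end-of-extent] from an extent of 'len' blocks starting
 * at logical 'lblk' / physical 'pblk' frees the last (lblk + len - from)
 * blocks. All numbers below are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lblk = 100, len = 16;	/* extent covers logical 100..115 */
	uint64_t pblk = 5000;		/* and physical 5000..5015 */
	uint32_t from = 110;		/* truncate from logical block 110 */

	uint32_t num = lblk + len - from;	/* 6 trailing blocks */
	uint64_t start = pblk + len - num;	/* first freed block: 5010 */

	printf("free last %u blocks starting %llu\n",
	       (unsigned)num, (unsigned long long)start);
	return 0;
}
#endif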
2074
2075static int
2076ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2077		struct ext4_ext_path *path, ext4_lblk_t start)
2078{
2079	int err = 0, correct_index = 0;
2080	int depth = ext_depth(inode), credits;
2081	struct ext4_extent_header *eh;
2082	ext4_lblk_t a, b, block;
2083	unsigned num;
2084	ext4_lblk_t ex_ee_block;
2085	unsigned short ex_ee_len;
2086	unsigned uninitialized = 0;
2087	struct ext4_extent *ex;
2088
2089	/* the header must be checked already in ext4_ext_remove_space() */
2090	ext_debug("truncate since %u in leaf\n", start);
2091	if (!path[depth].p_hdr)
2092		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2093	eh = path[depth].p_hdr;
2094	BUG_ON(eh == NULL);
2095
2096	/* find where to start removing */
2097	ex = EXT_LAST_EXTENT(eh);
2098
2099	ex_ee_block = le32_to_cpu(ex->ee_block);
2100	if (ext4_ext_is_uninitialized(ex))
2101		uninitialized = 1;
2102	ex_ee_len = ext4_ext_get_actual_len(ex);
2103
2104	while (ex >= EXT_FIRST_EXTENT(eh) &&
2105			ex_ee_block + ex_ee_len > start) {
2106		ext_debug("remove ext %u:%u\n", ex_ee_block, ex_ee_len);
2107		path[depth].p_ext = ex;
2108
2109		a = ex_ee_block > start ? ex_ee_block : start;
2110		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
2111			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
2112
2113		ext_debug("  border %u:%u\n", a, b);
2114
2115		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
2116			block = 0;
2117			num = 0;
2118			BUG();
2119		} else if (a != ex_ee_block) {
2120			/* remove tail of the extent */
2121			block = ex_ee_block;
2122			num = a - block;
2123		} else if (b != ex_ee_block + ex_ee_len - 1) {
2124			/* remove head of the extent */
2125			block = a;
2126			num = b - a;
2127			/* there is no "make a hole" API yet */
2128			BUG();
2129		} else {
2130			/* remove whole extent: excellent! */
2131			block = ex_ee_block;
2132			num = 0;
2133			BUG_ON(a != ex_ee_block);
2134			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
2135		}
2136
2137		/*
2138		 * 3 for the leaf, sb, and inode, plus 2 (bitmap and group
2139		 * descriptor) for each block group touched; assume two block
2140		 * groups plus ex_ee_len/EXT4_BLOCKS_PER_GROUP for
2141		 * the worst case
2142		 */
2143		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2144		if (ex == EXT_FIRST_EXTENT(eh)) {
2145			correct_index = 1;
2146			credits += (ext_depth(inode)) + 1;
2147		}
2148		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
2149
2150		err = ext4_ext_journal_restart(handle, credits);
2151		if (err)
2152			goto out;
2153
2154		err = ext4_ext_get_access(handle, inode, path + depth);
2155		if (err)
2156			goto out;
2157
2158		err = ext4_remove_blocks(handle, inode, ex, a, b);
2159		if (err)
2160			goto out;
2161
2162		if (num == 0) {
2163			/* this extent is removed; mark slot entirely unused */
2164			ext4_ext_store_pblock(ex, 0);
2165			le16_add_cpu(&eh->eh_entries, -1);
2166		}
2167
2168		ex->ee_block = cpu_to_le32(block);
2169		ex->ee_len = cpu_to_le16(num);
2170		/*
2171		 * Do not mark uninitialized if all the blocks in the
2172		 * extent have been removed.
2173		 */
2174		if (uninitialized && num)
2175			ext4_ext_mark_uninitialized(ex);
2176
2177		err = ext4_ext_dirty(handle, inode, path + depth);
2178		if (err)
2179			goto out;
2180
2181		ext_debug("new extent: %u:%u:%llu\n", block, num,
2182				ext_pblock(ex));
2183		ex--;
2184		ex_ee_block = le32_to_cpu(ex->ee_block);
2185		ex_ee_len = ext4_ext_get_actual_len(ex);
2186	}
2187
2188	if (correct_index && eh->eh_entries)
2189		err = ext4_ext_correct_indexes(handle, inode, path);
2190
2191	/* if this leaf is free, then we should
2192	 * remove it from index block above */
2193	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2194		err = ext4_ext_rm_idx(handle, inode, path + depth);
2195
2196out:
2197	return err;
2198}
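
/*
 * Illustration only: the border computation from the removal loop above.
 * When truncating from logical block 'start', each extent is clipped to
 * [a, b] and either its tail or the whole extent is removed; removing a
 * head or punching a hole in the middle are the BUG() cases. toy_clip
 * is a hypothetical name.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void toy_clip(uint32_t ee_block, uint32_t ee_len, uint32_t start)
{
	uint32_t a = ee_block > start ? ee_block : start;
	uint32_t b = ee_block + ee_len - 1;

	if (a == ee_block)
		printf("remove whole extent %u..%u\n", (unsigned)a,
		       (unsigned)b);
	else
		printf("remove tail %u..%u, keep %u..%u\n", (unsigned)a,
		       (unsigned)b, (unsigned)ee_block, (unsigned)(a - 1));
}

int main(void)
{
	toy_clip(100, 16, 110);	/* remove tail 110..115, keep 100..109 */
	toy_clip(200, 8, 110);	/* remove whole extent 200..207 */
	return 0;
}
#endif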
2199
2200/*
2201 * ext4_ext_more_to_rm:
2202 * returns 1 if the current index has to be freed (even if only partially)
2203 */
2204static int
2205ext4_ext_more_to_rm(struct ext4_ext_path *path)
2206{
2207	BUG_ON(path->p_idx == NULL);
2208
2209	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2210		return 0;
2211
2212	/*
2213	 * if truncation on a deeper level happened, it wasn't partial,
2214	 * so we have to consider the current index for truncation
2215	 */
2216	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2217		return 0;
2218	return 1;
2219}
2220
2221static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2222{
2223	struct super_block *sb = inode->i_sb;
2224	int depth = ext_depth(inode);
2225	struct ext4_ext_path *path;
2226	handle_t *handle;
2227	int i = 0, err = 0;
2228
2229	ext_debug("truncate since %u\n", start);
2230
2231	/* the first extent we free will probably be the last one in its block */
2232	handle = ext4_journal_start(inode, depth + 1);
2233	if (IS_ERR(handle))
2234		return PTR_ERR(handle);
2235
2236	ext4_ext_invalidate_cache(inode);
2237
2238	/*
2239	 * We start scanning from right side, freeing all the blocks
2240	 * after i_size and walking into the tree depth-wise.
2241	 */
2242	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2243	if (path == NULL) {
2244		ext4_journal_stop(handle);
2245		return -ENOMEM;
2246	}
2247	path[0].p_hdr = ext_inode_hdr(inode);
2248	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2249		err = -EIO;
2250		goto out;
2251	}
2252	path[0].p_depth = depth;
2253
2254	while (i >= 0 && err == 0) {
2255		if (i == depth) {
2256			/* this is leaf block */
2257			err = ext4_ext_rm_leaf(handle, inode, path, start);
2258			/* root level has p_bh == NULL, brelse() eats this */
2259			brelse(path[i].p_bh);
2260			path[i].p_bh = NULL;
2261			i--;
2262			continue;
2263		}
2264
2265		/* this is index block */
2266		if (!path[i].p_hdr) {
2267			ext_debug("initialize header\n");
2268			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2269		}
2270
2271		if (!path[i].p_idx) {
2272			/* this level hasn't been touched yet */
2273			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2274			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2275			ext_debug("init index ptr: hdr 0x%p, num %d\n",
2276				  path[i].p_hdr,
2277				  le16_to_cpu(path[i].p_hdr->eh_entries));
2278		} else {
2279			/* we were already here, so look at the next index */
2280			path[i].p_idx--;
2281		}
2282
2283		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2284				i, EXT_FIRST_INDEX(path[i].p_hdr),
2285				path[i].p_idx);
2286		if (ext4_ext_more_to_rm(path + i)) {
2287			struct buffer_head *bh;
2288			/* go to the next level */
2289			ext_debug("move to level %d (block %llu)\n",
2290				  i + 1, idx_pblock(path[i].p_idx));
2291			memset(path + i + 1, 0, sizeof(*path));
2292			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
2293			if (!bh) {
2294				/* should we reset i_size? */
2295				err = -EIO;
2296				break;
2297			}
2298			if (WARN_ON(i + 1 > depth)) {
2299				err = -EIO;
2300				break;
2301			}
2302			if (ext4_ext_check(inode, ext_block_hdr(bh),
2303							depth - i - 1)) {
2304				err = -EIO;
2305				break;
2306			}
2307			path[i + 1].p_bh = bh;
2308
2309			/* save actual number of indexes since this
2310			 * number is changed at the next iteration */
2311			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2312			i++;
2313		} else {
2314			/* we finished processing this index, go up */
2315			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2316				/* index is empty, remove it;
2317				 * the handle must already be prepared by the
2318				 * earlier leaf-removal pass */
2319				err = ext4_ext_rm_idx(handle, inode, path + i);
2320			}
2321			/* root level has p_bh == NULL, brelse() eats this */
2322			brelse(path[i].p_bh);
2323			path[i].p_bh = NULL;
2324			i--;
2325			ext_debug("return to level %d\n", i);
2326		}
2327	}
2328
2329	/* TODO: flexible tree reduction should be here */
2330	if (path->p_hdr->eh_entries == 0) {
2331		/*
2332		 * truncate to zero freed all the tree,
2333		 * so we need to correct eh_depth
2334		 */
2335		err = ext4_ext_get_access(handle, inode, path);
2336		if (err == 0) {
2337			ext_inode_hdr(inode)->eh_depth = 0;
2338			ext_inode_hdr(inode)->eh_max =
2339				cpu_to_le16(ext4_ext_space_root(inode));
2340			err = ext4_ext_dirty(handle, inode, path);
2341		}
2342	}
2343out:
2344	ext4_ext_drop_refs(path);
2345	kfree(path);
2346	ext4_journal_stop(handle);
2347
2348	return err;
2349}
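
/*
 * Illustration only: the control flow of the removal loop above, reduced
 * to an iterative right-to-left depth-first walk that keeps one cursor
 * per level instead of recursing, just as path[i].p_idx does. The tree
 * shape (DEPTH, FANOUT) is hypothetical.
 */
#if 0
#include <stdio.h>

#define DEPTH 2		/* interior levels; level DEPTH holds leaves */
#define FANOUT 2	/* entries per interior node */

int main(void)
{
	int cur[DEPTH];		 /* per-level cursor, like path[i].p_idx */
	int init[DEPTH] = { 0 }; /* has this level been visited yet? */
	int i = 0;

	while (i >= 0) {
		if (i == DEPTH) {
			printf("leaf via %d/%d\n", cur[0], cur[1]);
			i--;			/* leaf done, pop back up */
			continue;
		}
		if (!init[i]) {			/* first visit: rightmost */
			cur[i] = FANOUT - 1;
			init[i] = 1;
		} else {
			cur[i]--;		/* next entry to the left */
		}
		if (cur[i] >= 0) {
			i++;			/* descend into this child */
		} else {
			init[i] = 0;		/* level exhausted, reset */
			i--;			/* and pop up one level */
		}
	}
	return 0;	/* visits leaves 1/1, 1/0, 0/1, 0/0 */
}
#endif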
2350
2351/*
2352 * called at mount time
2353 */
2354void ext4_ext_init(struct super_block *sb)
2355{
2356	/*
2357	 * possible initialization would be here
2358	 */
2359
2360	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2361		printk(KERN_INFO "EXT4-fs: file extents enabled");
2362#ifdef AGGRESSIVE_TEST
2363		printk(", aggressive tests");
2364#endif
2365#ifdef CHECK_BINSEARCH
2366		printk(", check binsearch");
2367#endif
2368#ifdef EXTENTS_STATS
2369		printk(", stats");
2370#endif
2371		printk("\n");
2372#ifdef EXTENTS_STATS
2373		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2374		EXT4_SB(sb)->s_ext_min = 1 << 30;
2375		EXT4_SB(sb)->s_ext_max = 0;
2376#endif
2377	}
2378}
2379
2380/*
2381 * called at umount time
2382 */
2383void ext4_ext_release(struct super_block *sb)
2384{
2385	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2386		return;
2387
2388#ifdef EXTENTS_STATS
2389	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2390		struct ext4_sb_info *sbi = EXT4_SB(sb);
2391		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2392			sbi->s_ext_blocks, sbi->s_ext_extents,
2393			sbi->s_ext_blocks / sbi->s_ext_extents);
2394		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2395			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2396	}
2397#endif
2398}
2399
2400static void bi_complete(struct bio *bio, int error)
2401{
2402	complete((struct completion *)bio->bi_private);
2403}
2404
2405/* FIXME!! we need to try to merge to left or right after zero-out  */
2406static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2407{
2408	int ret = -EIO;
2409	struct bio *bio;
2410	int blkbits, blocksize;
2411	sector_t ee_pblock;
2412	struct completion event;
2413	unsigned int ee_len, len, done, offset;
2414
2415
2416	blkbits   = inode->i_blkbits;
2417	blocksize = inode->i_sb->s_blocksize;
2418	ee_len    = ext4_ext_get_actual_len(ex);
2419	ee_pblock = ext_pblock(ex);
2420
2421	/* convert ee_pblock to 512 byte sectors */
2422	ee_pblock = ee_pblock << (blkbits - 9);
2423
2424	while (ee_len > 0) {
2425
2426		if (ee_len > BIO_MAX_PAGES)
2427			len = BIO_MAX_PAGES;
2428		else
2429			len = ee_len;
2430
2431		bio = bio_alloc(GFP_NOIO, len);
2432		bio->bi_sector = ee_pblock;
2433		bio->bi_bdev   = inode->i_sb->s_bdev;
2434
2435		done = 0;
2436		offset = 0;
2437		while (done < len) {
2438			ret = bio_add_page(bio, ZERO_PAGE(0),
2439							blocksize, offset);
2440			if (ret != blocksize) {
2441				/*
2442				 * We can't add any more pages because of
2443				 * hardware limitations.  Start a new bio.
2444				 */
2445				break;
2446			}
2447			done++;
2448			offset += blocksize;
2449			if (offset >= PAGE_CACHE_SIZE)
2450				offset = 0;
2451		}
2452
2453		init_completion(&event);
2454		bio->bi_private = &event;
2455		bio->bi_end_io = bi_complete;
2456		submit_bio(WRITE, bio);
2457		wait_for_completion(&event);
2458
2459		if (test_bit(BIO_UPTODATE, &bio->bi_flags))
2460			ret = 0;
2461		else
2462			ret = -EIO;
2463		bio_put(bio);	/* drop the reference even on error */
2464		if (ret)
2465			break;
2466		ee_len    -= done;
2467		ee_pblock += done  << (blkbits - 9);
2468	}
2469	return ret;
2470}
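
/*
 * Illustration only: the block-to-sector conversion used above. With a
 * 4096-byte block size (blkbits = 12) one filesystem block spans
 * 2^(blkbits - 9) = 8 512-byte sectors, so physical block 5000 starts
 * at sector 40000. The numbers are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;	/* 4 KiB filesystem blocks */
	uint64_t pblock = 5000;

	uint64_t sector = pblock << (blkbits - 9);
	printf("block %llu -> sector %llu\n",
	       (unsigned long long)pblock,
	       (unsigned long long)sector);	/* prints 40000 */
	return 0;
}
#endif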
2471
2472#define EXT4_EXT_ZERO_LEN 7
2473
2474/*
2475 * This function is called by ext4_ext_get_blocks() if someone tries to write
2476 * to an uninitialized extent. It may result in splitting the uninitialized
2477 * extent into multiple extents (up to three - one initialized and two
2478 * uninitialized).
2479 * There are three possibilities:
2480 *   a> No split is required: the entire extent should be initialized
2481 *   b> Split into two extents: the write happens at either end of the extent
2482 *   c> Split into three extents: someone is writing in the middle of the extent
2483 */
2484static int ext4_ext_convert_to_initialized(handle_t *handle,
2485						struct inode *inode,
2486						struct ext4_ext_path *path,
2487						ext4_lblk_t iblock,
2488						unsigned int max_blocks)
2489{
2490	struct ext4_extent *ex, newex, orig_ex;
2491	struct ext4_extent *ex1 = NULL;
2492	struct ext4_extent *ex2 = NULL;
2493	struct ext4_extent *ex3 = NULL;
2494	struct ext4_extent_header *eh;
2495	ext4_lblk_t ee_block;
2496	unsigned int allocated, ee_len, depth;
2497	ext4_fsblk_t newblock;
2498	int err = 0;
2499	int ret = 0;
2500
2501	depth = ext_depth(inode);
2502	eh = path[depth].p_hdr;
2503	ex = path[depth].p_ext;
2504	ee_block = le32_to_cpu(ex->ee_block);
2505	ee_len = ext4_ext_get_actual_len(ex);
2506	allocated = ee_len - (iblock - ee_block);
2507	newblock = iblock - ee_block + ext_pblock(ex);
2508	ex2 = ex;
2509	orig_ex.ee_block = ex->ee_block;
2510	orig_ex.ee_len   = cpu_to_le16(ee_len);
2511	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2512
2513	err = ext4_ext_get_access(handle, inode, path + depth);
2514	if (err)
2515		goto out;
2516	/* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2517	if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
2518		err =  ext4_ext_zeroout(inode, &orig_ex);
2519		if (err)
2520			goto fix_extent_len;
2521		/* update the extent length and mark as initialized */
2522		ex->ee_block = orig_ex.ee_block;
2523		ex->ee_len   = orig_ex.ee_len;
2524		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2525		ext4_ext_dirty(handle, inode, path + depth);
2526		/* zeroed the full extent */
2527		return allocated;
2528	}
2529
2530	/* ex1: ee_block to iblock - 1 : uninitialized */
2531	if (iblock > ee_block) {
2532		ex1 = ex;
2533		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2534		ext4_ext_mark_uninitialized(ex1);
2535		ex2 = &newex;
2536	}
2537	/*
2538	 * for sanity, update the length of the ex2 extent before
2539	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2540	 * overlap of blocks.
2541	 */
2542	if (!ex1 && allocated > max_blocks)
2543		ex2->ee_len = cpu_to_le16(max_blocks);
2544	/* ex3: iblock + max_blocks to ee_block + ee_len : uninitialized */
2545	if (allocated > max_blocks) {
2546		unsigned int newdepth;
2547		/* If what remains is at most EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2548		if (allocated <= EXT4_EXT_ZERO_LEN) {
2549			/*
2550			 * iblock == ee_block is handled by the zeroout
2551			 * at the beginning.
2552			 * Mark first half uninitialized.
2553			 * Mark second half initialized and zero out the
2554			 * initialized extent
2555			 */
2556			ex->ee_block = orig_ex.ee_block;
2557			ex->ee_len   = cpu_to_le16(ee_len - allocated);
2558			ext4_ext_mark_uninitialized(ex);
2559			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2560			ext4_ext_dirty(handle, inode, path + depth);
2561
2562			ex3 = &newex;
2563			ex3->ee_block = cpu_to_le32(iblock);
2564			ext4_ext_store_pblock(ex3, newblock);
2565			ex3->ee_len = cpu_to_le16(allocated);
2566			err = ext4_ext_insert_extent(handle, inode, path, ex3);
2567			if (err == -ENOSPC) {
2568				err =  ext4_ext_zeroout(inode, &orig_ex);
2569				if (err)
2570					goto fix_extent_len;
2571				ex->ee_block = orig_ex.ee_block;
2572				ex->ee_len   = orig_ex.ee_len;
2573				ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2574				ext4_ext_dirty(handle, inode, path + depth);
2575				/* blocks available from iblock */
2576				return allocated;
2577
2578			} else if (err)
2579				goto fix_extent_len;
2580
2581			/*
2582			 * We need to zero out the second half because
2583			 * a fallocate request can update the file size, and
2584			 * converting the second half to an initialized extent
2585			 * could leak stale junk data to user
2586			 * space.
2587			 */
2588			err =  ext4_ext_zeroout(inode, ex3);
2589			if (err) {
2590				/*
2591				 * We should actually mark the
2592				 * second half as uninitialized and return the
2593				 * error; the insert would have changed the extent
2594				 */
2595				depth = ext_depth(inode);
2596				ext4_ext_drop_refs(path);
2597				path = ext4_ext_find_extent(inode,
2598								iblock, path);
2599				if (IS_ERR(path)) {
2600					err = PTR_ERR(path);
2601					return err;
2602				}
2603				/* get the second half extent details */
2604				ex = path[depth].p_ext;
2605				err = ext4_ext_get_access(handle, inode,
2606								path + depth);
2607				if (err)
2608					return err;
2609				ext4_ext_mark_uninitialized(ex);
2610				ext4_ext_dirty(handle, inode, path + depth);
2611				return err;
2612			}
2613
2614			/* zeroed the second half */
2615			return allocated;
2616		}
2617		ex3 = &newex;
2618		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2619		ext4_ext_store_pblock(ex3, newblock + max_blocks);
2620		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2621		ext4_ext_mark_uninitialized(ex3);
2622		err = ext4_ext_insert_extent(handle, inode, path, ex3);
2623		if (err == -ENOSPC) {
2624			err =  ext4_ext_zeroout(inode, &orig_ex);
2625			if (err)
2626				goto fix_extent_len;
2627			/* update the extent length and mark as initialized */
2628			ex->ee_block = orig_ex.ee_block;
2629			ex->ee_len   = orig_ex.ee_len;
2630			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2631			ext4_ext_dirty(handle, inode, path + depth);
2632			/* zeroed the full extent */
2633			/* blocks available from iblock */
2634			return allocated;
2635
2636		} else if (err)
2637			goto fix_extent_len;
2638		/*
2639		 * The depth, and hence eh & ex might change
2640		 * as part of the insert above.
2641		 */
2642		newdepth = ext_depth(inode);
2643		/*
2644		 * update the extent length after successful insert of the
2645		 * split extent
2646		 */
2647		orig_ex.ee_len = cpu_to_le16(ee_len -
2648						ext4_ext_get_actual_len(ex3));
2649		depth = newdepth;
2650		ext4_ext_drop_refs(path);
2651		path = ext4_ext_find_extent(inode, iblock, path);
2652		if (IS_ERR(path)) {
2653			err = PTR_ERR(path);
2654			goto out;
2655		}
2656		eh = path[depth].p_hdr;
2657		ex = path[depth].p_ext;
2658		if (ex2 != &newex)
2659			ex2 = ex;
2660
2661		err = ext4_ext_get_access(handle, inode, path + depth);
2662		if (err)
2663			goto out;
2664
2665		allocated = max_blocks;
2666
2667		/* If the extent is at most EXT4_EXT_ZERO_LEN blocks and we are trying
2668		 * to insert an extent in the middle, zero it out directly;
2669		 * otherwise give the extent a chance to merge to the left
2670		 */
2671		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2672							iblock != ee_block) {
2673			err =  ext4_ext_zeroout(inode, &orig_ex);
2674			if (err)
2675				goto fix_extent_len;
2676			/* update the extent length and mark as initialized */
2677			ex->ee_block = orig_ex.ee_block;
2678			ex->ee_len   = orig_ex.ee_len;
2679			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2680			ext4_ext_dirty(handle, inode, path + depth);
2681			/* zero out the first half */
2682			/* blocks available from iblock */
2683			return allocated;
2684		}
2685	}
2686	/*
2687	 * If there was a change of depth as part of the
2688	 * insertion of ex3 above, we need to update the length
2689	 * of the ex1 extent again here
2690	 */
2691	if (ex1 && ex1 != ex) {
2692		ex1 = ex;
2693		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2694		ext4_ext_mark_uninitialized(ex1);
2695		ex2 = &newex;
2696	}
2697	/* ex2: iblock to iblock + maxblocks-1 : initialised */
2698	ex2->ee_block = cpu_to_le32(iblock);
2699	ext4_ext_store_pblock(ex2, newblock);
2700	ex2->ee_len = cpu_to_le16(allocated);
2701	if (ex2 != ex)
2702		goto insert;
2703	/*
2704	 * New (initialized) extent starts from the first block
2705	 * in the current extent. i.e., ex2 == ex
2706	 * We have to see if it can be merged with the extent
2707	 * on the left.
2708	 */
2709	if (ex2 > EXT_FIRST_EXTENT(eh)) {
2710		/*
2711		 * To merge left, pass "ex2 - 1" to try_to_merge(),
2712		 * since it merges towards right _only_.
2713		 */
2714		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2715		if (ret) {
2716			err = ext4_ext_correct_indexes(handle, inode, path);
2717			if (err)
2718				goto out;
2719			depth = ext_depth(inode);
2720			ex2--;
2721		}
2722	}
2723	/*
2724	 * Try to merge towards the right. This might be required
2725	 * only when the whole extent is being written to.
2726	 * i.e. ex2 == ex and ex3 == NULL.
2727	 */
2728	if (!ex3) {
2729		ret = ext4_ext_try_to_merge(inode, path, ex2);
2730		if (ret) {
2731			err = ext4_ext_correct_indexes(handle, inode, path);
2732			if (err)
2733				goto out;
2734		}
2735	}
2736	/* Mark modified extent as dirty */
2737	err = ext4_ext_dirty(handle, inode, path + depth);
2738	goto out;
2739insert:
2740	err = ext4_ext_insert_extent(handle, inode, path, &newex);
2741	if (err == -ENOSPC) {
2742		err =  ext4_ext_zeroout(inode, &orig_ex);
2743		if (err)
2744			goto fix_extent_len;
2745		/* update the extent length and mark as initialized */
2746		ex->ee_block = orig_ex.ee_block;
2747		ex->ee_len   = orig_ex.ee_len;
2748		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2749		ext4_ext_dirty(handle, inode, path + depth);
2750		/* zero out the first half */
2751		return allocated;
2752	} else if (err)
2753		goto fix_extent_len;
2754out:
2755	return err ? err : allocated;
2756
2757fix_extent_len:
2758	ex->ee_block = orig_ex.ee_block;
2759	ex->ee_len   = orig_ex.ee_len;
2760	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2761	ext4_ext_mark_uninitialized(ex);
2762	ext4_ext_dirty(handle, inode, path + depth);
2763	return err;
2764}
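
/*
 * Illustration only: the ranges produced by the three-way split above
 * when a write lands strictly inside an uninitialized extent. With an
 * extent covering [ee_block, ee_block + ee_len) and a write of
 * max_blocks starting at iblock, the pieces are:
 *   ex1 = [ee_block, iblock)                        uninitialized
 *   ex2 = [iblock, iblock + max_blocks)             initialized
 *   ex3 = [iblock + max_blocks, ee_block + ee_len)  uninitialized
 * The numbers below are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ee_block = 100, ee_len = 50;	/* extent 100..149 */
	uint32_t iblock = 120, max_blocks = 10;	/* write to 120..129 */

	printf("ex1 (uninit): %u..%u\n", (unsigned)ee_block,
	       (unsigned)(iblock - 1));
	printf("ex2 (init)  : %u..%u\n", (unsigned)iblock,
	       (unsigned)(iblock + max_blocks - 1));
	printf("ex3 (uninit): %u..%u\n", (unsigned)(iblock + max_blocks),
	       (unsigned)(ee_block + ee_len - 1));
	return 0;	/* prints 100..119, 120..129, 130..149 */
}
#endif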
2765
2766/*
2767 * Block allocation/map/preallocation routine for extent-based files
2768 *
2769 *
2770 * Needs to be called with
2771 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
2772 * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
2773 *
2774 * return > 0, number of blocks already mapped/allocated
2775 *          if create == 0 and these are pre-allocated blocks
2776 *          	buffer head is unmapped
2777 *          otherwise blocks are mapped
2778 *
2779 * return = 0, if plain look up failed (blocks have not been allocated)
2780 *          buffer head is unmapped
2781 *
2782 * return < 0, error case.
2783 */
2784int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2785			ext4_lblk_t iblock,
2786			unsigned int max_blocks, struct buffer_head *bh_result,
2787			int create, int extend_disksize)
2788{
2789	struct ext4_ext_path *path = NULL;
2790	struct ext4_extent_header *eh;
2791	struct ext4_extent newex, *ex;
2792	ext4_fsblk_t newblock;
2793	int err = 0, depth, ret, cache_type;
2794	unsigned int allocated = 0;
2795	struct ext4_allocation_request ar;
2796	loff_t disksize;
2797
2798	__clear_bit(BH_New, &bh_result->b_state);
2799	ext_debug("blocks %u/%u requested for inode %lu\n",
2800			iblock, max_blocks, inode->i_ino);
2801
2802	/* check in cache */
2803	cache_type = ext4_ext_in_cache(inode, iblock, &newex);
2804	if (cache_type) {
2805		if (cache_type == EXT4_EXT_CACHE_GAP) {
2806			if (!create) {
2807				/*
2808				 * block isn't allocated yet and
2809				 * user doesn't want to allocate it
2810				 */
2811				goto out2;
2812			}
2813			/* we should allocate requested block */
2814		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
2815			/* block is already allocated */
2816			newblock = iblock
2817				   - le32_to_cpu(newex.ee_block)
2818				   + ext_pblock(&newex);
2819			/* number of remaining blocks in the extent */
2820			allocated = ext4_ext_get_actual_len(&newex) -
2821					(iblock - le32_to_cpu(newex.ee_block));
2822			goto out;
2823		} else {
2824			BUG();
2825		}
2826	}
2827
2828	/* find extent for this block */
2829	path = ext4_ext_find_extent(inode, iblock, NULL);
2830	if (IS_ERR(path)) {
2831		err = PTR_ERR(path);
2832		path = NULL;
2833		goto out2;
2834	}
2835
2836	depth = ext_depth(inode);
2837
2838	/*
2839	 * consistent leaf must not be empty;
2840	 * this situation is possible, though, _during_ tree modification;
2841	 * this is why assert can't be put in ext4_ext_find_extent()
2842	 */
2843	BUG_ON(path[depth].p_ext == NULL && depth != 0);
2844	eh = path[depth].p_hdr;
2845
2846	ex = path[depth].p_ext;
2847	if (ex) {
2848		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
2849		ext4_fsblk_t ee_start = ext_pblock(ex);
2850		unsigned short ee_len;
2851
2852		/*
2853		 * Uninitialized extents are treated as holes, except that
2854		 * we split out initialized portions during a write.
2855		 */
2856		ee_len = ext4_ext_get_actual_len(ex);
2857		/* if found extent covers block, simply return it */
2858		if (iblock >= ee_block && iblock < ee_block + ee_len) {
2859			newblock = iblock - ee_block + ee_start;
2860			/* number of remaining blocks in the extent */
2861			allocated = ee_len - (iblock - ee_block);
2862			ext_debug("%u fit into %u:%d -> %llu\n", iblock,
2863					ee_block, ee_len, newblock);
2864
2865			/* Do not put uninitialized extent in the cache */
2866			if (!ext4_ext_is_uninitialized(ex)) {
2867				ext4_ext_put_in_cache(inode, ee_block,
2868							ee_len, ee_start,
2869							EXT4_EXT_CACHE_EXTENT);
2870				goto out;
2871			}
2872			if (create == EXT4_CREATE_UNINITIALIZED_EXT)
2873				goto out;
2874			if (!create) {
2875				if (allocated > max_blocks)
2876					allocated = max_blocks;
2877				/*
2878				 * We have blocks reserved already.  We
2879				 * return allocated blocks so that delalloc
2880				 * won't do block reservation for us.  But
2881				 * the buffer head will be unmapped so that
2882				 * a read from the block returns 0s.
2883				 */
2884				set_buffer_unwritten(bh_result);
2885				bh_result->b_bdev = inode->i_sb->s_bdev;
2886				bh_result->b_blocknr = newblock;
2887				goto out2;
2888			}
2889
2890			ret = ext4_ext_convert_to_initialized(handle, inode,
2891								path, iblock,
2892								max_blocks);
2893			if (ret <= 0) {
2894				err = ret;
2895				goto out2;
2896			} else
2897				allocated = ret;
2898			goto outnew;
2899		}
2900	}
2901
2902	/*
2903	 * requested block isn't allocated yet;
2904	 * we cannot try to create it if the create flag is zero
2905	 */
2906	if (!create) {
2907		/*
2908		 * put just found gap into cache to speed up
2909		 * subsequent requests
2910		 */
2911		ext4_ext_put_gap_in_cache(inode, path, iblock);
2912		goto out2;
2913	}
2914	/*
2915	 * Okay, we need to do block allocation.
2916	 */
2917
2918	/* find neighbour allocated blocks */
2919	ar.lleft = iblock;
2920	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
2921	if (err)
2922		goto out2;
2923	ar.lright = iblock;
2924	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
2925	if (err)
2926		goto out2;
2927
2928	/*
2929	 * See if request is beyond maximum number of blocks we can have in
2930	 * a single extent. For an initialized extent this limit is
2931	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
2932	 * EXT_UNINIT_MAX_LEN.
2933	 */
2934	if (max_blocks > EXT_INIT_MAX_LEN &&
2935	    create != EXT4_CREATE_UNINITIALIZED_EXT)
2936		max_blocks = EXT_INIT_MAX_LEN;
2937	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
2938		 create == EXT4_CREATE_UNINITIALIZED_EXT)
2939		max_blocks = EXT_UNINIT_MAX_LEN;
2940
2941	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
2942	newex.ee_block = cpu_to_le32(iblock);
2943	newex.ee_len = cpu_to_le16(max_blocks);
2944	err = ext4_ext_check_overlap(inode, &newex, path);
2945	if (err)
2946		allocated = ext4_ext_get_actual_len(&newex);
2947	else
2948		allocated = max_blocks;
2949
2950	/* allocate new block */
2951	ar.inode = inode;
2952	ar.goal = ext4_ext_find_goal(inode, path, iblock);
2953	ar.logical = iblock;
2954	ar.len = allocated;
2955	if (S_ISREG(inode->i_mode))
2956		ar.flags = EXT4_MB_HINT_DATA;
2957	else
2958		/* disable in-core preallocation for non-regular files */
2959		ar.flags = 0;
2960	newblock = ext4_mb_new_blocks(handle, &ar, &err);
2961	if (!newblock)
2962		goto out2;
2963	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
2964		  ar.goal, newblock, allocated);
2965
2966	/* try to insert new extent into found leaf and return */
2967	ext4_ext_store_pblock(&newex, newblock);
2968	newex.ee_len = cpu_to_le16(ar.len);
2969	if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
2970		ext4_ext_mark_uninitialized(&newex);
2971	err = ext4_ext_insert_extent(handle, inode, path, &newex);
2972	if (err) {
2973		/* free data blocks we just allocated */
2974		/* not a good idea to call discard here directly,
2975	 * but otherwise we'd need to call it on every free() */
2976		ext4_discard_preallocations(inode);
2977		ext4_free_blocks(handle, inode, ext_pblock(&newex),
2978					ext4_ext_get_actual_len(&newex), 0);
2979		goto out2;
2980	}
2981
2982	/* previous routine could use block we allocated */
2983	newblock = ext_pblock(&newex);
2984	allocated = ext4_ext_get_actual_len(&newex);
2985outnew:
2986	if (extend_disksize) {
2987		disksize = ((loff_t) iblock + ar.len) << inode->i_blkbits;
2988		if (disksize > i_size_read(inode))
2989			disksize = i_size_read(inode);
2990		if (disksize > EXT4_I(inode)->i_disksize)
2991			EXT4_I(inode)->i_disksize = disksize;
2992	}
2993
2994	set_buffer_new(bh_result);
2995
2996	/* Cache only when it is _not_ an uninitialized extent */
2997	if (create != EXT4_CREATE_UNINITIALIZED_EXT)
2998		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
2999						EXT4_EXT_CACHE_EXTENT);
3000out:
3001	if (allocated > max_blocks)
3002		allocated = max_blocks;
3003	ext4_ext_show_leaf(inode, path);
3004	set_buffer_mapped(bh_result);
3005	bh_result->b_bdev = inode->i_sb->s_bdev;
3006	bh_result->b_blocknr = newblock;
3007out2:
3008	if (path) {
3009		ext4_ext_drop_refs(path);
3010		kfree(path);
3011	}
3012	return err ? err : allocated;
3013}
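
/*
 * Illustration only: the in-extent mapping used above when the found
 * extent covers the requested block, plus the remaining-length value
 * that lets a caller map several blocks from one lookup. The numbers
 * are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ee_block = 100, ee_len = 16;	/* logical 100..115 */
	uint64_t ee_start = 5000;		/* physical 5000..5015 */
	uint32_t iblock = 107;			/* requested block */

	uint64_t newblock = iblock - ee_block + ee_start;	/* 5007 */
	uint32_t allocated = ee_len - (iblock - ee_block);	/* 9 */

	printf("%u fits at %llu, %u blocks mappable\n", (unsigned)iblock,
	       (unsigned long long)newblock, (unsigned)allocated);
	return 0;
}
#endif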
3014
3015void ext4_ext_truncate(struct inode *inode)
3016{
3017	struct address_space *mapping = inode->i_mapping;
3018	struct super_block *sb = inode->i_sb;
3019	ext4_lblk_t last_block;
3020	handle_t *handle;
3021	int err = 0;
3022
3023	/*
3024	 * the first extent we free will probably be the last one in its block
3025	 */
3026	err = ext4_writepage_trans_blocks(inode);
3027	handle = ext4_journal_start(inode, err);
3028	if (IS_ERR(handle))
3029		return;
3030
3031	if (inode->i_size & (sb->s_blocksize - 1))
3032		ext4_block_truncate_page(handle, mapping, inode->i_size);
3033
3034	if (ext4_orphan_add(handle, inode))
3035		goto out_stop;
3036
3037	down_write(&EXT4_I(inode)->i_data_sem);
3038	ext4_ext_invalidate_cache(inode);
3039
3040	ext4_discard_preallocations(inode);
3041
3042	/*
3043	 * TODO: optimization is possible here.
3044	 * Probably we need not scan at all,
3045	 * because page truncation is enough.
3046	 */
3047
3048	/* we have to know where to truncate from in crash case */
3049	EXT4_I(inode)->i_disksize = inode->i_size;
3050	ext4_mark_inode_dirty(handle, inode);
3051
3052	last_block = (inode->i_size + sb->s_blocksize - 1)
3053			>> EXT4_BLOCK_SIZE_BITS(sb);
3054	err = ext4_ext_remove_space(inode, last_block);
3055
3056	/* In a multi-transaction truncate, we only make the final
3057	 * transaction synchronous.
3058	 */
3059	if (IS_SYNC(inode))
3060		ext4_handle_sync(handle);
3061
3062out_stop:
3063	up_write(&EXT4_I(inode)->i_data_sem);
3064	/*
3065	 * If this was a simple ftruncate() and the file will remain alive,
3066	 * then we need to clear up the orphan record which we created above.
3067	 * However, if this was a real unlink then we were called by
3068	 * ext4_delete_inode(), and we allow that function to clean up the
3069	 * orphan info for us.
3070	 */
3071	if (inode->i_nlink)
3072		ext4_orphan_del(handle, inode);
3073
3074	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3075	ext4_mark_inode_dirty(handle, inode);
3076	ext4_journal_stop(handle);
3077}
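
/*
 * Illustration only: the round-up that computes the first block to
 * remove in the truncate path above. A 10000-byte file with 4096-byte
 * blocks (blkbits = 12) still needs blocks 0..2, so removal starts at
 * block 3. The sizes are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long i_size = 10000, blocksize = 4096;
	unsigned int blkbits = 12;

	unsigned long long last_block =
		(i_size + blocksize - 1) >> blkbits;
	printf("remove extents from block %llu on\n", last_block); /* 3 */
	return 0;
}
#endif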
3078
3079static void ext4_falloc_update_inode(struct inode *inode,
3080				int mode, loff_t new_size, int update_ctime)
3081{
3082	struct timespec now;
3083
3084	if (update_ctime) {
3085		now = current_fs_time(inode->i_sb);
3086		if (!timespec_equal(&inode->i_ctime, &now))
3087			inode->i_ctime = now;
3088	}
3089	/*
3090	 * Update only when preallocation was requested beyond
3091	 * the file size.
3092	 */
3093	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
3094		if (new_size > i_size_read(inode))
3095			i_size_write(inode, new_size);
3096		if (new_size > EXT4_I(inode)->i_disksize)
3097			ext4_update_i_disksize(inode, new_size);
3098	}
3099
3100}
3101
3102/*
3103 * preallocate space for a file. This implements ext4's fallocate inode
3104 * operation, which gets called from the sys_fallocate system call.
3105 * For block-mapped files, posix_fallocate should fall back to the method
3106 * of writing zeroes to the required new blocks (the same behavior that is
3107 * expected of file systems that do not support the fallocate() system call).
3108 */
3109long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3110{
3111	handle_t *handle;
3112	ext4_lblk_t block;
3113	loff_t new_size;
3114	unsigned int max_blocks;
3115	int ret = 0;
3116	int ret2 = 0;
3117	int retries = 0;
3118	struct buffer_head map_bh;
3119	unsigned int credits, blkbits = inode->i_blkbits;
3120
3121	/*
3122	 * currently supporting (pre)allocate mode for extent-based
3123	 * files _only_
3124	 */
3125	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3126		return -EOPNOTSUPP;
3127
3128	/* preallocation to directories is currently not supported */
3129	if (S_ISDIR(inode->i_mode))
3130		return -ENODEV;
3131
3132	block = offset >> blkbits;
3133	/*
3134	 * We can't just convert len to max_blocks: e.g., if blocksize = 4096,
3135	 * offset = 3072 and len = 2048, two blocks are covered (see sketch below)
3136	 */
3137	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3138							- block;
3139	/*
3140	 * credits to insert 1 extent into extent tree
3141	 */
3142	credits = ext4_chunk_trans_blocks(inode, max_blocks);
3143	mutex_lock(&inode->i_mutex);
3144retry:
3145	while (ret >= 0 && ret < max_blocks) {
3146		block = block + ret;
3147		max_blocks = max_blocks - ret;
3148		handle = ext4_journal_start(inode, credits);
3149		if (IS_ERR(handle)) {
3150			ret = PTR_ERR(handle);
3151			break;
3152		}
3153		map_bh.b_state = 0;
3154		ret = ext4_get_blocks_wrap(handle, inode, block,
3155					  max_blocks, &map_bh,
3156					  EXT4_CREATE_UNINITIALIZED_EXT, 0, 0);
3157		if (ret <= 0) {
3158#ifdef EXT4FS_DEBUG
3159			WARN_ON(ret <= 0);
3160			printk(KERN_ERR "%s: ext4_ext_get_blocks "
3161				    "returned error inode#%lu, block=%u, "
3162				    "max_blocks=%u\n", __func__,
3163				    inode->i_ino, block, max_blocks);
3164#endif
3165			ext4_mark_inode_dirty(handle, inode);
3166			ret2 = ext4_journal_stop(handle);
3167			break;
3168		}
3169		if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
3170						blkbits) >> blkbits))
3171			new_size = offset + len;
3172		else
3173			new_size = (block + ret) << blkbits;
3174
3175		ext4_falloc_update_inode(inode, mode, new_size,
3176						buffer_new(&map_bh));
3177		ext4_mark_inode_dirty(handle, inode);
3178		ret2 = ext4_journal_stop(handle);
3179		if (ret2)
3180			break;
3181	}
3182	if (ret == -ENOSPC &&
3183			ext4_should_retry_alloc(inode->i_sb, &retries)) {
3184		ret = 0;
3185		goto retry;
3186	}
3187	mutex_unlock(&inode->i_mutex);
3188	return ret > 0 ? ret2 : ret;
3189}
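
/*
 * Illustration only: the block-range arithmetic from the comment in
 * ext4_fallocate() above. With blocksize = 4096, offset = 3072 and
 * len = 2048 the byte range [3072, 5120) touches blocks 0 and 1, so
 * max_blocks must be 2 even though len >> blkbits is 0.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long offset = 3072, len = 2048;
	unsigned int blkbits = 12;
	unsigned long long blocksize = 1ULL << blkbits;

	unsigned long long block = offset >> blkbits;		/* 0 */
	unsigned long long end =
		(offset + len + blocksize - 1) >> blkbits;	/* 2 */
	unsigned long long max_blocks = end - block;

	printf("block %llu, max_blocks %llu, len >> blkbits = %llu\n",
	       block, max_blocks, len >> blkbits);
	return 0;
}
#endif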
3190
3191/*
3192 * Callback function called for each extent to gather FIEMAP information.
3193 */
3194static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3195		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
3196		       void *data)
3197{
3198	struct fiemap_extent_info *fieinfo = data;
3199	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
3200	__u64	logical;
3201	__u64	physical;
3202	__u64	length;
3203	__u32	flags = 0;
3204	int	error;
3205
3206	logical =  (__u64)newex->ec_block << blksize_bits;
3207
3208	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
3209		pgoff_t offset;
3210		struct page *page;
3211		struct buffer_head *bh = NULL;
3212
3213		offset = logical >> PAGE_SHIFT;
3214		page = find_get_page(inode->i_mapping, offset);
3215		if (!page || !page_has_buffers(page))
3216			return EXT_CONTINUE;
3217
3218		bh = page_buffers(page);
3219
3220		if (!bh)
3221			return EXT_CONTINUE;
3222
3223		if (buffer_delay(bh)) {
3224			flags |= FIEMAP_EXTENT_DELALLOC;
3225			page_cache_release(page);
3226		} else {
3227			page_cache_release(page);
3228			return EXT_CONTINUE;
3229		}
3230	}
3231
3232	physical = (__u64)newex->ec_start << blksize_bits;
3233	length =   (__u64)newex->ec_len << blksize_bits;
3234
3235	if (ex && ext4_ext_is_uninitialized(ex))
3236		flags |= FIEMAP_EXTENT_UNWRITTEN;
3237
3238	/*
3239	 * If this extent reaches EXT_MAX_BLOCK, it must be last.
3240	 *
3241	 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
3242	 * this also indicates no more allocated blocks.
3243	 *
3244	 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
3245	 */
3246	if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
3247	    newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK)
3248		flags |= FIEMAP_EXTENT_LAST;
3249
3250	error = fiemap_fill_next_extent(fieinfo, logical, physical,
3251					length, flags);
3252	if (error < 0)
3253		return error;
3254	if (error == 1)
3255		return EXT_BREAK;
3256
3257	return EXT_CONTINUE;
3258}
3259
3260/* fiemap flags we can handle specified here */
3261#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
3262
3263static int ext4_xattr_fiemap(struct inode *inode,
3264				struct fiemap_extent_info *fieinfo)
3265{
3266	__u64 physical = 0;
3267	__u64 length;
3268	__u32 flags = FIEMAP_EXTENT_LAST;
3269	int blockbits = inode->i_sb->s_blocksize_bits;
3270	int error = 0;
3271
3272	/* in-inode? */
3273	if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
3274		struct ext4_iloc iloc;
3275		int offset;	/* offset of xattr in inode */
3276
3277		error = ext4_get_inode_loc(inode, &iloc);
3278		if (error)
3279			return error;
3280		physical = iloc.bh->b_blocknr << blockbits;
3281		offset = EXT4_GOOD_OLD_INODE_SIZE +
3282				EXT4_I(inode)->i_extra_isize;
3283		physical += offset;
3284		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
3285		flags |= FIEMAP_EXTENT_DATA_INLINE;
3286	} else { /* external block */
3287		physical = EXT4_I(inode)->i_file_acl << blockbits;
3288		length = inode->i_sb->s_blocksize;
3289	}
3290
3291	if (physical)
3292		error = fiemap_fill_next_extent(fieinfo, 0, physical,
3293						length, flags);
3294	return (error < 0 ? error : 0);
3295}
3296
3297int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3298		__u64 start, __u64 len)
3299{
3300	ext4_lblk_t start_blk;
3301	ext4_lblk_t len_blks;
3302	int error = 0;
3303
3304	/* fall back to the generic path here if the file is not in extents format */
3305	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3306		return generic_block_fiemap(inode, fieinfo, start, len,
3307			ext4_get_block);
3308
3309	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
3310		return -EBADR;
3311
3312	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3313		error = ext4_xattr_fiemap(inode, fieinfo);
3314	} else {
3315		start_blk = start >> inode->i_sb->s_blocksize_bits;
3316		len_blks = len >> inode->i_sb->s_blocksize_bits;
3317
3318		/*
3319		 * Walk the extent tree gathering extent information.
3320		 * ext4_ext_fiemap_cb will push extents back to user.
3321		 */
3322		down_write(&EXT4_I(inode)->i_data_sem);
3323		error = ext4_ext_walk_space(inode, start_blk, len_blks,
3324					  ext4_ext_fiemap_cb, fieinfo);
3325		up_write(&EXT4_I(inode)->i_data_sem);
3326	}
3327
3328	return error;
3329}
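
/*
 * Illustration only: a minimal userspace caller of the FIEMAP ioctl
 * serviced by ext4_fiemap() above. Error handling is reduced to
 * perror(), and the fixed extent count of 32 is an arbitrary choice
 * for this sketch.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i;
	int fd;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0) {
		perror("open");
		return 1;
	}
	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = 32;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu length %llu flags 0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	free(fm);
	close(fd);
	return 0;
}
#endif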
3330
3331