extents.c revision 5661bd6861b7490394e29aaf74dca812188272e4
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"


/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
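
/*
 * Illustrative example of the 48-bit split above (values made up):
 * for the physical block 0x123456789ABC, ext4_ext_store_pblock() sets
 * ee_start_lo = cpu_to_le32(0x56789ABC) and ee_start_hi =
 * cpu_to_le16(0x1234); ext_pblock() reassembles the same value by
 * shifting the high 16 bits up by 32 and OR-ing in the low 32 bits.
 */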

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	/*
	 * We have dropped i_data_sem so someone might have cached again
	 * an extent we are going to truncate.
	 */
	ext4_ext_invalidate_cache(inode);

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
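
/*
 * Rough worked example of the colour heuristic above (illustrative,
 * assuming a 4K block size, i.e. 32768 blocks per group): the group is
 * carved into 16 stripes of 2048 blocks and current->pid % 16 selects
 * one, so concurrent writers with different pids tend to receive
 * allocation goals in different regions of the same block group.
 */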

/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate the block at @lblock
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
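
/*
 * Worked example (illustrative, assuming a 4K block size): idxs =
 * (4096 - 12) / 12 = 340, so one contiguous run of delayed-allocation
 * blocks charges one extra metadata block every 340 blocks, another
 * every 340*340 blocks, and so on, while the non-contiguous worst
 * case above charges ext_depth(inode) + 1 for every single block.
 */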

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	__ext4_error(inode->i_sb, function,
			"bad header/extent in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
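
/*
 * Illustrative trace of the search above: with index entries starting
 * at logical blocks {0, 100, 200} and block == 150, l advances past
 * the entry for 100, r retreats from the entry for 200, and
 * path->p_idx = l - 1 lands on the entry for 100 -- the last index
 * whose ei_block is <= the target block.
 */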

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
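
/*
 * Note on the calling convention above, as relied upon throughout this
 * file: on success the caller owns the buffer references held in the
 * returned path and must release them with ext4_ext_drop_refs() (and
 * kfree() the path if it was allocated here); on error the references
 * have already been dropped and ERR_PTR() is returned.
 */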

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
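
/*
 * Illustrative example of the shift logic above: with indexes for
 * logical blocks {0, 100, 200} and curp->p_idx at the entry for 100,
 * inserting logical == 150 takes the "insert after" branch: the entry
 * for 200 is memmove()d one slot to the right and the new index lands
 * between 100 and 200, keeping the entries sorted by block number.
 */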

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only. The index won't be inserted and the
	 * tree will remain in a consistent state. The next mount will
	 * repair the buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free the allocated
	 * blocks on failure.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	/* TODO: we could do it with a single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
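
/*
 * Illustrative shape of a split with depth == 2 and @at == 0: two
 * blocks are allocated (depth - at), the extents to the right of the
 * split point move into the new leaf, the depth-1 index entries to
 * the right move into the new intermediate index block, and a single
 * new index entry for @border is inserted into the root.
 */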

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
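
/*
 * Illustrative before/after of growing in depth: a depth-0 tree whose
 * root (in i_data) holds leaf extents becomes a depth-1 tree whose
 * root holds a single index entry pointing at the new block, which
 * carries the old root contents verbatim (the header is copied along
 * with the entries; only eh_max is recomputed for the block size).
 */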

/*
 * ext4_ext_create_new_leaf:
 * finds an empty index entry and adds a new leaf.
 * If no free index entry is found, it grows the tree in depth instead.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * searches for the closest allocated block to the left of *logical
 * and returns it at @logical, with its physical address at @phys.
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys.
 * the return value contains 0 (success) or an error code.
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * searches for the closest allocated block to the right of *logical
 * and returns it at @logical, with its physical address at @phys.
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys.
 * the return value contains 0 (success) or an error code.
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
	put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
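
/*
 * Example of the merge predicate (illustrative numbers): an
 * initialized extent covering logical blocks [100, 150) at physical
 * block 500 can be merged with one covering [150, 180) at physical
 * block 550, because both the logical and the physical ranges are
 * exactly contiguous and 50 + 30 stays below EXT_INIT_MAX_LEN.
 */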

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb,
				   "inode#%lu, eh->eh_entries = 0!",
				   inode->i_ino);
	}

	return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
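
/*
 * Illustrative example: if newext covers logical blocks [100, 150)
 * but the tree already has an extent starting at block 120, the
 * function trims newext->ee_len down to 20 blocks (covering
 * [100, 120)) and returns 1, so the caller allocates only the
 * non-overlapping prefix.
 */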

/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into the existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're going to add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}

int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			ext4_lblk_t num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
			err = -EIO;
			break;
		}
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
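
/*
 * Callback contract implemented above: @func is invoked once per
 * region -- EXT4_EXT_CACHE_EXTENT for an allocated extent,
 * EXT4_EXT_CACHE_GAP for a hole -- and steers the walk through its
 * return value: EXT_BREAK stops the walk and reports success,
 * EXT_REPEAT re-examines the same region (e.g. after the callback
 * changed the tree), and any negative value aborts with that error.
 */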
1970
1971static void
1972ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1973			__u32 len, ext4_fsblk_t start, int type)
1974{
1975	struct ext4_ext_cache *cex;
1976	BUG_ON(len == 0);
1977	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1978	cex = &EXT4_I(inode)->i_cached_extent;
1979	cex->ec_type = type;
1980	cex->ec_block = block;
1981	cex->ec_len = len;
1982	cex->ec_start = start;
1983	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1984}
1985
1986/*
1987 * ext4_ext_put_gap_in_cache:
1988 * calculate boundaries of the gap that the requested block fits into
1989 * and cache this gap
1990 */
1991static void
1992ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1993				ext4_lblk_t block)
1994{
1995	int depth = ext_depth(inode);
1996	unsigned long len;
1997	ext4_lblk_t lblock;
1998	struct ext4_extent *ex;
1999
2000	ex = path[depth].p_ext;
2001	if (ex == NULL) {
2002		/* there is no extent yet, so gap is [0;-] */
2003		lblock = 0;
2004		len = EXT_MAX_BLOCK;
2005		ext_debug("cache gap(whole file):");
2006	} else if (block < le32_to_cpu(ex->ee_block)) {
2007		lblock = block;
2008		len = le32_to_cpu(ex->ee_block) - block;
2009		ext_debug("cache gap(before): %u [%u:%u]",
2010				block,
2011				le32_to_cpu(ex->ee_block),
2012				 ext4_ext_get_actual_len(ex));
2013	} else if (block >= le32_to_cpu(ex->ee_block)
2014			+ ext4_ext_get_actual_len(ex)) {
2015		ext4_lblk_t next;
2016		lblock = le32_to_cpu(ex->ee_block)
2017			+ ext4_ext_get_actual_len(ex);
2018
2019		next = ext4_ext_next_allocated_block(path);
2020		ext_debug("cache gap(after): [%u:%u] %u",
2021				le32_to_cpu(ex->ee_block),
2022				ext4_ext_get_actual_len(ex),
2023				block);
2024		BUG_ON(next == lblock);
2025		len = next - lblock;
2026	} else {
2027		lblock = len = 0;
2028		BUG();
2029	}
2030
2031	ext_debug(" -> %u:%lu\n", lblock, len);
2032	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
2033}
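
/*
 * Worked example (hypothetical numbers): with extents covering logical
 * blocks 0-9 and 20-29, a lookup at block 12 positions p_ext at the
 * [0:10] extent, takes the "after found extent" branch above, and
 * caches the gap starting at lblock 10 with
 * len = next - lblock = 20 - 10 = 10 blocks.
 */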
2034
2035static int
2036ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2037			struct ext4_extent *ex)
2038{
2039	struct ext4_ext_cache *cex;
2040	int ret = EXT4_EXT_CACHE_NO;
2041
2042	/*
2043	 * We borrow i_block_reservation_lock to protect i_cached_extent
2044	 */
2045	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2046	cex = &EXT4_I(inode)->i_cached_extent;
2047
2048	/* has cache valid data? */
2049	if (cex->ec_type == EXT4_EXT_CACHE_NO)
2050		goto errout;
2051
2052	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
2053			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
2054	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
2055		ex->ee_block = cpu_to_le32(cex->ec_block);
2056		ext4_ext_store_pblock(ex, cex->ec_start);
2057		ex->ee_len = cpu_to_le16(cex->ec_len);
2058		ext_debug("%u cached by %u:%u:%llu\n",
2059				block,
2060				cex->ec_block, cex->ec_len, cex->ec_start);
2061		ret = cex->ec_type;
2062	}
2063errout:
2064	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2065	return ret;
2066}
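
/*
 * Note that i_cached_extent is a single-slot cache: a hit only means
 * the most recently cached extent or gap covers 'block'; a miss says
 * nothing about the tree and simply forces a full lookup via
 * ext4_ext_find_extent().
 */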
2067
2068/*
2069 * ext4_ext_rm_idx:
2070 * removes the index from the index block.
2071 * It's used in the truncate case only, thus all requests are for the
2072 * last index in the block.
2073 */
2074static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2075			struct ext4_ext_path *path)
2076{
2077	int err;
2078	ext4_fsblk_t leaf;
2079
2080	/* free index block */
2081	path--;
2082	leaf = idx_pblock(path->p_idx);
2083	if (unlikely(path->p_hdr->eh_entries == 0)) {
2084		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2085		return -EIO;
2086	}
2087	err = ext4_ext_get_access(handle, inode, path);
2088	if (err)
2089		return err;
2090	le16_add_cpu(&path->p_hdr->eh_entries, -1);
2091	err = ext4_ext_dirty(handle, inode, path);
2092	if (err)
2093		return err;
2094	ext_debug("index is empty, remove it, free block %llu\n", leaf);
2095	ext4_free_blocks(handle, inode, 0, leaf, 1,
2096			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2097	return err;
2098}
2099
2100/*
2101 * ext4_ext_calc_credits_for_single_extent:
2102 * This routine returns the maximum number of credits needed to insert
2103 * an extent into the extent tree.
2104 * When passing an actual path, the caller must calculate the credits
2105 * under i_data_sem.
2106 */
2107int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2108						struct ext4_ext_path *path)
2109{
2110	if (path) {
2111		int depth = ext_depth(inode);
2112		int ret = 0;
2113
2114		/* probably there is space in leaf? */
2115		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2116				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
2117
2118			/*
2119			 *  There is some space in the leaf; no
2120			 *  need to account for the leaf block credit.
2121			 *
2122			 *  Bitmaps and block group descriptor blocks
2123			 *  and other metadata blocks still need to be
2124			 *  accounted for.
2125			 */
2126			/* 1 bitmap, 1 block group descriptor */
2127			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2128			return ret;
2129		}
2130	}
2131
2132	return ext4_chunk_trans_blocks(inode, nrblocks);
2133}
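
/*
 * Example: when the leaf already has a free slot, the caller needs only
 * 2 + EXT4_META_TRANS_BLOCKS(sb) credits (one bitmap plus one group
 * descriptor, plus the usual metadata overhead); with no path, or with
 * a full leaf, the pessimistic ext4_chunk_trans_blocks() estimate is
 * used instead.
 */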
2134
2135/*
2136 * How many index/leaf blocks need to change/allocate to modify nrblocks?
2137 *
2138 * If nrblocks fit in a single extent (the chunk flag is set), then in
2139 * the worst case each tree level's index/leaf needs to be changed; if
2140 * the tree splits due to inserting a new extent, the old
2141 * index/leaf need to be updated too.
2142 *
2143 * If the nrblocks are discontiguous, they could cause
2144 * the whole tree to split more than once, but this is really rare.
2145 */
2146int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2147{
2148	int index;
2149	int depth = ext_depth(inode);
2150
2151	if (chunk)
2152		index = depth * 2;
2153	else
2154		index = depth * 3;
2155
2156	return index;
2157}
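
/*
 * Example: for a tree of depth 2, a single contiguous chunk reserves
 * 2 * 2 = 4 index/leaf blocks, while discontiguous blocks reserve
 * 3 * 2 = 6 to allow for more than one split per level.
 */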
2158
2159static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2160				struct ext4_extent *ex,
2161				ext4_lblk_t from, ext4_lblk_t to)
2162{
2163	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2164	int flags = EXT4_FREE_BLOCKS_FORGET;
2165
2166	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2167		flags |= EXT4_FREE_BLOCKS_METADATA;
2168#ifdef EXTENTS_STATS
2169	{
2170		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2171		spin_lock(&sbi->s_ext_stats_lock);
2172		sbi->s_ext_blocks += ee_len;
2173		sbi->s_ext_extents++;
2174		if (ee_len < sbi->s_ext_min)
2175			sbi->s_ext_min = ee_len;
2176		if (ee_len > sbi->s_ext_max)
2177			sbi->s_ext_max = ee_len;
2178		if (ext_depth(inode) > sbi->s_depth_max)
2179			sbi->s_depth_max = ext_depth(inode);
2180		spin_unlock(&sbi->s_ext_stats_lock);
2181	}
2182#endif
2183	if (from >= le32_to_cpu(ex->ee_block)
2184	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2185		/* tail removal */
2186		ext4_lblk_t num;
2187		ext4_fsblk_t start;
2188
2189		num = le32_to_cpu(ex->ee_block) + ee_len - from;
2190		start = ext_pblock(ex) + ee_len - num;
2191		ext_debug("free last %u blocks starting %llu\n", num, start);
2192		ext4_free_blocks(handle, inode, 0, start, num, flags);
2193	} else if (from == le32_to_cpu(ex->ee_block)
2194		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2195		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
2196			from, to, le32_to_cpu(ex->ee_block), ee_len);
2197	} else {
2198		printk(KERN_INFO "strange request: removal(2) "
2199				"%u-%u from %u:%u\n",
2200				from, to, le32_to_cpu(ex->ee_block), ee_len);
2201	}
2202	return 0;
2203}
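
/*
 * Tail-removal arithmetic, illustrated with hypothetical numbers: for a
 * 10-block extent at logical block 100, truncating from block 105 gives
 * num = 100 + 10 - 105 = 5 blocks to free, starting at physical block
 * ext_pblock(ex) + 10 - 5, i.e. the last five blocks of the extent.
 */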
2204
2205static int
2206ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2207		struct ext4_ext_path *path, ext4_lblk_t start)
2208{
2209	int err = 0, correct_index = 0;
2210	int depth = ext_depth(inode), credits;
2211	struct ext4_extent_header *eh;
2212	ext4_lblk_t a, b, block;
2213	unsigned num;
2214	ext4_lblk_t ex_ee_block;
2215	unsigned short ex_ee_len;
2216	unsigned uninitialized = 0;
2217	struct ext4_extent *ex;
2218
2219	/* the header must be checked already in ext4_ext_remove_space() */
2220	ext_debug("truncate since %u in leaf\n", start);
2221	if (!path[depth].p_hdr)
2222		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2223	eh = path[depth].p_hdr;
2224	if (unlikely(path[depth].p_hdr == NULL)) {
2225		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2226		return -EIO;
2227	}
2228	/* find where to start removing */
2229	ex = EXT_LAST_EXTENT(eh);
2230
2231	ex_ee_block = le32_to_cpu(ex->ee_block);
2232	ex_ee_len = ext4_ext_get_actual_len(ex);
2233
2234	while (ex >= EXT_FIRST_EXTENT(eh) &&
2235			ex_ee_block + ex_ee_len > start) {
2236
2237		if (ext4_ext_is_uninitialized(ex))
2238			uninitialized = 1;
2239		else
2240			uninitialized = 0;
2241
2242		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2243			 uninitialized, ex_ee_len);
2244		path[depth].p_ext = ex;
2245
2246		a = ex_ee_block > start ? ex_ee_block : start;
2247		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
2248			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
2249
2250		ext_debug("  border %u:%u\n", a, b);
2251
2252		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
2253			block = 0;
2254			num = 0;
2255			BUG();
2256		} else if (a != ex_ee_block) {
2257			/* remove tail of the extent */
2258			block = ex_ee_block;
2259			num = a - block;
2260		} else if (b != ex_ee_block + ex_ee_len - 1) {
2261			/* remove head of the extent */
2262			block = a;
2263			num = b - a;
2264			/* there is no "make a hole" API yet */
2265			BUG();
2266		} else {
2267			/* remove whole extent: excellent! */
2268			block = ex_ee_block;
2269			num = 0;
2270			BUG_ON(a != ex_ee_block);
2271			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
2272		}
2273
2274		/*
2275		 * 3 for leaf, sb, and inode plus 2 (bmap and group
2276		 * descriptor) for each block group; assume two block
2277		 * groups plus ex_ee_len/blocks_per_block_group for
2278		 * the worst case
2279		 */
2280		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2281		if (ex == EXT_FIRST_EXTENT(eh)) {
2282			correct_index = 1;
2283			credits += (ext_depth(inode)) + 1;
2284		}
2285		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2286
2287		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2288		if (err)
2289			goto out;
2290
2291		err = ext4_ext_get_access(handle, inode, path + depth);
2292		if (err)
2293			goto out;
2294
2295		err = ext4_remove_blocks(handle, inode, ex, a, b);
2296		if (err)
2297			goto out;
2298
2299		if (num == 0) {
2300			/* this extent is removed; mark slot entirely unused */
2301			ext4_ext_store_pblock(ex, 0);
2302			le16_add_cpu(&eh->eh_entries, -1);
2303		}
2304
2305		ex->ee_block = cpu_to_le32(block);
2306		ex->ee_len = cpu_to_le16(num);
2307		/*
2308		 * Do not mark uninitialized if all the blocks in the
2309		 * extent have been removed.
2310		 */
2311		if (uninitialized && num)
2312			ext4_ext_mark_uninitialized(ex);
2313
2314		err = ext4_ext_dirty(handle, inode, path + depth);
2315		if (err)
2316			goto out;
2317
2318		ext_debug("new extent: %u:%u:%llu\n", block, num,
2319				ext_pblock(ex));
2320		ex--;
2321		ex_ee_block = le32_to_cpu(ex->ee_block);
2322		ex_ee_len = ext4_ext_get_actual_len(ex);
2323	}
2324
2325	if (correct_index && eh->eh_entries)
2326		err = ext4_ext_correct_indexes(handle, inode, path);
2327
2328	/* if this leaf is free, then we should
2329	 * remove it from the index block above */
2330	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2331		err = ext4_ext_rm_idx(handle, inode, path + depth);
2332
2333out:
2334	return err;
2335}
2336
2337/*
2338 * ext4_ext_more_to_rm:
2339 * returns 1 if current index has to be freed (even partial)
2340 */
2341static int
2342ext4_ext_more_to_rm(struct ext4_ext_path *path)
2343{
2344	BUG_ON(path->p_idx == NULL);
2345
2346	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2347		return 0;
2348
2349	/*
2350	 * if a truncate on a deeper level happened, it wasn't partial,
2351	 * so we have to consider the current index for truncation
2352	 */
2353	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2354		return 0;
2355	return 1;
2356}
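
/*
 * Note: p_block caches the entry count this level had before we
 * descended into a child (see ext4_ext_remove_space() below); if the
 * count is unchanged when we come back, the last child survived the
 * truncate, so the remaining entries are still in use and this level
 * is finished.
 */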
2357
2358static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2359{
2360	struct super_block *sb = inode->i_sb;
2361	int depth = ext_depth(inode);
2362	struct ext4_ext_path *path;
2363	handle_t *handle;
2364	int i = 0, err = 0;
2365
2366	ext_debug("truncate since %u\n", start);
2367
2368	/* probably first extent we're gonna free will be last in block */
2369	handle = ext4_journal_start(inode, depth + 1);
2370	if (IS_ERR(handle))
2371		return PTR_ERR(handle);
2372
2373	ext4_ext_invalidate_cache(inode);
2374
2375	/*
2376	 * We start scanning from right side, freeing all the blocks
2377	 * after i_size and walking into the tree depth-wise.
2378	 */
2379	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2380	if (path == NULL) {
2381		ext4_journal_stop(handle);
2382		return -ENOMEM;
2383	}
2384	path[0].p_hdr = ext_inode_hdr(inode);
2385	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2386		err = -EIO;
2387		goto out;
2388	}
2389	path[0].p_depth = depth;
2390
2391	while (i >= 0 && err == 0) {
2392		if (i == depth) {
2393			/* this is leaf block */
2394			err = ext4_ext_rm_leaf(handle, inode, path, start);
2395			/* root level has p_bh == NULL, brelse() eats this */
2396			brelse(path[i].p_bh);
2397			path[i].p_bh = NULL;
2398			i--;
2399			continue;
2400		}
2401
2402		/* this is index block */
2403		if (!path[i].p_hdr) {
2404			ext_debug("initialize header\n");
2405			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2406		}
2407
2408		if (!path[i].p_idx) {
2409			/* this level hasn't been touched yet */
2410			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2411			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2412			ext_debug("init index ptr: hdr 0x%p, num %d\n",
2413				  path[i].p_hdr,
2414				  le16_to_cpu(path[i].p_hdr->eh_entries));
2415		} else {
2416			/* we were already here, look at the next index */
2417			path[i].p_idx--;
2418		}
2419
2420		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2421				i, EXT_FIRST_INDEX(path[i].p_hdr),
2422				path[i].p_idx);
2423		if (ext4_ext_more_to_rm(path + i)) {
2424			struct buffer_head *bh;
2425			/* go to the next level */
2426			ext_debug("move to level %d (block %llu)\n",
2427				  i + 1, idx_pblock(path[i].p_idx));
2428			memset(path + i + 1, 0, sizeof(*path));
2429			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
2430			if (!bh) {
2431				/* should we reset i_size? */
2432				err = -EIO;
2433				break;
2434			}
2435			if (WARN_ON(i + 1 > depth)) {
2436				err = -EIO;
2437				break;
2438			}
2439			if (ext4_ext_check(inode, ext_block_hdr(bh),
2440							depth - i - 1)) {
2441				err = -EIO;
2442				break;
2443			}
2444			path[i + 1].p_bh = bh;
2445
2446			/* save actual number of indexes since this
2447			 * number is changed at the next iteration */
2448			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2449			i++;
2450		} else {
2451			/* we finished processing this index, go up */
2452			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2453				/* index is empty, remove it;
2454				 * the handle must already be prepared by
2455				 * ext4_ext_rm_leaf() */
2456				err = ext4_ext_rm_idx(handle, inode, path + i);
2457			}
2458			/* root level has p_bh == NULL, brelse() eats this */
2459			brelse(path[i].p_bh);
2460			path[i].p_bh = NULL;
2461			i--;
2462			ext_debug("return to level %d\n", i);
2463		}
2464	}
2465
2466	/* TODO: flexible tree reduction should be here */
2467	if (path->p_hdr->eh_entries == 0) {
2468		/*
2469		 * truncate to zero freed all the tree,
2470		 * so we need to correct eh_depth
2471		 */
2472		err = ext4_ext_get_access(handle, inode, path);
2473		if (err == 0) {
2474			ext_inode_hdr(inode)->eh_depth = 0;
2475			ext_inode_hdr(inode)->eh_max =
2476				cpu_to_le16(ext4_ext_space_root(inode, 0));
2477			err = ext4_ext_dirty(handle, inode, path);
2478		}
2479	}
2480out:
2481	ext4_ext_drop_refs(path);
2482	kfree(path);
2483	ext4_journal_stop(handle);
2484
2485	return err;
2486}
2487
2488/*
2489 * called at mount time
2490 */
2491void ext4_ext_init(struct super_block *sb)
2492{
2493	/*
2494	 * possible initialization would be here
2495	 */
2496
2497	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2498#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2499		printk(KERN_INFO "EXT4-fs: file extents enabled");
2500#ifdef AGGRESSIVE_TEST
2501		printk(", aggressive tests");
2502#endif
2503#ifdef CHECK_BINSEARCH
2504		printk(", check binsearch");
2505#endif
2506#ifdef EXTENTS_STATS
2507		printk(", stats");
2508#endif
2509		printk("\n");
2510#endif
2511#ifdef EXTENTS_STATS
2512		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2513		EXT4_SB(sb)->s_ext_min = 1 << 30;
2514		EXT4_SB(sb)->s_ext_max = 0;
2515#endif
2516	}
2517}
2518
2519/*
2520 * called at umount time
2521 */
2522void ext4_ext_release(struct super_block *sb)
2523{
2524	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2525		return;
2526
2527#ifdef EXTENTS_STATS
2528	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2529		struct ext4_sb_info *sbi = EXT4_SB(sb);
2530		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2531			sbi->s_ext_blocks, sbi->s_ext_extents,
2532			sbi->s_ext_blocks / sbi->s_ext_extents);
2533		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2534			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2535	}
2536#endif
2537}
2538
2539static void bi_complete(struct bio *bio, int error)
2540{
2541	complete((struct completion *)bio->bi_private);
2542}
2543
2544/* FIXME!! we need to try to merge to left or right after zero-out  */
2545static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2546{
2547	int ret = -EIO;
2548	struct bio *bio;
2549	int blkbits, blocksize;
2550	sector_t ee_pblock;
2551	struct completion event;
2552	unsigned int ee_len, len, done, offset;
2553
2554
2555	blkbits   = inode->i_blkbits;
2556	blocksize = inode->i_sb->s_blocksize;
2557	ee_len    = ext4_ext_get_actual_len(ex);
2558	ee_pblock = ext_pblock(ex);
2559
2560	/* convert ee_pblock to 512 byte sectors */
2561	ee_pblock = ee_pblock << (blkbits - 9);
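	/* e.g. with 4K blocks, blkbits == 12 and each fs block is 8 sectors */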
2562
2563	while (ee_len > 0) {
2564
2565		if (ee_len > BIO_MAX_PAGES)
2566			len = BIO_MAX_PAGES;
2567		else
2568			len = ee_len;
2569
2570		bio = bio_alloc(GFP_NOIO, len);
2571		bio->bi_sector = ee_pblock;
2572		bio->bi_bdev   = inode->i_sb->s_bdev;
2573
2574		done = 0;
2575		offset = 0;
2576		while (done < len) {
2577			ret = bio_add_page(bio, ZERO_PAGE(0),
2578							blocksize, offset);
2579			if (ret != blocksize) {
2580				/*
2581				 * We can't add any more pages because of
2582				 * hardware limitations.  Start a new bio.
2583				 */
2584				break;
2585			}
2586			done++;
2587			offset += blocksize;
2588			if (offset >= PAGE_CACHE_SIZE)
2589				offset = 0;
2590		}
2591
2592		init_completion(&event);
2593		bio->bi_private = &event;
2594		bio->bi_end_io = bi_complete;
2595		submit_bio(WRITE, bio);
2596		wait_for_completion(&event);
2597
2598		if (test_bit(BIO_UPTODATE, &bio->bi_flags))
2599			ret = 0;
2600		else {
2601			ret = -EIO;
2602			break;
2603		}
2604		bio_put(bio);
2605		ee_len    -= done;
2606		ee_pblock += done  << (blkbits - 9);
2607	}
2608	return ret;
2609}
2610
2611#define EXT4_EXT_ZERO_LEN 7
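/*
 * Short uninitialized stretches (at most EXT4_EXT_ZERO_LEN blocks, or
 * twice that for a whole extent) are zeroed out in place below rather
 * than split; the assumption is that writing a few blocks of zeroes is
 * cheaper than the extra extents and metadata updates a split creates.
 */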
2612/*
2613 * This function is called by ext4_ext_get_blocks() if someone tries to write
2614 * to an uninitialized extent. It may result in splitting the uninitialized
2615 * extent into multiple extents (up to three - one initialized and two
2616 * uninitialized).
2617 * There are three possibilities:
2618 *   a> No split required: the entire extent should be initialized
2619 *   b> Split into two extents: the write happens at either end of the extent
2620 *   c> Split into three extents: someone is writing in the middle of the extent
2621 */
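/*
 * Illustration of case c> with hypothetical numbers: a write to blocks
 * 130-139 of an uninitialized extent covering 100-199 yields
 * ex1 = 100-129 (uninitialized), ex2 = 130-139 (initialized) and
 * ex3 = 140-199 (uninitialized).
 */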
2622static int ext4_ext_convert_to_initialized(handle_t *handle,
2623						struct inode *inode,
2624						struct ext4_ext_path *path,
2625						ext4_lblk_t iblock,
2626						unsigned int max_blocks)
2627{
2628	struct ext4_extent *ex, newex, orig_ex;
2629	struct ext4_extent *ex1 = NULL;
2630	struct ext4_extent *ex2 = NULL;
2631	struct ext4_extent *ex3 = NULL;
2632	struct ext4_extent_header *eh;
2633	ext4_lblk_t ee_block;
2634	unsigned int allocated, ee_len, depth;
2635	ext4_fsblk_t newblock;
2636	int err = 0;
2637	int ret = 0;
2638
2639	depth = ext_depth(inode);
2640	eh = path[depth].p_hdr;
2641	ex = path[depth].p_ext;
2642	ee_block = le32_to_cpu(ex->ee_block);
2643	ee_len = ext4_ext_get_actual_len(ex);
2644	allocated = ee_len - (iblock - ee_block);
2645	newblock = iblock - ee_block + ext_pblock(ex);
2646	ex2 = ex;
2647	orig_ex.ee_block = ex->ee_block;
2648	orig_ex.ee_len   = cpu_to_le16(ee_len);
2649	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2650
2651	err = ext4_ext_get_access(handle, inode, path + depth);
2652	if (err)
2653		goto out;
2654	/* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2655	if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
2656		err =  ext4_ext_zeroout(inode, &orig_ex);
2657		if (err)
2658			goto fix_extent_len;
2659		/* update the extent length and mark as initialized */
2660		ex->ee_block = orig_ex.ee_block;
2661		ex->ee_len   = orig_ex.ee_len;
2662		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2663		ext4_ext_dirty(handle, inode, path + depth);
2664		/* zeroed the full extent */
2665		return allocated;
2666	}
2667
2668	/* ex1: ee_block to iblock - 1 : uninitialized */
2669	if (iblock > ee_block) {
2670		ex1 = ex;
2671		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2672		ext4_ext_mark_uninitialized(ex1);
2673		ex2 = &newex;
2674	}
2675	/*
2676	 * for sanity, update the length of the ex2 extent before
2677	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2678	 * overlap of blocks.
2679	 */
2680	if (!ex1 && allocated > max_blocks)
2681		ex2->ee_len = cpu_to_le16(max_blocks);
2682	/* ex3: to ee_block + ee_len : uninitialized */
2683	if (allocated > max_blocks) {
2684		unsigned int newdepth;
2685		/* If the remainder is at most EXT4_EXT_ZERO_LEN blocks, zero out directly */
2686		if (allocated <= EXT4_EXT_ZERO_LEN) {
2687			/*
2688			 * iblock == ee_block is handled by the zeroout
2689			 * at the beginning.
2690			 * Mark the first half uninitialized.
2691			 * Mark the second half initialized and zero out the
2692			 * initialized extent.
2693			 */
2694			ex->ee_block = orig_ex.ee_block;
2695			ex->ee_len   = cpu_to_le16(ee_len - allocated);
2696			ext4_ext_mark_uninitialized(ex);
2697			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2698			ext4_ext_dirty(handle, inode, path + depth);
2699
2700			ex3 = &newex;
2701			ex3->ee_block = cpu_to_le32(iblock);
2702			ext4_ext_store_pblock(ex3, newblock);
2703			ex3->ee_len = cpu_to_le16(allocated);
2704			err = ext4_ext_insert_extent(handle, inode, path,
2705							ex3, 0);
2706			if (err == -ENOSPC) {
2707				err =  ext4_ext_zeroout(inode, &orig_ex);
2708				if (err)
2709					goto fix_extent_len;
2710				ex->ee_block = orig_ex.ee_block;
2711				ex->ee_len   = orig_ex.ee_len;
2712				ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2713				ext4_ext_dirty(handle, inode, path + depth);
2714				/* blocks available from iblock */
2715				return allocated;
2716
2717			} else if (err)
2718				goto fix_extent_len;
2719
2720			/*
2721			 * We need to zero out the second half because
2722			 * a fallocate request can update the file size and
2723			 * converting the second half to an initialized extent
2724			 * implies that we could leak junk data to user
2725			 * space.
2726			 */
2727			err =  ext4_ext_zeroout(inode, ex3);
2728			if (err) {
2729				/*
2730				 * We should actually mark the
2731				 * second half as uninit and return the error;
2732				 * the insert would have changed the extent.
2733				 */
2734				depth = ext_depth(inode);
2735				ext4_ext_drop_refs(path);
2736				path = ext4_ext_find_extent(inode,
2737								iblock, path);
2738				if (IS_ERR(path)) {
2739					err = PTR_ERR(path);
2740					return err;
2741				}
2742				/* get the second half extent details */
2743				ex = path[depth].p_ext;
2744				err = ext4_ext_get_access(handle, inode,
2745								path + depth);
2746				if (err)
2747					return err;
2748				ext4_ext_mark_uninitialized(ex);
2749				ext4_ext_dirty(handle, inode, path + depth);
2750				return err;
2751			}
2752
2753			/* zeroed the second half */
2754			return allocated;
2755		}
2756		ex3 = &newex;
2757		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2758		ext4_ext_store_pblock(ex3, newblock + max_blocks);
2759		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2760		ext4_ext_mark_uninitialized(ex3);
2761		err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2762		if (err == -ENOSPC) {
2763			err =  ext4_ext_zeroout(inode, &orig_ex);
2764			if (err)
2765				goto fix_extent_len;
2766			/* update the extent length and mark as initialized */
2767			ex->ee_block = orig_ex.ee_block;
2768			ex->ee_len   = orig_ex.ee_len;
2769			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2770			ext4_ext_dirty(handle, inode, path + depth);
2771			/* zeroed the full extent */
2772			/* blocks available from iblock */
2773			return allocated;
2774
2775		} else if (err)
2776			goto fix_extent_len;
2777		/*
2778		 * The depth, and hence eh & ex might change
2779		 * as part of the insert above.
2780		 */
2781		newdepth = ext_depth(inode);
2782		/*
2783		 * update the extent length after successful insert of the
2784		 * split extent
2785		 */
2786		orig_ex.ee_len = cpu_to_le16(ee_len -
2787						ext4_ext_get_actual_len(ex3));
2788		depth = newdepth;
2789		ext4_ext_drop_refs(path);
2790		path = ext4_ext_find_extent(inode, iblock, path);
2791		if (IS_ERR(path)) {
2792			err = PTR_ERR(path);
2793			goto out;
2794		}
2795		eh = path[depth].p_hdr;
2796		ex = path[depth].p_ext;
2797		if (ex2 != &newex)
2798			ex2 = ex;
2799
2800		err = ext4_ext_get_access(handle, inode, path + depth);
2801		if (err)
2802			goto out;
2803
2804		allocated = max_blocks;
2805
2806		/* If the extent is at most EXT4_EXT_ZERO_LEN blocks and we are
2807		 * trying to insert an extent in the middle, zero out directly;
2808		 * otherwise give the extent a chance to merge to the left.
2809		 */
2810		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2811							iblock != ee_block) {
2812			err =  ext4_ext_zeroout(inode, &orig_ex);
2813			if (err)
2814				goto fix_extent_len;
2815			/* update the extent length and mark as initialized */
2816			ex->ee_block = orig_ex.ee_block;
2817			ex->ee_len   = orig_ex.ee_len;
2818			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2819			ext4_ext_dirty(handle, inode, path + depth);
2820			/* zero out the first half */
2821			/* blocks available from iblock */
2822			return allocated;
2823		}
2824	}
2825	/*
2826	 * If there was a change of depth as part of the
2827	 * insertion of ex3 above, we need to update the length
2828	 * of the ex1 extent again here
2829	 */
2830	if (ex1 && ex1 != ex) {
2831		ex1 = ex;
2832		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2833		ext4_ext_mark_uninitialized(ex1);
2834		ex2 = &newex;
2835	}
2836	/* ex2: iblock to iblock + maxblocks-1 : initialized */
2837	ex2->ee_block = cpu_to_le32(iblock);
2838	ext4_ext_store_pblock(ex2, newblock);
2839	ex2->ee_len = cpu_to_le16(allocated);
2840	if (ex2 != ex)
2841		goto insert;
2842	/*
2843	 * New (initialized) extent starts from the first block
2844	 * in the current extent. i.e., ex2 == ex
2845	 * We have to see if it can be merged with the extent
2846	 * on the left.
2847	 */
2848	if (ex2 > EXT_FIRST_EXTENT(eh)) {
2849		/*
2850		 * To merge left, pass "ex2 - 1" to try_to_merge(),
2851		 * since it merges towards right _only_.
2852		 */
2853		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2854		if (ret) {
2855			err = ext4_ext_correct_indexes(handle, inode, path);
2856			if (err)
2857				goto out;
2858			depth = ext_depth(inode);
2859			ex2--;
2860		}
2861	}
2862	/*
2863	 * Try to merge towards the right. This might be required
2864	 * only when the whole extent is being written to.
2865	 * i.e. ex2 == ex and ex3 == NULL.
2866	 */
2867	if (!ex3) {
2868		ret = ext4_ext_try_to_merge(inode, path, ex2);
2869		if (ret) {
2870			err = ext4_ext_correct_indexes(handle, inode, path);
2871			if (err)
2872				goto out;
2873		}
2874	}
2875	/* Mark modified extent as dirty */
2876	err = ext4_ext_dirty(handle, inode, path + depth);
2877	goto out;
2878insert:
2879	err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
2880	if (err == -ENOSPC) {
2881		err =  ext4_ext_zeroout(inode, &orig_ex);
2882		if (err)
2883			goto fix_extent_len;
2884		/* update the extent length and mark as initialized */
2885		ex->ee_block = orig_ex.ee_block;
2886		ex->ee_len   = orig_ex.ee_len;
2887		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2888		ext4_ext_dirty(handle, inode, path + depth);
2889		/* zero out the first half */
2890		return allocated;
2891	} else if (err)
2892		goto fix_extent_len;
2893out:
2894	ext4_ext_show_leaf(inode, path);
2895	return err ? err : allocated;
2896
2897fix_extent_len:
2898	ex->ee_block = orig_ex.ee_block;
2899	ex->ee_len   = orig_ex.ee_len;
2900	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2901	ext4_ext_mark_uninitialized(ex);
2902	ext4_ext_dirty(handle, inode, path + depth);
2903	return err;
2904}
2905
2906/*
2907 * This function is called by ext4_ext_get_blocks() from
2908 * ext4_get_blocks_dio_write() when DIO is used to write
2909 * to an uninitialized extent.
2910 *
2911 * Writing to an uninitialized extent may result in splitting it
2912 * into multiple uninitialized extents (up to three).
2913 * There are three possibilities:
2914 *   a> No split required: the entire extent stays uninitialized
2915 *   b> Split into two extents: the write happens at either end of the extent
2916 *   c> Split into three extents: someone is writing in the middle of the extent
2917 *
2918 * One or more index blocks may be needed if the extent tree grows after
2919 * the uninitialized extent is split. To prevent ENOSPC from occurring at
2920 * IO completion, we split the uninitialized extent before the DIO is
2921 * submitted. The uninitialized extent handled here will be split into
2922 * three uninitialized extents (at most). After the IO completes, the part
2923 * being filled will be converted to initialized by the end_io callback
2924 * via ext4_convert_unwritten_extents().
2925 *
2926 * Returns the size of the uninitialized extent to be written on success.
2927 */
2928static int ext4_split_unwritten_extents(handle_t *handle,
2929					struct inode *inode,
2930					struct ext4_ext_path *path,
2931					ext4_lblk_t iblock,
2932					unsigned int max_blocks,
2933					int flags)
2934{
2935	struct ext4_extent *ex, newex, orig_ex;
2936	struct ext4_extent *ex1 = NULL;
2937	struct ext4_extent *ex2 = NULL;
2938	struct ext4_extent *ex3 = NULL;
2939	struct ext4_extent_header *eh;
2940	ext4_lblk_t ee_block;
2941	unsigned int allocated, ee_len, depth;
2942	ext4_fsblk_t newblock;
2943	int err = 0;
2944
2945	ext_debug("ext4_split_unwritten_extents: inode %lu,"
2946		  "iblock %llu, max_blocks %u\n", inode->i_ino,
2947		  (unsigned long long)iblock, max_blocks);
2948	depth = ext_depth(inode);
2949	eh = path[depth].p_hdr;
2950	ex = path[depth].p_ext;
2951	ee_block = le32_to_cpu(ex->ee_block);
2952	ee_len = ext4_ext_get_actual_len(ex);
2953	allocated = ee_len - (iblock - ee_block);
2954	newblock = iblock - ee_block + ext_pblock(ex);
2955	ex2 = ex;
2956	orig_ex.ee_block = ex->ee_block;
2957	orig_ex.ee_len   = cpu_to_le16(ee_len);
2958	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2959
2960	/*
2961	 * If the uninitialized extent begins at the same logical
2962	 * block where the write begins, and the write completely
2963	 * covers the extent, then we don't need to split it.
2964	 */
2965	if ((iblock == ee_block) && (allocated <= max_blocks))
2966		return allocated;
2967
2968	err = ext4_ext_get_access(handle, inode, path + depth);
2969	if (err)
2970		goto out;
2971	/* ex1: ee_block to iblock - 1 : uninitialized */
2972	if (iblock > ee_block) {
2973		ex1 = ex;
2974		ex1->ee_len = cpu_to_le16(iblock - ee_block);
2975		ext4_ext_mark_uninitialized(ex1);
2976		ex2 = &newex;
2977	}
2978	/*
2979	 * for sanity, update the length of the ex2 extent before
2980	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2981	 * overlap of blocks.
2982	 */
2983	if (!ex1 && allocated > max_blocks)
2984		ex2->ee_len = cpu_to_le16(max_blocks);
2985	/* ex3: to ee_block + ee_len : uninitialized */
2986	if (allocated > max_blocks) {
2987		unsigned int newdepth;
2988		ex3 = &newex;
2989		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2990		ext4_ext_store_pblock(ex3, newblock + max_blocks);
2991		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2992		ext4_ext_mark_uninitialized(ex3);
2993		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
2994		if (err == -ENOSPC) {
2995			err =  ext4_ext_zeroout(inode, &orig_ex);
2996			if (err)
2997				goto fix_extent_len;
2998			/* update the extent length and mark as initialized */
2999			ex->ee_block = orig_ex.ee_block;
3000			ex->ee_len   = orig_ex.ee_len;
3001			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3002			ext4_ext_dirty(handle, inode, path + depth);
3003			/* zeroed the full extent */
3004			/* blocks available from iblock */
3005			return allocated;
3006
3007		} else if (err)
3008			goto fix_extent_len;
3009		/*
3010		 * The depth, and hence eh & ex might change
3011		 * as part of the insert above.
3012		 */
3013		newdepth = ext_depth(inode);
3014		/*
3015		 * update the extent length after successful insert of the
3016		 * split extent
3017		 */
3018		orig_ex.ee_len = cpu_to_le16(ee_len -
3019						ext4_ext_get_actual_len(ex3));
3020		depth = newdepth;
3021		ext4_ext_drop_refs(path);
3022		path = ext4_ext_find_extent(inode, iblock, path);
3023		if (IS_ERR(path)) {
3024			err = PTR_ERR(path);
3025			goto out;
3026		}
3027		eh = path[depth].p_hdr;
3028		ex = path[depth].p_ext;
3029		if (ex2 != &newex)
3030			ex2 = ex;
3031
3032		err = ext4_ext_get_access(handle, inode, path + depth);
3033		if (err)
3034			goto out;
3035
3036		allocated = max_blocks;
3037	}
3038	/*
3039	 * If there was a change of depth as part of the
3040	 * insertion of ex3 above, we need to update the length
3041	 * of the ex1 extent again here
3042	 */
3043	if (ex1 && ex1 != ex) {
3044		ex1 = ex;
3045		ex1->ee_len = cpu_to_le16(iblock - ee_block);
3046		ext4_ext_mark_uninitialized(ex1);
3047		ex2 = &newex;
3048	}
3049	/*
3050	 * ex2: iblock to iblock + maxblocks-1 : to be written by direct IO,
3051	 * still uninitialized.
3052	 */
3053	ex2->ee_block = cpu_to_le32(iblock);
3054	ext4_ext_store_pblock(ex2, newblock);
3055	ex2->ee_len = cpu_to_le16(allocated);
3056	ext4_ext_mark_uninitialized(ex2);
3057	if (ex2 != ex)
3058		goto insert;
3059	/* Mark modified extent as dirty */
3060	err = ext4_ext_dirty(handle, inode, path + depth);
3061	ext_debug("out here\n");
3062	goto out;
3063insert:
3064	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3065	if (err == -ENOSPC) {
3066		err =  ext4_ext_zeroout(inode, &orig_ex);
3067		if (err)
3068			goto fix_extent_len;
3069		/* update the extent length and mark as initialized */
3070		ex->ee_block = orig_ex.ee_block;
3071		ex->ee_len   = orig_ex.ee_len;
3072		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3073		ext4_ext_dirty(handle, inode, path + depth);
3074		/* zero out the first half */
3075		return allocated;
3076	} else if (err)
3077		goto fix_extent_len;
3078out:
3079	ext4_ext_show_leaf(inode, path);
3080	return err ? err : allocated;
3081
3082fix_extent_len:
3083	ex->ee_block = orig_ex.ee_block;
3084	ex->ee_len   = orig_ex.ee_len;
3085	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3086	ext4_ext_mark_uninitialized(ex);
3087	ext4_ext_dirty(handle, inode, path + depth);
3088	return err;
3089}

3090static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3091					      struct inode *inode,
3092					      struct ext4_ext_path *path)
3093{
3094	struct ext4_extent *ex;
3095	struct ext4_extent_header *eh;
3096	int depth;
3097	int err = 0;
3098	int ret = 0;
3099
3100	depth = ext_depth(inode);
3101	eh = path[depth].p_hdr;
3102	ex = path[depth].p_ext;
3103
3104	err = ext4_ext_get_access(handle, inode, path + depth);
3105	if (err)
3106		goto out;
3107	/* first mark the extent as initialized */
3108	ext4_ext_mark_initialized(ex);
3109
3110	/*
3111	 * We have to see if it can be merged with the extent
3112	 * on the left.
3113	 */
3114	if (ex > EXT_FIRST_EXTENT(eh)) {
3115		/*
3116		 * To merge left, pass "ex - 1" to try_to_merge(),
3117		 * since it merges towards right _only_.
3118		 */
3119		ret = ext4_ext_try_to_merge(inode, path, ex - 1);
3120		if (ret) {
3121			err = ext4_ext_correct_indexes(handle, inode, path);
3122			if (err)
3123				goto out;
3124			depth = ext_depth(inode);
3125			ex--;
3126		}
3127	}
3128	/*
3129	 * Try to merge towards the right.
3130	 */
3131	ret = ext4_ext_try_to_merge(inode, path, ex);
3132	if (ret) {
3133		err = ext4_ext_correct_indexes(handle, inode, path);
3134		if (err)
3135			goto out;
3136		depth = ext_depth(inode);
3137	}
3138	/* Mark modified extent as dirty */
3139	err = ext4_ext_dirty(handle, inode, path + depth);
3140out:
3141	ext4_ext_show_leaf(inode, path);
3142	return err;
3143}
3144
3145static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3146			sector_t block, int count)
3147{
3148	int i;
3149	for (i = 0; i < count; i++)
3150		unmap_underlying_metadata(bdev, block + i);
3151}
3152
3153static int
3154ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3155			ext4_lblk_t iblock, unsigned int max_blocks,
3156			struct ext4_ext_path *path, int flags,
3157			unsigned int allocated, struct buffer_head *bh_result,
3158			ext4_fsblk_t newblock)
3159{
3160	int ret = 0;
3161	int err = 0;
3162	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3163
3164	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
3165		  "block %llu, max_blocks %u, flags %d, allocated %u",
3166		  inode->i_ino, (unsigned long long)iblock, max_blocks,
3167		  flags, allocated);
3168	ext4_ext_show_leaf(inode, path);
3169
3170	/* get_block() before submit the IO, split the extent */
3171	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3172		ret = ext4_split_unwritten_extents(handle,
3173						inode, path, iblock,
3174						max_blocks, flags);
3175		/*
3176		 * Flag the inode (non-AIO case) or the end_io struct (AIO case)
3177		 * that this IO needs conversion to written when the IO is
3178		 * completed
3179		 */
3180		if (io)
3181			io->flag = EXT4_IO_UNWRITTEN;
3182		else
3183			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3184		if (ext4_should_dioread_nolock(inode))
3185			set_buffer_uninit(bh_result);
3186		goto out;
3187	}
3188	/* IO end_io complete, convert the filled extent to written */
3189	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3190		ret = ext4_convert_unwritten_extents_endio(handle, inode,
3191							path);
3192		if (ret >= 0)
3193			ext4_update_inode_fsync_trans(handle, inode, 1);
3194		goto out2;
3195	}
3196	/* buffered IO case */
3197	/*
3198	 * repeated fallocate creation request:
3199	 * we already have an unwritten extent
3200	 */
3201	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3202		goto map_out;
3203
3204	/* buffered READ or buffered write_begin() lookup */
3205	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3206		/*
3207		 * We have blocks reserved already.  We
3208		 * return allocated blocks so that delalloc
3209		 * won't do block reservation for us.  But
3210		 * the buffer head will be unmapped so that
3211		 * a read from the block returns 0s.
3212		 */
3213		set_buffer_unwritten(bh_result);
3214		goto out1;
3215	}
3216
3217	/* buffered write, writepage time, convert */
3218	ret = ext4_ext_convert_to_initialized(handle, inode,
3219						path, iblock,
3220						max_blocks);
3221	if (ret >= 0)
3222		ext4_update_inode_fsync_trans(handle, inode, 1);
3223out:
3224	if (ret <= 0) {
3225		err = ret;
3226		goto out2;
3227	} else
3228		allocated = ret;
3229	set_buffer_new(bh_result);
3230	/*
3231	 * if we allocated more blocks than requested
3232	 * we need to make sure we unmap the extra blocks
3233	 * allocated. The blocks actually needed will get
3234	 * unmapped later when we find the buffer_head marked
3235	 * new.
3236	 */
3237	if (allocated > max_blocks) {
3238		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3239					newblock + max_blocks,
3240					allocated - max_blocks);
3241		allocated = max_blocks;
3242	}
3243
3244	/*
3245	 * If we have done fallocate at an offset that is already
3246	 * delayed-allocated, we would have block reservation
3247	 * and quota reservation done in the delayed write path.
3248	 * But fallocate would have already updated the quota and block
3249	 * count for this offset. So cancel these reservations.
3250	 */
3251	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3252		ext4_da_update_reserve_space(inode, allocated, 0);
3253
3254map_out:
3255	set_buffer_mapped(bh_result);
3256out1:
3257	if (allocated > max_blocks)
3258		allocated = max_blocks;
3259	ext4_ext_show_leaf(inode, path);
3260	bh_result->b_bdev = inode->i_sb->s_bdev;
3261	bh_result->b_blocknr = newblock;
3262out2:
3263	if (path) {
3264		ext4_ext_drop_refs(path);
3265		kfree(path);
3266	}
3267	return err ? err : allocated;
3268}

3269/*
3270 * Block allocation/map/preallocation routine for extent-based files
3271 *
3272 *
3273 * Needs to be called with
3274 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
3275 * (ie, create is zero); otherwise with down_write(&EXT4_I(inode)->i_data_sem).
3276 *
3277 * return > 0, number of blocks already mapped/allocated;
3278 *          if create == 0 and these are pre-allocated blocks,
3279 *          	the buffer head is unmapped,
3280 *          otherwise blocks are mapped
3281 *
3282 * return = 0, if plain lookup failed (blocks have not been allocated);
3283 *          buffer head is unmapped
3284 *
3285 * return < 0, error case.
3286 */
3287int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3288			ext4_lblk_t iblock,
3289			unsigned int max_blocks, struct buffer_head *bh_result,
3290			int flags)
3291{
3292	struct ext4_ext_path *path = NULL;
3293	struct ext4_extent_header *eh;
3294	struct ext4_extent newex, *ex, *last_ex;
3295	ext4_fsblk_t newblock;
3296	int err = 0, depth, ret, cache_type;
3297	unsigned int allocated = 0;
3298	struct ext4_allocation_request ar;
3299	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3300
3301	__clear_bit(BH_New, &bh_result->b_state);
3302	ext_debug("blocks %u/%u requested for inode %lu\n",
3303			iblock, max_blocks, inode->i_ino);
3304
3305	/* check in cache */
3306	cache_type = ext4_ext_in_cache(inode, iblock, &newex);
3307	if (cache_type) {
3308		if (cache_type == EXT4_EXT_CACHE_GAP) {
3309			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3310				/*
3311				 * block isn't allocated yet and
3312				 * user doesn't want to allocate it
3313				 */
3314				goto out2;
3315			}
3316			/* we should allocate requested block */
3317		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
3318			/* block is already allocated */
3319			newblock = iblock
3320				   - le32_to_cpu(newex.ee_block)
3321				   + ext_pblock(&newex);
3322			/* number of remaining blocks in the extent */
3323			allocated = ext4_ext_get_actual_len(&newex) -
3324					(iblock - le32_to_cpu(newex.ee_block));
3325			goto out;
3326		} else {
3327			BUG();
3328		}
3329	}
3330
3331	/* find extent for this block */
3332	path = ext4_ext_find_extent(inode, iblock, NULL);
3333	if (IS_ERR(path)) {
3334		err = PTR_ERR(path);
3335		path = NULL;
3336		goto out2;
3337	}
3338
3339	depth = ext_depth(inode);
3340
3341	/*
3342	 * consistent leaf must not be empty;
3343	 * this situation is possible, though, _during_ tree modification;
3344	 * this is why the assert can't be put in ext4_ext_find_extent()
3345	 */
3346	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3347		EXT4_ERROR_INODE(inode, "bad extent address "
3348				 "iblock: %d, depth: %d pblock %lld",
3349				 iblock, depth, path[depth].p_block);
3350		err = -EIO;
3351		goto out2;
3352	}
3353	eh = path[depth].p_hdr;
3354
3355	ex = path[depth].p_ext;
3356	if (ex) {
3357		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3358		ext4_fsblk_t ee_start = ext_pblock(ex);
3359		unsigned short ee_len;
3360
3361		/*
3362		 * Uninitialized extents are treated as holes, except that
3363		 * we split out initialized portions during a write.
3364		 */
3365		ee_len = ext4_ext_get_actual_len(ex);
3366		/* if found extent covers block, simply return it */
3367		if (iblock >= ee_block && iblock < ee_block + ee_len) {
3368			newblock = iblock - ee_block + ee_start;
3369			/* number of remaining blocks in the extent */
3370			allocated = ee_len - (iblock - ee_block);
3371			ext_debug("%u fit into %u:%d -> %llu\n", iblock,
3372					ee_block, ee_len, newblock);
3373
3374			/* Do not put uninitialized extent in the cache */
3375			if (!ext4_ext_is_uninitialized(ex)) {
3376				ext4_ext_put_in_cache(inode, ee_block,
3377							ee_len, ee_start,
3378							EXT4_EXT_CACHE_EXTENT);
3379				goto out;
3380			}
3381			ret = ext4_ext_handle_uninitialized_extents(handle,
3382					inode, iblock, max_blocks, path,
3383					flags, allocated, bh_result, newblock);
3384			return ret;
3385		}
3386	}
3387
3388	/*
3389	 * requested block isn't allocated yet;
3390	 * we cannot create blocks if the create flag is zero
3391	 */
3392	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3393		/*
3394		 * put just found gap into cache to speed up
3395		 * subsequent requests
3396		 */
3397		ext4_ext_put_gap_in_cache(inode, path, iblock);
3398		goto out2;
3399	}
3400	/*
3401	 * Okay, we need to do block allocation.
3402	 */
3403
3404	/* find neighbour allocated blocks */
3405	ar.lleft = iblock;
3406	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3407	if (err)
3408		goto out2;
3409	ar.lright = iblock;
3410	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
3411	if (err)
3412		goto out2;
3413
3414	/*
3415	 * See if request is beyond maximum number of blocks we can have in
3416	 * a single extent. For an initialized extent this limit is
3417	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
3418	 * EXT_UNINIT_MAX_LEN.
3419	 */
3420	if (max_blocks > EXT_INIT_MAX_LEN &&
3421	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3422		max_blocks = EXT_INIT_MAX_LEN;
3423	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
3424		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3425		max_blocks = EXT_UNINIT_MAX_LEN;
3426	/* Check if we can really insert the (iblock)::(iblock+max_blocks) extent */
3427	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
3428	newex.ee_block = cpu_to_le32(iblock);
3429	newex.ee_len = cpu_to_le16(max_blocks);
3430	err = ext4_ext_check_overlap(inode, &newex, path);
3431	if (err)
3432		allocated = ext4_ext_get_actual_len(&newex);
3433	else
3434		allocated = max_blocks;
3435
3436	/* allocate new block */
3437	ar.inode = inode;
3438	ar.goal = ext4_ext_find_goal(inode, path, iblock);
3439	ar.logical = iblock;
3440	ar.len = allocated;
3441	if (S_ISREG(inode->i_mode))
3442		ar.flags = EXT4_MB_HINT_DATA;
3443	else
3444		/* disable in-core preallocation for non-regular files */
3445		ar.flags = 0;
3446	newblock = ext4_mb_new_blocks(handle, &ar, &err);
3447	if (!newblock)
3448		goto out2;
3449	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
3450		  ar.goal, newblock, allocated);
3451
3452	/* try to insert new extent into found leaf and return */
3453	ext4_ext_store_pblock(&newex, newblock);
3454	newex.ee_len = cpu_to_le16(ar.len);
3455	/* Mark uninitialized */
3456	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3457		ext4_ext_mark_uninitialized(&newex);
3458		/*
3459		 * An io_end structure is created for every IO write to an
3460		 * uninitialized extent. To avoid unnecessary conversion,
3461		 * here we flag the IO that really needs the conversion.
3462		 * For the non-async direct IO case, flag the inode state
3463		 * that we need to perform conversion when the IO is done.
3464		 */
3465		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3466			if (io)
3467				io->flag = EXT4_IO_UNWRITTEN;
3468			else
3469				ext4_set_inode_state(inode,
3470						     EXT4_STATE_DIO_UNWRITTEN);
3471		}
3472		if (ext4_should_dioread_nolock(inode))
3473			set_buffer_uninit(bh_result);
3474	}
3475
3476	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
3477		if (unlikely(!eh->eh_entries)) {
3478			EXT4_ERROR_INODE(inode,
3479					 "eh->eh_entries == 0 ee_block %d",
3480					 ex->ee_block);
3481			err = -EIO;
3482			goto out2;
3483		}
3484		last_ex = EXT_LAST_EXTENT(eh);
3485		if (iblock + ar.len > le32_to_cpu(last_ex->ee_block)
3486		    + ext4_ext_get_actual_len(last_ex))
3487			EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
3488	}
3489	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3490	if (err) {
3491		/* free data blocks we just allocated */
3492		/* not a good idea to call discard here directly,
3493		 * but otherwise we'd need to call it at every free() */
3494		ext4_discard_preallocations(inode);
3495		ext4_free_blocks(handle, inode, 0, ext_pblock(&newex),
3496				 ext4_ext_get_actual_len(&newex), 0);
3497		goto out2;
3498	}
3499
3500	/* previous routine could use block we allocated */
3501	newblock = ext_pblock(&newex);
3502	allocated = ext4_ext_get_actual_len(&newex);
3503	if (allocated > max_blocks)
3504		allocated = max_blocks;
3505	set_buffer_new(bh_result);
3506
3507	/*
3508	 * Update reserved blocks/metadata blocks after successful
3509	 * block allocation which had been deferred till now.
3510	 */
3511	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3512		ext4_da_update_reserve_space(inode, allocated, 1);
3513
3514	/*
3515	 * Cache the extent and update transaction to commit on fdatasync only
3516	 * when it is _not_ an uninitialized extent.
3517	 */
3518	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
3519		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
3520						EXT4_EXT_CACHE_EXTENT);
3521		ext4_update_inode_fsync_trans(handle, inode, 1);
3522	} else
3523		ext4_update_inode_fsync_trans(handle, inode, 0);
3524out:
3525	if (allocated > max_blocks)
3526		allocated = max_blocks;
3527	ext4_ext_show_leaf(inode, path);
3528	set_buffer_mapped(bh_result);
3529	bh_result->b_bdev = inode->i_sb->s_bdev;
3530	bh_result->b_blocknr = newblock;
3531out2:
3532	if (path) {
3533		ext4_ext_drop_refs(path);
3534		kfree(path);
3535	}
3536	return err ? err : allocated;
3537}
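
/*
 * Caller-side sketch (illustrative; real callers go through the
 * ext4_get_blocks() wrapper): the tri-state return is consumed as
 *
 *	ret = ext4_ext_get_blocks(handle, inode, iblock, max_blocks,
 *				  bh, flags);
 *	if (ret > 0) {
 *		// 'ret' blocks starting at bh->b_blocknr are usable
 *	} else if (ret == 0) {
 *		// hole: bh left unmapped
 *	} else {
 *		// error, e.g. -EIO or -ENOSPC
 *	}
 */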
3538
3539void ext4_ext_truncate(struct inode *inode)
3540{
3541	struct address_space *mapping = inode->i_mapping;
3542	struct super_block *sb = inode->i_sb;
3543	ext4_lblk_t last_block;
3544	handle_t *handle;
3545	int err = 0;
3546
3547	/*
3548	 * probably first extent we're gonna free will be last in block
3549	 */
3550	err = ext4_writepage_trans_blocks(inode);
3551	handle = ext4_journal_start(inode, err);
3552	if (IS_ERR(handle))
3553		return;
3554
3555	if (inode->i_size & (sb->s_blocksize - 1))
3556		ext4_block_truncate_page(handle, mapping, inode->i_size);
3557
3558	if (ext4_orphan_add(handle, inode))
3559		goto out_stop;
3560
3561	down_write(&EXT4_I(inode)->i_data_sem);
3562	ext4_ext_invalidate_cache(inode);
3563
3564	ext4_discard_preallocations(inode);
3565
3566	/*
3567	 * TODO: optimization is possible here.
3568	 * Probably we need not scan at all,
3569	 * because page truncation is enough.
3570	 */
3571
3572	/* we have to know where to truncate from in crash case */
3573	EXT4_I(inode)->i_disksize = inode->i_size;
3574	ext4_mark_inode_dirty(handle, inode);
3575
3576	last_block = (inode->i_size + sb->s_blocksize - 1)
3577			>> EXT4_BLOCK_SIZE_BITS(sb);
3578	err = ext4_ext_remove_space(inode, last_block);
3579
3580	/* In a multi-transaction truncate, we only make the final
3581	 * transaction synchronous.
3582	 */
3583	if (IS_SYNC(inode))
3584		ext4_handle_sync(handle);
3585
3586out_stop:
3587	up_write(&EXT4_I(inode)->i_data_sem);
3588	/*
3589	 * If this was a simple ftruncate() and the file will remain alive,
3590	 * then we need to clear up the orphan record which we created above.
3591	 * However, if this was a real unlink then we were called by
3592	 * ext4_delete_inode(), and we allow that function to clean up the
3593	 * orphan info for us.
3594	 */
3595	if (inode->i_nlink)
3596		ext4_orphan_del(handle, inode);
3597
3598	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3599	ext4_mark_inode_dirty(handle, inode);
3600	ext4_journal_stop(handle);
3601}
3602
3603static void ext4_falloc_update_inode(struct inode *inode,
3604				int mode, loff_t new_size, int update_ctime)
3605{
3606	struct timespec now;
3607
3608	if (update_ctime) {
3609		now = current_fs_time(inode->i_sb);
3610		if (!timespec_equal(&inode->i_ctime, &now))
3611			inode->i_ctime = now;
3612	}
3613	/*
3614	 * Update only when preallocation was requested beyond
3615	 * the file size.
3616	 */
3617	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
3618		if (new_size > i_size_read(inode))
3619			i_size_write(inode, new_size);
3620		if (new_size > EXT4_I(inode)->i_disksize)
3621			ext4_update_i_disksize(inode, new_size);
3622	} else {
3623		/*
3624		 * Mark that we allocate beyond EOF so the subsequent truncate
3625		 * can proceed even if the new size is the same as i_size.
3626		 */
3627		if (new_size > i_size_read(inode))
3628			EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL;
3629	}
3630
3631}
3632
3633/*
3634 * preallocate space for a file. This implements ext4's fallocate inode
3635 * operation, which gets called from sys_fallocate system call.
3636 * For block-mapped files, posix_fallocate should fall back to the method
3637 * of writing zeroes to the required new blocks (the same behavior which is
3638 * expected for file systems which do not support fallocate() system call).
3639 */
3640long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3641{
3642	handle_t *handle;
3643	ext4_lblk_t block;
3644	loff_t new_size;
3645	unsigned int max_blocks;
3646	int ret = 0;
3647	int ret2 = 0;
3648	int retries = 0;
3649	struct buffer_head map_bh;
3650	unsigned int credits, blkbits = inode->i_blkbits;
3651
3652	/*
3653	 * currently supporting (pre)allocate mode for extent-based
3654	 * files _only_
3655	 */
3656	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3657		return -EOPNOTSUPP;
3658
3659	/* preallocation to directories is currently not supported */
3660	if (S_ISDIR(inode->i_mode))
3661		return -ENODEV;
3662
3663	block = offset >> blkbits;
3664	/*
3665	 * We can't just convert len to max_blocks because the range
3666	 * may straddle block boundaries; see the example below.
3667	 */
3668	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3669							- block;
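	/*
	 * e.g. blocksize 4096, offset 3072, len 2048: the byte range
	 * [3072, 5120) touches blocks 0 and 1, so max_blocks is 2 even
	 * though len >> blkbits is 0.
	 */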
3670	/*
3671	 * credits to insert 1 extent into extent tree
3672	 */
3673	credits = ext4_chunk_trans_blocks(inode, max_blocks);
3674	mutex_lock(&inode->i_mutex);
3675retry:
3676	while (ret >= 0 && ret < max_blocks) {
3677		block = block + ret;
3678		max_blocks = max_blocks - ret;
3679		handle = ext4_journal_start(inode, credits);
3680		if (IS_ERR(handle)) {
3681			ret = PTR_ERR(handle);
3682			break;
3683		}
3684		map_bh.b_state = 0;
3685		ret = ext4_get_blocks(handle, inode, block,
3686				      max_blocks, &map_bh,
3687				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
3688		if (ret <= 0) {
3689#ifdef EXT4FS_DEBUG
3690			WARN_ON(ret <= 0);
3691			printk(KERN_ERR "%s: ext4_ext_get_blocks "
3692				    "returned error inode#%lu, block=%u, "
3693				    "max_blocks=%u", __func__,
3694				    inode->i_ino, block, max_blocks);
3695#endif
3696			ext4_mark_inode_dirty(handle, inode);
3697			ret2 = ext4_journal_stop(handle);
3698			break;
3699		}
3700		if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
3701						blkbits) >> blkbits))
3702			new_size = offset + len;
3703		else
3704			new_size = (block + ret) << blkbits;
3705
3706		ext4_falloc_update_inode(inode, mode, new_size,
3707						buffer_new(&map_bh));
3708		ext4_mark_inode_dirty(handle, inode);
3709		ret2 = ext4_journal_stop(handle);
3710		if (ret2)
3711			break;
3712	}
3713	if (ret == -ENOSPC &&
3714			ext4_should_retry_alloc(inode->i_sb, &retries)) {
3715		ret = 0;
3716		goto retry;
3717	}
3718	mutex_unlock(&inode->i_mutex);
3719	return ret > 0 ? ret2 : ret;
3720}
3721
3722/*
3723 * This function converts a range of blocks to written extents.
3724 * The caller of this function will pass the start offset and the size;
3725 * all unwritten extents within this range will be converted to
3726 * written extents.
3727 *
3728 * This function is called from the direct IO end_io callback
3729 * function, to convert the fallocated extents after the IO is completed.
3730 * Returns 0 on success.
3731 */
3732int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3733				    ssize_t len)
3734{
3735	handle_t *handle;
3736	ext4_lblk_t block;
3737	unsigned int max_blocks;
3738	int ret = 0;
3739	int ret2 = 0;
3740	struct buffer_head map_bh;
3741	unsigned int credits, blkbits = inode->i_blkbits;
3742
3743	block = offset >> blkbits;
3744	/*
3745	 * We can't just convert len to max_blocks because the range
3746	 * may straddle block boundaries (e.g. blocksize 4096, offset 3072, len 2048).
3747	 */
3748	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3749							- block;
3750	/*
3751	 * credits to insert 1 extent into extent tree
3752	 */
3753	credits = ext4_chunk_trans_blocks(inode, max_blocks);
3754	while (ret >= 0 && ret < max_blocks) {
3755		block = block + ret;
3756		max_blocks = max_blocks - ret;
3757		handle = ext4_journal_start(inode, credits);
3758		if (IS_ERR(handle)) {
3759			ret = PTR_ERR(handle);
3760			break;
3761		}
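		/*
		 * EXT4_GET_BLOCKS_IO_CONVERT_EXT clears the unwritten
		 * flag on the extents covering this chunk, since the
		 * data has now safely reached the disk.
		 */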
3762		map_bh.b_state = 0;
3763		ret = ext4_get_blocks(handle, inode, block,
3764				      max_blocks, &map_bh,
3765				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_get_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u\n", __func__,
				    inode->i_ino, block, max_blocks);
		}
3773		ext4_mark_inode_dirty(handle, inode);
3774		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
3776			break;
3777	}
3778	return ret > 0 ? ret2 : ret;
}

/*
3781 * Callback function called for each extent to gather FIEMAP information.
3782 */
3783static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3784		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
3785		       void *data)
3786{
3787	struct fiemap_extent_info *fieinfo = data;
3788	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
3789	__u64	logical;
3790	__u64	physical;
3791	__u64	length;
3792	__u32	flags = 0;
3793	int	error;
3794
3795	logical =  (__u64)newex->ec_block << blksize_bits;
3796
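	/*
	 * A gap in the extent tree may still be dirty data sitting in
	 * the page cache under delayed allocation; probe the page cache
	 * so such ranges are reported as FIEMAP_EXTENT_DELALLOC instead
	 * of being skipped.
	 */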
	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
		pgoff_t offset;
		struct page *page;
		struct buffer_head *bh;

		offset = logical >> PAGE_SHIFT;
		page = find_get_page(inode->i_mapping, offset);
		if (!page)
			return EXT_CONTINUE;
		if (!page_has_buffers(page)) {
			/* drop the reference taken by find_get_page() */
			page_cache_release(page);
			return EXT_CONTINUE;
		}

		/* page_buffers() cannot fail once page_has_buffers() is true */
		bh = page_buffers(page);

		if (buffer_delay(bh)) {
			flags |= FIEMAP_EXTENT_DELALLOC;
			page_cache_release(page);
		} else {
			page_cache_release(page);
			return EXT_CONTINUE;
		}
	}
3820
3821	physical = (__u64)newex->ec_start << blksize_bits;
3822	length =   (__u64)newex->ec_len << blksize_bits;
3823
3824	if (ex && ext4_ext_is_uninitialized(ex))
3825		flags |= FIEMAP_EXTENT_UNWRITTEN;
3826
3827	/*
3828	 * If this extent reaches EXT_MAX_BLOCK, it must be last.
3829	 *
3830	 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
3831	 * this also indicates no more allocated blocks.
3832	 *
3833	 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
3834	 */
3835	if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
3836	    newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
3837		loff_t size = i_size_read(inode);
3838		loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);
3839
3840		flags |= FIEMAP_EXTENT_LAST;
3841		if ((flags & FIEMAP_EXTENT_DELALLOC) &&
3842		    logical+length > size)
3843			length = (size - logical + bs - 1) & ~(bs-1);
3844	}
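	/*
	 * Worked example for the rounding above: with bs = 4096,
	 * logical = 0 and i_size = 5000, a trailing delalloc extent is
	 * reported as (5000 + 4095) & ~4095 = 8192 bytes rather than
	 * the full span of dirty pages.
	 */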
3845
3846	error = fiemap_fill_next_extent(fieinfo, logical, physical,
3847					length, flags);
3848	if (error < 0)
3849		return error;
3850	if (error == 1)
3851		return EXT_BREAK;
3852
3853	return EXT_CONTINUE;
3854}
3855
/* fiemap flags we can handle are specified here */
3857#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
3858
3859static int ext4_xattr_fiemap(struct inode *inode,
3860				struct fiemap_extent_info *fieinfo)
3861{
3862	__u64 physical = 0;
3863	__u64 length;
3864	__u32 flags = FIEMAP_EXTENT_LAST;
3865	int blockbits = inode->i_sb->s_blocksize_bits;
3866	int error = 0;
3867
3868	/* in-inode? */
3869	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
3870		struct ext4_iloc iloc;
3871		int offset;	/* offset of xattr in inode */
3872
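		/*
		 * In-inode xattrs live in the inode table block itself,
		 * right behind the fixed inode fields and the extra
		 * inode space, so the physical location is that block
		 * plus the offset computed below.
		 */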
		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = (__u64)iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}
3886
3887	if (physical)
3888		error = fiemap_fill_next_extent(fieinfo, 0, physical,
3889						length, flags);
3890	return (error < 0 ? error : 0);
3891}
3892
3893int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3894		__u64 start, __u64 len)
3895{
3896	ext4_lblk_t start_blk;
3897	int error = 0;
3898
	/* fall back to the generic path if not in extents format */
3900	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3901		return generic_block_fiemap(inode, fieinfo, start, len,
3902			ext4_get_block);
3903
3904	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
3905		return -EBADR;
3906
3907	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3908		error = ext4_xattr_fiemap(inode, fieinfo);
3909	} else {
3910		ext4_lblk_t len_blks;
3911		__u64 last_blk;
3912
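		/*
		 * Convert the byte range into a logical block range,
		 * clamping the end at EXT_MAX_BLOCK - 1 so the block
		 * count fits in a 32-bit ext4_lblk_t.
		 */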
3913		start_blk = start >> inode->i_sb->s_blocksize_bits;
3914		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
3915		if (last_blk >= EXT_MAX_BLOCK)
3916			last_blk = EXT_MAX_BLOCK-1;
3917		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
3918
3919		/*
3920		 * Walk the extent tree gathering extent information.
3921		 * ext4_ext_fiemap_cb will push extents back to user.
3922		 */
3923		error = ext4_ext_walk_space(inode, start_blk, len_blks,
3924					  ext4_ext_fiemap_cb, fieinfo);
3925	}
3926
3927	return error;
3928}
3929
3930